hexsha (string, 40-40) | size (int64, 10-805k) | ext (string, 6 classes) | lang (string, 1 class) | max_stars_repo_path (string, 4-176) | max_stars_repo_name (string, 7-114) | max_stars_repo_head_hexsha (string, 40-40) | max_stars_repo_licenses (list, 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24-24, nullable) | max_stars_repo_stars_event_max_datetime (string, 24-24, nullable) | max_issues_repo_path (string, 4-176) | max_issues_repo_name (string, 7-114) | max_issues_repo_head_hexsha (string, 40-40) | max_issues_repo_licenses (list, 1-10) | max_issues_count (int64, 1-48.5k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24-24, nullable) | max_issues_repo_issues_event_max_datetime (string, 24-24, nullable) | max_forks_repo_path (string, 4-176) | max_forks_repo_name (string, 7-114) | max_forks_repo_head_hexsha (string, 40-40) | max_forks_repo_licenses (list, 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24-24, nullable) | max_forks_repo_forks_event_max_datetime (string, 24-24, nullable) | content (string, 10-805k) | avg_line_length (float64, 5.53-11k) | max_line_length (int64, 10-129k) | alphanum_fraction (float64, 0.13-0.93) | content_no_comment (string, 0-449k) | is_comment_constant_removed (bool, 2 classes) | is_sharp_comment_removed (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
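Each row below pairs file-level metadata (content hash, size, and repo provenance for the most-starred, most-issued, and most-forked copies of the file) with the file contents, both raw (`content`) and with `#` comments stripped (`content_no_comment`). As a minimal sketch of how rows with this schema can be consumed, assuming the data is published as a Hugging Face dataset (the dataset id below is a hypothetical placeholder, not given in this dump):

```python
# Minimal sketch: stream rows with the schema above.
# "org/python-source-files" is a hypothetical placeholder dataset id.
from datasets import load_dataset

ds = load_dataset("org/python-source-files", split="train", streaming=True)
for row in ds:
    print(row["hexsha"], row["max_stars_repo_name"], row["size"])
    # Prefer the comment-stripped text when '#' comments were removed.
    text = row["content_no_comment"] if row["is_sharp_comment_removed"] else row["content"]
    break
```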
f71938dd1d111ba18183cd67737d4ea3ac849931 | 863 | py | Python | Software/Estadística/MCMC/HS/CC+SN_int1/4params/analisis_cadenas_4params.py | matiasleize/tesis_licenciatura | 5df6e341314583702b466b8ed7977d410f0ee457 | ["MIT"] | null | null | null | Software/Estadística/MCMC/HS/CC+SN_int1/4params/analisis_cadenas_4params.py | matiasleize/tesis_licenciatura | 5df6e341314583702b466b8ed7977d410f0ee457 | ["MIT"] | null | null | null | Software/Estadística/MCMC/HS/CC+SN_int1/4params/analisis_cadenas_4params.py | matiasleize/tesis_licenciatura | 5df6e341314583702b466b8ed7977d410f0ee457 | ["MIT"] | null | null | null |
import numpy as np
from matplotlib import pyplot as plt
import emcee
import sys
import os
from pc_path import definir_path
path_git, path_datos_global = definir_path()
os.chdir(path_git)
sys.path.append('./Software/Funcionales/Clases')
from funciones_graficador import Graficador
#%% Import the chains
os.chdir(path_datos_global+'/Resultados_cadenas/')
filename = "sample_HS_CC+SN_4params_int1.h5"
reader = emcee.backends.HDFBackend(filename)
# Some values
tau = reader.get_autocorr_time()
burnin = int(2 * np.max(tau))
thin = int(0.5 * np.min(tau))
#%%
%matplotlib qt5
#burnin=100
#thin=1
analisis = Graficador(reader, [r'$M_{abs}$', r'$\Omega_{m}^{\Lambda CDM}$', 'b', r'$H_{0}^{\Lambda CDM}$'], '1 SNIA + CC (HS)')
analisis.graficar_contornos(discard=burnin, thin=thin, poster=False, color='r')
#%%
analisis.graficar_cadenas()
analisis.reportar_intervalos()
| 26.96875 | 120 | 0.753187 |
import numpy as np
from matplotlib import pyplot as plt
import emcee
import sys
import os
from pc_path import definir_path
path_git, path_datos_global = definir_path()
os.chdir(path_git)
sys.path.append('./Software/Funcionales/Clases')
from funciones_graficador import Graficador
os.chdir(path_datos_global+'/Resultados_cadenas/')
filename = "sample_HS_CC+SN_4params_int1.h5"
reader = emcee.backends.HDFBackend(filename)
tau = reader.get_autocorr_time()
burnin = int(2 * np.max(tau))
thin = int(0.5 * np.min(tau))
%matplotlib qt5
analisis = Graficador(reader, [r'$M_{abs}$', r'$\Omega_{m}^{\Lambda CDM}$', 'b', r'$H_{0}^{\Lambda CDM}$'], '1 SNIA + CC (HS)')
analisis.graficar_contornos(discard=burnin, thin=thin, poster=False, color='r')
analisis.graficar_cadenas()
analisis.reportar_intervalos()
| false | true |
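The script above derives its burn-in and thinning from emcee's integrated autocorrelation time estimate. As a sketch of what the `Graficador` helper presumably does with those values internally, using only the public emcee API:

```python
# Sketch: applying the burnin/thin values computed above with emcee's
# public API (the repo's Graficador class presumably does the equivalent).
import numpy as np
import emcee

reader = emcee.backends.HDFBackend("sample_HS_CC+SN_4params_int1.h5")
tau = reader.get_autocorr_time()   # one autocorrelation time per parameter
burnin = int(2 * np.max(tau))      # discard ~2 autocorrelation times as burn-in
thin = int(0.5 * np.min(tau))      # keep roughly independent samples
flat_samples = reader.get_chain(discard=burnin, thin=thin, flat=True)
print(flat_samples.shape)          # (n_steps_kept * n_walkers, 4 parameters)
```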
f71939e1d16adffd88e34ce88da8f38f90363eca | 2,079 | py | Python | scripts/sdk_fetch_coverage_tools.py | PelionIoT/mbed-cloud-sdk-java | cc99c51db43cc9ae36601f20f20b7d8cd7515432 | ["Apache-2.0"] | 7 | 2017-12-28T11:19:15.000Z | 2020-03-23T19:15:31.000Z | scripts/sdk_fetch_coverage_tools.py | PelionIoT/mbed-cloud-sdk-java | cc99c51db43cc9ae36601f20f20b7d8cd7515432 | ["Apache-2.0"] | 99 | 2018-01-09T23:56:13.000Z | 2020-11-03T05:20:55.000Z | scripts/sdk_fetch_coverage_tools.py | PelionIoT/mbed-cloud-sdk-java | cc99c51db43cc9ae36601f20f20b7d8cd7515432 | ["Apache-2.0"] | 5 | 2018-08-02T06:29:18.000Z | 2019-10-23T11:43:59.000Z |
#!/usr/bin/python
import os
import sdk_common
# Block in charge of fetching code coverage tools
class SDKCoverageToolsFetcher(sdk_common.BuildStepUsingGradle):
def __init__(self, logger=None):
super(SDKCoverageToolsFetcher, self).__init__('SDK Coverage tools fetch', logger)
self.is_code_coverage = self.common_config.get_config().should_perform_code_coverage()
self.artifacts_parser = self.common_config.get_config().get_new_artifact_log_parser(self)
self.jacoco_cli_name = 'jacococli.jar'
def retrieve_folder_location(self, key):
if not key:
return None
self.artifacts_parser.load()
return self.clean_path(
self.artifacts_parser.get_property(key),
False)
def check_whether_coverage_result_folder_has_been_created(self):
code_coverage_result_dir = self.retrieve_folder_location('SDK_COVERAGE_RESULTS_DIR')
return False if not code_coverage_result_dir else os.path.exists(code_coverage_result_dir)
def check_whether_tools_have_been_copied(self):
code_coverage_tools_dir = self.retrieve_folder_location('SDK_COVERAGE_TOOLS_DIR')
return False if not code_coverage_tools_dir else (
os.path.exists(code_coverage_tools_dir) and len(
os.listdir(code_coverage_tools_dir)) >= 2) # TODO change if fewer tools are used
def has_already_been_run(self):
return self.check_whether_coverage_result_folder_has_been_created() and self.check_whether_tools_have_been_copied()
def execute(self):
self.print_title()
try:
if self.is_code_coverage:
self.log_info("Retrieving code coverage tools")
if not self.has_already_been_run():
self.execute_gradle_task("copyCoverageAgent")
else:
self.log_info("Tools are already present.")
        except Exception:
            self.log_error('Failed to retrieve code coverage tools')
return False
self.log_info("Done.")
return True
| 40.764706 | 123 | 0.696489 |
import os
import sdk_common
class SDKCoverageToolsFetcher(sdk_common.BuildStepUsingGradle):
def __init__(self, logger=None):
super(SDKCoverageToolsFetcher, self).__init__('SDK Coverage tools fetch', logger)
self.is_code_coverage = self.common_config.get_config().should_perform_code_coverage()
self.artifacts_parser = self.common_config.get_config().get_new_artifact_log_parser(self)
self.jacoco_cli_name = 'jacococli.jar'
def retrieve_folder_location(self, key):
if not key:
return None
self.artifacts_parser.load()
return self.clean_path(
self.artifacts_parser.get_property(key),
False)
def check_whether_coverage_result_folder_has_been_created(self):
code_coverage_result_dir = self.retrieve_folder_location('SDK_COVERAGE_RESULTS_DIR')
return False if not code_coverage_result_dir else os.path.exists(code_coverage_result_dir)
def check_whether_tools_have_been_copied(self):
code_coverage_tools_dir = self.retrieve_folder_location('SDK_COVERAGE_TOOLS_DIR')
return False if not code_coverage_tools_dir else (
os.path.exists(code_coverage_tools_dir) and len(
os.listdir(code_coverage_tools_dir)) >= 2)
def has_already_been_run(self):
return self.check_whether_coverage_result_folder_has_been_created() and self.check_whether_tools_have_been_copied()
def execute(self):
self.print_title()
try:
if self.is_code_coverage:
self.log_info("Retrieving code coverage tools")
if not self.has_already_been_run():
self.execute_gradle_task("copyCoverageAgent")
else:
self.log_info("Tools are already present.")
        except Exception:
            self.log_error('Failed to retrieve code coverage tools')
return False
self.log_info("Done.")
return True
| true | true |
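The fetcher above is one step in a Gradle-driven build pipeline; `execute()` returns a boolean success flag and skips the Gradle task when `has_already_been_run()` reports the tools are already in place. A hypothetical usage sketch (the repo's actual entry point in `sdk_common` is not shown in this row):

```python
# Hypothetical usage: run the step and propagate failure to the shell.
from sdk_fetch_coverage_tools import SDKCoverageToolsFetcher

step = SDKCoverageToolsFetcher()
if not step.execute():
    raise SystemExit(1)
```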
f7193a1de09a2338512e1f71556799b0418fb19a | 683 | py | Python | app/core/migrations/0002_tag.py | bwanarm/recipe-app-api | 1204280495547ceb93a59cd2ec2b1c2a82ef187d | ["MIT"] | null | null | null | app/core/migrations/0002_tag.py | bwanarm/recipe-app-api | 1204280495547ceb93a59cd2ec2b1c2a82ef187d | ["MIT"] | null | null | null | app/core/migrations/0002_tag.py | bwanarm/recipe-app-api | 1204280495547ceb93a59cd2ec2b1c2a82ef187d | ["MIT"] | null | null | null |
# Generated by Django 2.1.15 on 2020-07-31 13:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 28.458333 | 118 | 0.616398 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
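The migration above adds a `Tag` table with an auto primary key, a 255-character name, and a cascading foreign key to the user model. A sketch of the model definition such a migration is typically generated from (the repo's actual `models.py` is not part of this row):

```python
# Sketch of the model a migration like 0002_tag is typically generated from.
from django.conf import settings
from django.db import models


class Tag(models.Model):
    """Tag attached to a recipe, owned by a user."""
    name = models.CharField(max_length=255)
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )

    def __str__(self):
        return self.name
```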
f7193a2229b00e7439ffb31eaf7bc0964fc3bb54 | 10,877 | py | Python | pretrained-model/stt/hubert/conformer-tiny-ctc.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | ["MIT"] | null | null | null | pretrained-model/stt/hubert/conformer-tiny-ctc.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | ["MIT"] | null | null | null | pretrained-model/stt/hubert/conformer-tiny-ctc.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | ["MIT"] | null | null | null |
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
import pyroomacoustics as pra
import numpy as np
from pydub import AudioSegment
from sklearn.utils import shuffle
from glob import glob
import random
import json
from malaya_speech.train.model.conformer.model import Model as ConformerModel
from malaya_speech.train.model import hubert, ctc
import malaya_speech.train as train
import malaya_speech.config
import malaya_speech.augmentation.waveform as augmentation
import malaya_speech
import tensorflow as tf
import string
sr = 16000
maxlen = 18
minlen_text = 1
prob_aug = 0.95
unique_vocab = [''] + list(string.ascii_lowercase + string.digits) + [' ']
def augment_room(y, scale=1.0):
corners = np.array(
[[0, 0], [0, 5 * scale], [3 * scale, 5 * scale], [3 * scale, 0]]
).T
room = pra.Room.from_corners(
corners,
fs=sr,
materials=pra.Material(0.2, 0.15),
ray_tracing=True,
air_absorption=True,
)
room.extrude(3.5, materials=pra.Material(0.2, 0.15))
room.set_ray_tracing(
receiver_radius=0.5, n_rays=1000, energy_thres=1e-5
)
room.add_source([1.5 * scale, 4 * scale, 0.5], signal=y)
R = np.array([[1.5 * scale], [0.5 * scale], [0.5]])
room.add_microphone(R)
room.simulate()
return room.mic_array.signals[0]
def random_amplitude_threshold(sample, low=1, high=2, threshold=0.4):
y_aug = sample.copy()
dyn_change = np.random.uniform(low=low, high=high)
y_aug[np.abs(y_aug) >= threshold] = (
y_aug[np.abs(y_aug) >= threshold] * dyn_change
)
return np.clip(y_aug, -1, 1)
def add_uniform_noise(
sample, power=0.01, return_noise=False, scale=False
):
y_noise = sample.copy()
noise_amp = power * np.random.uniform() * np.amax(y_noise)
noise = noise_amp * np.random.normal(size=y_noise.shape[0])
y_noise = y_noise + noise
if scale:
y_noise = y_noise / (np.max(np.abs(y_noise)) + 1e-9)
if return_noise:
if scale:
noise = noise / (np.max(np.abs(y_noise)) + 1e-9)
return y_noise, noise
else:
return y_noise
def calc(signal, add_uniform=True):
choice = random.randint(0, 10)
print('choice', choice)
if choice == 0:
x = augmentation.sox_augment_high(
signal,
min_bass_gain=random.randint(25, 50),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 50),
negate=1,
)
if choice == 1:
x = augmentation.sox_augment_high(
signal,
min_bass_gain=random.randint(25, 70),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 50),
negate=0,
)
if choice == 2:
x = augmentation.sox_augment_low(
signal,
min_bass_gain=random.randint(5, 30),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 50),
negate=random.randint(0, 1),
)
if choice == 3:
x = augmentation.sox_augment_combine(
signal,
min_bass_gain_high=random.randint(25, 70),
min_bass_gain_low=random.randint(5, 30),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 90),
)
if choice == 4:
x = augmentation.sox_reverb(
signal,
reverberance=random.randint(10, 80),
hf_damping=10,
room_scale=random.randint(10, 90),
)
if choice == 5:
x = random_amplitude_threshold(
signal, threshold=random.uniform(0.35, 0.8)
)
if choice == 6:
x = augmentation.lowpass_filter(
signal, sr=sr, cutoff=random.randint(200, 551)
)
if choice == 7:
x = augmentation.highpass_filter(
signal, sr=sr, cutoff=random.randint(551, 1653)
)
if choice == 8:
x = augmentation.bandpass_filter(
signal,
sr=sr,
cutoff_low=random.randint(200, 551),
cutoff_high=random.randint(551, 1653),
)
if choice == 9:
x = augment_room(signal)
if choice == 10:
x = signal
if choice not in [5] and random.gauss(0.5, 0.14) > 0.6:
x = random_amplitude_threshold(
x, low=1.0, high=2.0, threshold=random.uniform(0.6, 0.9)
)
if random.gauss(0.5, 0.14) > 0.6 and add_uniform:
x = add_uniform_noise(x, power=random.uniform(0.005, 0.015))
return x
def mp3_to_wav(file, sr=sr):
audio = AudioSegment.from_file(file)
audio = audio.set_frame_rate(sr).set_channels(1)
sample = np.array(audio.get_array_of_samples())
return malaya_speech.astype.int_to_float(sample), sr
def generate(file):
with open(file) as fopen:
dataset = json.load(fopen)
audios, cleaned_texts = dataset['X'], dataset['Y']
while True:
audios, cleaned_texts = shuffle(audios, cleaned_texts)
for i in range(len(audios)):
try:
if audios[i].endswith('.mp3'):
# print('found mp3', audios[i])
wav_data, _ = mp3_to_wav(audios[i])
else:
wav_data, _ = malaya_speech.load(audios[i], sr=sr)
if len(cleaned_texts[i]) < minlen_text:
# print(f'skipped text too short {audios[i]}')
continue
if (len(wav_data) / sr) > maxlen:
continue
t = [unique_vocab.index(c) for c in cleaned_texts[i]]
yield {
'waveforms': wav_data,
'waveforms_length': [len(wav_data)],
'targets': t,
'targets_length': [len(t)],
}
except Exception as e:
print(e)
def get_dataset(
file,
batch_size=12,
shuffle_size=20,
thread_count=24,
maxlen_feature=1800,
):
def get():
dataset = tf.data.Dataset.from_generator(
generate,
{
'waveforms': tf.float32,
'waveforms_length': tf.int32,
'targets': tf.int32,
'targets_length': tf.int32,
},
output_shapes={
'waveforms': tf.TensorShape([None]),
'waveforms_length': tf.TensorShape([None]),
'targets': tf.TensorShape([None]),
'targets_length': tf.TensorShape([None]),
},
args=(file,),
)
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
dataset = dataset.padded_batch(
batch_size,
padded_shapes={
'waveforms': tf.TensorShape([None]),
'waveforms_length': tf.TensorShape([None]),
'targets': tf.TensorShape([None]),
'targets_length': tf.TensorShape([None]),
},
padding_values={
'waveforms': tf.constant(0, dtype=tf.float32),
'waveforms_length': tf.constant(0, dtype=tf.int32),
'targets': tf.constant(0, dtype=tf.int32),
'targets_length': tf.constant(0, dtype=tf.int32),
},
)
return dataset
return get
class Encoder:
def __init__(self, config):
self.config = config
self.encoder = ConformerModel(**self.config)
def __call__(self, x, input_mask, training=True):
return self.encoder(x, training=training)
total_steps = 2000000
def model_fn(features, labels, mode, params):
config_conformer = malaya_speech.config.conformer_tiny_encoder_config
config_conformer['subsampling']['type'] = 'none'
config_conformer['dropout'] = 0.0
encoder = Encoder(config_conformer)
cfg = hubert.HuBERTConfig(
extractor_mode='layer_norm',
dropout=0.0,
attention_dropout=0.0,
encoder_layerdrop=0.0,
dropout_input=0.0,
dropout_features=0.0,
final_dim=128,
)
model = hubert.Model(cfg, encoder, ['pad', 'eos', 'unk'] + [str(i) for i in range(100)])
X = features['waveforms']
X_len = features['waveforms_length'][:, 0]
targets = features['targets']
targets_int32 = tf.cast(targets, tf.int32)
targets_length = features['targets_length'][:, 0]
r = model(X, padding_mask=X_len, features_only=True, mask=False)
logits = tf.layers.dense(r['x'], len(unique_vocab) + 1)
seq_lens = tf.reduce_sum(
tf.cast(tf.logical_not(r['padding_mask']), tf.int32), axis=1
)
mean_error, sum_error, sum_weight = ctc.loss.ctc_loss(
logits, seq_lens, targets_int32, targets_length
)
loss = mean_error
accuracy = ctc.metrics.ctc_sequence_accuracy(
logits, seq_lens, targets_int32, targets_length,
)
tf.identity(loss, 'train_loss')
tf.identity(accuracy, name='train_accuracy')
tf.summary.scalar('train_accuracy', accuracy)
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
init_checkpoint = 'hubert-conformer-tiny/model.ckpt-1000000'
assignment_map, initialized_variable_names = train.get_assignment_map_from_checkpoint(
variables, init_checkpoint
)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = train.optimizer.adamw.create_optimizer(
loss,
init_lr=5e-5,
num_train_steps=total_steps,
num_warmup_steps=100000,
end_learning_rate=0.0,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
clip_norm=1.0,
)
estimator_spec = tf.estimator.EstimatorSpec(
mode=mode, loss=loss, train_op=train_op
)
elif mode == tf.estimator.ModeKeys.EVAL:
estimator_spec = tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL,
loss=loss,
eval_metric_ops={
'accuracy': ctc.metrics.ctc_sequence_accuracy_estimator(
logits, seq_lens, targets_int32, targets_length
)
},
)
return estimator_spec
train_hooks = [
tf.train.LoggingTensorHook(
['train_accuracy', 'train_loss'], every_n_iter=1
)
]
train_dataset = get_dataset('bahasa-asr-train-combined.json')
dev_dataset = get_dataset('bahasa-asr-test.json')
train.run_training(
train_fn=train_dataset,
model_fn=model_fn,
model_dir='hubert-conformer-tiny-ctc-char',
num_gpus=1,
log_step=1,
save_checkpoint_step=20000,
max_steps=total_steps,
eval_fn=dev_dataset,
train_hooks=train_hooks,
)
| 30.639437 | 92 | 0.590144 |
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
import pyroomacoustics as pra
import numpy as np
from pydub import AudioSegment
from sklearn.utils import shuffle
from glob import glob
import random
import json
from malaya_speech.train.model.conformer.model import Model as ConformerModel
from malaya_speech.train.model import hubert, ctc
import malaya_speech.train as train
import malaya_speech.config
import malaya_speech.augmentation.waveform as augmentation
import malaya_speech
import tensorflow as tf
import string
sr = 16000
maxlen = 18
minlen_text = 1
prob_aug = 0.95
unique_vocab = [''] + list(string.ascii_lowercase + string.digits) + [' ']
def augment_room(y, scale=1.0):
corners = np.array(
[[0, 0], [0, 5 * scale], [3 * scale, 5 * scale], [3 * scale, 0]]
).T
room = pra.Room.from_corners(
corners,
fs=sr,
materials=pra.Material(0.2, 0.15),
ray_tracing=True,
air_absorption=True,
)
room.extrude(3.5, materials=pra.Material(0.2, 0.15))
room.set_ray_tracing(
receiver_radius=0.5, n_rays=1000, energy_thres=1e-5
)
room.add_source([1.5 * scale, 4 * scale, 0.5], signal=y)
R = np.array([[1.5 * scale], [0.5 * scale], [0.5]])
room.add_microphone(R)
room.simulate()
return room.mic_array.signals[0]
def random_amplitude_threshold(sample, low=1, high=2, threshold=0.4):
y_aug = sample.copy()
dyn_change = np.random.uniform(low=low, high=high)
y_aug[np.abs(y_aug) >= threshold] = (
y_aug[np.abs(y_aug) >= threshold] * dyn_change
)
return np.clip(y_aug, -1, 1)
def add_uniform_noise(
sample, power=0.01, return_noise=False, scale=False
):
y_noise = sample.copy()
noise_amp = power * np.random.uniform() * np.amax(y_noise)
noise = noise_amp * np.random.normal(size=y_noise.shape[0])
y_noise = y_noise + noise
if scale:
y_noise = y_noise / (np.max(np.abs(y_noise)) + 1e-9)
if return_noise:
if scale:
noise = noise / (np.max(np.abs(y_noise)) + 1e-9)
return y_noise, noise
else:
return y_noise
def calc(signal, add_uniform=True):
choice = random.randint(0, 10)
print('choice', choice)
if choice == 0:
x = augmentation.sox_augment_high(
signal,
min_bass_gain=random.randint(25, 50),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 50),
negate=1,
)
if choice == 1:
x = augmentation.sox_augment_high(
signal,
min_bass_gain=random.randint(25, 70),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 50),
negate=0,
)
if choice == 2:
x = augmentation.sox_augment_low(
signal,
min_bass_gain=random.randint(5, 30),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 50),
negate=random.randint(0, 1),
)
if choice == 3:
x = augmentation.sox_augment_combine(
signal,
min_bass_gain_high=random.randint(25, 70),
min_bass_gain_low=random.randint(5, 30),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 90),
)
if choice == 4:
x = augmentation.sox_reverb(
signal,
reverberance=random.randint(10, 80),
hf_damping=10,
room_scale=random.randint(10, 90),
)
if choice == 5:
x = random_amplitude_threshold(
signal, threshold=random.uniform(0.35, 0.8)
)
if choice == 6:
x = augmentation.lowpass_filter(
signal, sr=sr, cutoff=random.randint(200, 551)
)
if choice == 7:
x = augmentation.highpass_filter(
signal, sr=sr, cutoff=random.randint(551, 1653)
)
if choice == 8:
x = augmentation.bandpass_filter(
signal,
sr=sr,
cutoff_low=random.randint(200, 551),
cutoff_high=random.randint(551, 1653),
)
if choice == 9:
x = augment_room(signal)
if choice == 10:
x = signal
if choice not in [5] and random.gauss(0.5, 0.14) > 0.6:
x = random_amplitude_threshold(
x, low=1.0, high=2.0, threshold=random.uniform(0.6, 0.9)
)
if random.gauss(0.5, 0.14) > 0.6 and add_uniform:
x = add_uniform_noise(x, power=random.uniform(0.005, 0.015))
return x
def mp3_to_wav(file, sr=sr):
audio = AudioSegment.from_file(file)
audio = audio.set_frame_rate(sr).set_channels(1)
sample = np.array(audio.get_array_of_samples())
return malaya_speech.astype.int_to_float(sample), sr
def generate(file):
with open(file) as fopen:
dataset = json.load(fopen)
audios, cleaned_texts = dataset['X'], dataset['Y']
while True:
audios, cleaned_texts = shuffle(audios, cleaned_texts)
for i in range(len(audios)):
try:
if audios[i].endswith('.mp3'):
wav_data, _ = mp3_to_wav(audios[i])
else:
wav_data, _ = malaya_speech.load(audios[i], sr=sr)
if len(cleaned_texts[i]) < minlen_text:
continue
if (len(wav_data) / sr) > maxlen:
continue
t = [unique_vocab.index(c) for c in cleaned_texts[i]]
yield {
'waveforms': wav_data,
'waveforms_length': [len(wav_data)],
'targets': t,
'targets_length': [len(t)],
}
except Exception as e:
print(e)
def get_dataset(
file,
batch_size=12,
shuffle_size=20,
thread_count=24,
maxlen_feature=1800,
):
def get():
dataset = tf.data.Dataset.from_generator(
generate,
{
'waveforms': tf.float32,
'waveforms_length': tf.int32,
'targets': tf.int32,
'targets_length': tf.int32,
},
output_shapes={
'waveforms': tf.TensorShape([None]),
'waveforms_length': tf.TensorShape([None]),
'targets': tf.TensorShape([None]),
'targets_length': tf.TensorShape([None]),
},
args=(file,),
)
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
dataset = dataset.padded_batch(
batch_size,
padded_shapes={
'waveforms': tf.TensorShape([None]),
'waveforms_length': tf.TensorShape([None]),
'targets': tf.TensorShape([None]),
'targets_length': tf.TensorShape([None]),
},
padding_values={
'waveforms': tf.constant(0, dtype=tf.float32),
'waveforms_length': tf.constant(0, dtype=tf.int32),
'targets': tf.constant(0, dtype=tf.int32),
'targets_length': tf.constant(0, dtype=tf.int32),
},
)
return dataset
return get
class Encoder:
def __init__(self, config):
self.config = config
self.encoder = ConformerModel(**self.config)
def __call__(self, x, input_mask, training=True):
return self.encoder(x, training=training)
total_steps = 2000000
def model_fn(features, labels, mode, params):
config_conformer = malaya_speech.config.conformer_tiny_encoder_config
config_conformer['subsampling']['type'] = 'none'
config_conformer['dropout'] = 0.0
encoder = Encoder(config_conformer)
cfg = hubert.HuBERTConfig(
extractor_mode='layer_norm',
dropout=0.0,
attention_dropout=0.0,
encoder_layerdrop=0.0,
dropout_input=0.0,
dropout_features=0.0,
final_dim=128,
)
model = hubert.Model(cfg, encoder, ['pad', 'eos', 'unk'] + [str(i) for i in range(100)])
X = features['waveforms']
X_len = features['waveforms_length'][:, 0]
targets = features['targets']
targets_int32 = tf.cast(targets, tf.int32)
targets_length = features['targets_length'][:, 0]
r = model(X, padding_mask=X_len, features_only=True, mask=False)
logits = tf.layers.dense(r['x'], len(unique_vocab) + 1)
seq_lens = tf.reduce_sum(
tf.cast(tf.logical_not(r['padding_mask']), tf.int32), axis=1
)
mean_error, sum_error, sum_weight = ctc.loss.ctc_loss(
logits, seq_lens, targets_int32, targets_length
)
loss = mean_error
accuracy = ctc.metrics.ctc_sequence_accuracy(
logits, seq_lens, targets_int32, targets_length,
)
tf.identity(loss, 'train_loss')
tf.identity(accuracy, name='train_accuracy')
tf.summary.scalar('train_accuracy', accuracy)
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
init_checkpoint = 'hubert-conformer-tiny/model.ckpt-1000000'
assignment_map, initialized_variable_names = train.get_assignment_map_from_checkpoint(
variables, init_checkpoint
)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = train.optimizer.adamw.create_optimizer(
loss,
init_lr=5e-5,
num_train_steps=total_steps,
num_warmup_steps=100000,
end_learning_rate=0.0,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
clip_norm=1.0,
)
estimator_spec = tf.estimator.EstimatorSpec(
mode=mode, loss=loss, train_op=train_op
)
elif mode == tf.estimator.ModeKeys.EVAL:
estimator_spec = tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL,
loss=loss,
eval_metric_ops={
'accuracy': ctc.metrics.ctc_sequence_accuracy_estimator(
logits, seq_lens, targets_int32, targets_length
)
},
)
return estimator_spec
train_hooks = [
tf.train.LoggingTensorHook(
['train_accuracy', 'train_loss'], every_n_iter=1
)
]
train_dataset = get_dataset('bahasa-asr-train-combined.json')
dev_dataset = get_dataset('bahasa-asr-test.json')
train.run_training(
train_fn=train_dataset,
model_fn=model_fn,
model_dir='hubert-conformer-tiny-ctc-char',
num_gpus=1,
log_step=1,
save_checkpoint_step=20000,
max_steps=total_steps,
eval_fn=dev_dataset,
train_hooks=train_hooks,
)
| true | true |
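The training pipeline above batches variable-length waveforms and label sequences with `padded_batch`, zero-padding every field to the longest example in each batch. A self-contained sketch of that mechanism, written against the TF 2.x API (the script itself uses TF 1.x idioms such as `tf.contrib.data.AUTOTUNE`):

```python
# Sketch of the padded-batch mechanism used above (TF 2.x API).
import numpy as np
import tensorflow as tf

def gen():
    for n in (3, 5, 2):
        yield {"waveforms": np.arange(n, dtype=np.float32),
               "waveforms_length": np.array([n], dtype=np.int32)}

ds = tf.data.Dataset.from_generator(
    gen,
    output_signature={
        "waveforms": tf.TensorSpec([None], tf.float32),
        "waveforms_length": tf.TensorSpec([1], tf.int32),
    },
)
# Each batch is zero-padded to the longest "waveforms" it contains.
ds = ds.padded_batch(2, padded_shapes={"waveforms": [None], "waveforms_length": [1]})
for batch in ds:
    print(batch["waveforms"].numpy())  # shape (2, 5) for the first batch, then (1, 2)
```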
f7193aa46ca7cccda6fa00809b1c48838617c057 | 9,046 | py | Python | killerbee/dev_telosb.py | Acesonnall/killerbee | 354c68bcf21f60910d9f68f62285b977db76fb60 | ["BSD-3-Clause"] | 2 | 2019-06-16T06:53:46.000Z | 2022-02-18T01:05:36.000Z | killerbee/dev_telosb.py | Acesonnall/killerbee | 354c68bcf21f60910d9f68f62285b977db76fb60 | ["BSD-3-Clause"] | 1 | 2019-11-23T17:16:55.000Z | 2019-11-23T17:16:55.000Z | killerbee/dev_telosb.py | Acesonnall/killerbee | 354c68bcf21f60910d9f68f62285b977db76fb60 | ["BSD-3-Clause"] | 2 | 2019-06-15T15:54:36.000Z | 2019-06-15T15:55:39.000Z |
'''
Support for the TelosB / Tmote Sky platforms, and close clones.
Utilizes the GoodFET firmware with CCSPI application, and the GoodFET client code.
'''
import os
import time
import struct
import time
from datetime import datetime, timedelta
from kbutils import KBCapabilities, makeFCS
from GoodFETCCSPI import GoodFETCCSPI
CC2420_REG_SYNC = 0x14
class TELOSB:
def __init__(self, dev):
'''
Instantiates the KillerBee class for our TelosB/TmoteSky running GoodFET firmware.
@type dev: String
@param dev: Serial device identifier (ex /dev/ttyUSB0)
@return: None
@rtype: None
'''
self._channel = None
self._page = 0
self.handle = None
self.dev = dev
os.environ["board"] = "telosb" #set enviroment variable for GoodFET code to use
self.handle = GoodFETCCSPI()
self.handle.serInit(port=self.dev)
self.handle.setup()
self.__stream_open = False
self.capabilities = KBCapabilities()
self.__set_capabilities()
def close(self):
self.handle.serClose()
self.handle = None
def check_capability(self, capab):
return self.capabilities.check(capab)
def get_capabilities(self):
return self.capabilities.getlist()
def __set_capabilities(self):
'''
Sets the capability information appropriate for GoodFETCCSPI client and firmware.
@rtype: None
@return: None
'''
self.capabilities.setcapab(KBCapabilities.FREQ_2400, True)
self.capabilities.setcapab(KBCapabilities.SNIFF, True)
self.capabilities.setcapab(KBCapabilities.SETCHAN, True)
self.capabilities.setcapab(KBCapabilities.INJECT, True)
self.capabilities.setcapab(KBCapabilities.PHYJAM_REFLEX, True)
self.capabilities.setcapab(KBCapabilities.SET_SYNC, True)
return
# KillerBee expects the driver to implement this function
#def get_dev_info(self, dev, bus):
def get_dev_info(self):
'''
Returns device information in a list identifying the device.
@rtype: List
@return: List of 3 strings identifying device.
'''
return [self.dev, "TelosB/Tmote", ""]
# KillerBee expects the driver to implement this function
def sniffer_on(self, channel=None, page=0):
'''
Turns the sniffer on such that pnext() will start returning observed
data. Will set the command mode to Air Capture if it is not already
set.
@type channel: Integer
@param channel: Sets the channel, optional
@type page: Integer
@param page: Sets the subghz page, not supported on this device
@rtype: None
'''
self.capabilities.require(KBCapabilities.SNIFF)
        self.handle.RF_promiscuity(1)
        self.handle.RF_autocrc(0)
        if channel != None:
            self.set_channel(channel, page)
        self.handle.CC_RFST_RX()
#print "Sniffer started (listening as %010x on %i MHz)" % (self.handle.RF_getsmac(), self.handle.RF_getfreq()/10**6);
self.__stream_open = True
# KillerBee expects the driver to implement this function
def sniffer_off(self):
'''
Turns the sniffer off, freeing the hardware for other functions. It is
not necessary to call this function before closing the interface with
close().
@rtype: None
'''
#TODO actually have firmware stop sending us packets!
self.__stream_open = False
# KillerBee expects the driver to implement this function
def set_channel(self, channel, page=0):
'''
        Sets the radio interface to the specified channel (limited to 2.4 GHz channels 11-26)
@type channel: Integer
@param channel: Sets the channel, optional
@type page: Integer
@param page: Sets the subghz page, not supported on this device
@rtype: None
'''
self.capabilities.require(KBCapabilities.SETCHAN)
        if 11 <= channel <= 26:
self._channel = channel
self.handle.RF_setchan(channel)
else:
raise Exception('Invalid channel')
if page:
raise Exception('SubGHz not supported')
# KillerBee expects the driver to implement this function
def inject(self, packet, channel=None, count=1, delay=0, page=0):
'''
Injects the specified packet contents.
@type packet: String
@param packet: Packet contents to transmit, without FCS.
@type channel: Integer
@param channel: Sets the channel, optional
@type page: Integer
@param page: Sets the subghz page, not supported on this device
@type count: Integer
@param count: Transmits a specified number of frames, def=1
@type delay: Float
@param delay: Delay between each frame, def=1
@rtype: None
'''
self.capabilities.require(KBCapabilities.INJECT)
if len(packet) < 1:
raise Exception('Empty packet')
if len(packet) > 125: # 127 - 2 to accommodate FCS
raise Exception('Packet too long')
if channel != None:
self.set_channel(channel, page)
self.handle.RF_autocrc(1) #let radio add the CRC
for pnum in range(0, count):
gfready = [ord(x) for x in packet] #convert packet string to GoodFET expected integer format
gfready.insert(0, len(gfready)+2) #add a length that leaves room for CRC
self.handle.RF_txpacket(gfready)
# Sleep was for 1 second but testing by Gianfranco Costamagna suggested lowering to 1/100th of a second
time.sleep(0.01) #TODO get rid of completely, and just check CC2420 status
# https://github.com/alvarop/msp430-cc2500/blob/master/lib/cc2500/cc2500.c
# KillerBee expects the driver to implement this function
def pnext(self, timeout=100):
'''
Returns a dictionary containing packet data, else None.
@type timeout: Integer
@param timeout: Timeout to wait for packet reception in usec
@rtype: List
        @return: Returns None if timeout expires and no packet received. When a packet is received, a dictionary is returned with the keys bytes (string of packet bytes), validcrc (boolean if a valid CRC), rssi (unscaled RSSI), and location (may be set to None). For backwards compatibility, keys for 0,1,2 are provided such that it can be treated as if a list is returned, in the form [ String: packet contents | Bool: Valid CRC | Int: Unscaled RSSI ]
'''
        if not self.__stream_open:
            self.sniffer_on() #start sniffing
        packet = None
start = datetime.utcnow()
while (packet is None and (start + timedelta(microseconds=timeout) > datetime.utcnow())):
packet = self.handle.RF_rxpacket()
rssi = self.handle.RF_getrssi() #TODO calibrate
if packet is None:
return None
frame = packet[1:]
if frame[-2:] == makeFCS(frame[:-2]): validcrc = True
else: validcrc = False
        #Return in a nicer dictionary format, so we don't have to reference by numeric indices.
        #Note that 0,1,2 indices are inserted twice for backwards compatibility.
result = {0:frame, 1:validcrc, 2:rssi, 'bytes':frame, 'validcrc':validcrc, 'rssi':rssi, 'location':None}
        result['dbm'] = rssi - 45 #TODO tune specifically to the Tmote platform (does ext antenna need to be different?)
result['datetime'] = datetime.utcnow()
return result
def ping(self, da, panid, sa, channel=None, page=0):
'''
Not yet implemented.
@return: None
@rtype: None
'''
raise Exception('Not yet implemented')
def jammer_on(self, channel=None, page=0):
'''
        Starts a reflexive jammer on the configured channel (via GoodFET's RF_reflexjam).
@type channel: Integer
@param channel: Sets the channel, optional
@type page: Integer
        @param page: Sets the subghz page, not supported on this device
@rtype: None
'''
self.capabilities.require(KBCapabilities.PHYJAM_REFLEX)
self.handle.RF_promiscuity(1)
self.handle.RF_autocrc(0)
if channel != None:
self.set_channel(channel, page)
self.handle.CC_RFST_RX()
self.handle.RF_reflexjam()
def set_sync(self, sync=0xA70F):
'''Set the register controlling the 802.15.4 PHY sync byte.'''
self.capabilities.require(KBCapabilities.SET_SYNC)
if (sync >> 16) > 0:
raise Exception("Sync word (%x) must be 2-bytes or less." % sync)
return self.handle.poke(CC2420_REG_SYNC, sync)
def jammer_off(self, channel=None, page=0):
'''
Not yet implemented.
@return: None
@rtype: None
'''
#TODO implement
raise Exception('Not yet implemented')
| 38.168776 | 452 | 0.632876 |
'''
Support for the TelosB / Tmote Sky platforms, and close clones.
Utilizes the GoodFET firmware with CCSPI application, and the GoodFET client code.
'''
import os
import time
import struct
import time
from datetime import datetime, timedelta
from kbutils import KBCapabilities, makeFCS
from GoodFETCCSPI import GoodFETCCSPI
CC2420_REG_SYNC = 0x14
class TELOSB:
def __init__(self, dev):
'''
Instantiates the KillerBee class for our TelosB/TmoteSky running GoodFET firmware.
@type dev: String
@param dev: Serial device identifier (ex /dev/ttyUSB0)
@return: None
@rtype: None
'''
self._channel = None
self._page = 0
self.handle = None
self.dev = dev
os.environ["board"] = "telosb"
self.handle = GoodFETCCSPI()
self.handle.serInit(port=self.dev)
self.handle.setup()
self.__stream_open = False
self.capabilities = KBCapabilities()
self.__set_capabilities()
def close(self):
self.handle.serClose()
self.handle = None
def check_capability(self, capab):
return self.capabilities.check(capab)
def get_capabilities(self):
return self.capabilities.getlist()
def __set_capabilities(self):
'''
Sets the capability information appropriate for GoodFETCCSPI client and firmware.
@rtype: None
@return: None
'''
self.capabilities.setcapab(KBCapabilities.FREQ_2400, True)
self.capabilities.setcapab(KBCapabilities.SNIFF, True)
self.capabilities.setcapab(KBCapabilities.SETCHAN, True)
self.capabilities.setcapab(KBCapabilities.INJECT, True)
self.capabilities.setcapab(KBCapabilities.PHYJAM_REFLEX, True)
self.capabilities.setcapab(KBCapabilities.SET_SYNC, True)
return
def get_dev_info(self):
'''
Returns device information in a list identifying the device.
@rtype: List
@return: List of 3 strings identifying device.
'''
return [self.dev, "TelosB/Tmote", ""]
def sniffer_on(self, channel=None, page=0):
'''
Turns the sniffer on such that pnext() will start returning observed
data. Will set the command mode to Air Capture if it is not already
set.
@type channel: Integer
@param channel: Sets the channel, optional
@type page: Integer
@param page: Sets the subghz page, not supported on this device
@rtype: None
'''
self.capabilities.require(KBCapabilities.SNIFF)
        self.handle.RF_promiscuity(1)
        self.handle.RF_autocrc(0)
        if channel != None:
            self.set_channel(channel, page)
        self.handle.CC_RFST_RX()
self.__stream_open = True
def sniffer_off(self):
'''
Turns the sniffer off, freeing the hardware for other functions. It is
not necessary to call this function before closing the interface with
close().
@rtype: None
'''
self.__stream_open = False
def set_channel(self, channel, page=0):
'''
        Sets the radio interface to the specified channel (limited to 2.4 GHz channels 11-26)
@type channel: Integer
@param channel: Sets the channel, optional
@type page: Integer
@param page: Sets the subghz page, not supported on this device
@rtype: None
'''
self.capabilities.require(KBCapabilities.SETCHAN)
        if 11 <= channel <= 26:
self._channel = channel
self.handle.RF_setchan(channel)
else:
raise Exception('Invalid channel')
if page:
raise Exception('SubGHz not supported')
def inject(self, packet, channel=None, count=1, delay=0, page=0):
'''
Injects the specified packet contents.
@type packet: String
@param packet: Packet contents to transmit, without FCS.
@type channel: Integer
@param channel: Sets the channel, optional
@type page: Integer
@param page: Sets the subghz page, not supported on this device
@type count: Integer
@param count: Transmits a specified number of frames, def=1
@type delay: Float
@param delay: Delay between each frame, def=1
@rtype: None
'''
self.capabilities.require(KBCapabilities.INJECT)
if len(packet) < 1:
raise Exception('Empty packet')
if len(packet) > 125:
raise Exception('Packet too long')
if channel != None:
self.set_channel(channel, page)
self.handle.RF_autocrc(1)
for pnum in range(0, count):
gfready = [ord(x) for x in packet]
gfready.insert(0, len(gfready)+2)
self.handle.RF_txpacket(gfready)
time.sleep(0.01)
def pnext(self, timeout=100):
'''
Returns a dictionary containing packet data, else None.
@type timeout: Integer
@param timeout: Timeout to wait for packet reception in usec
@rtype: List
        @return: Returns None if timeout expires and no packet received. When a packet is received, a dictionary is returned with the keys bytes (string of packet bytes), validcrc (boolean if a valid CRC), rssi (unscaled RSSI), and location (may be set to None). For backwards compatibility, keys for 0,1,2 are provided such that it can be treated as if a list is returned, in the form [ String: packet contents | Bool: Valid CRC | Int: Unscaled RSSI ]
'''
        if not self.__stream_open:
            self.sniffer_on()
        packet = None
start = datetime.utcnow()
while (packet is None and (start + timedelta(microseconds=timeout) > datetime.utcnow())):
packet = self.handle.RF_rxpacket()
rssi = self.handle.RF_getrssi()
if packet is None:
return None
frame = packet[1:]
if frame[-2:] == makeFCS(frame[:-2]): validcrc = True
else: validcrc = False
        #Note that 0,1,2 indices are inserted twice for backwards compatibility.
result = {0:frame, 1:validcrc, 2:rssi, 'bytes':frame, 'validcrc':validcrc, 'rssi':rssi, 'location':None}
        result['dbm'] = rssi - 45 #TODO tune specifically to the Tmote platform (does ext antenna need to be different?)
result['datetime'] = datetime.utcnow()
return result
def ping(self, da, panid, sa, channel=None, page=0):
'''
Not yet implemented.
@return: None
@rtype: None
'''
raise Exception('Not yet implemented')
def jammer_on(self, channel=None, page=0):
'''
        Starts a reflexive jammer on the configured channel (via GoodFET's RF_reflexjam).
@type channel: Integer
@param channel: Sets the channel, optional
@type page: Integer
        @param page: Sets the subghz page, not supported on this device
@rtype: None
'''
self.capabilities.require(KBCapabilities.PHYJAM_REFLEX)
self.handle.RF_promiscuity(1)
self.handle.RF_autocrc(0)
if channel != None:
self.set_channel(channel, page)
self.handle.CC_RFST_RX()
self.handle.RF_reflexjam()
def set_sync(self, sync=0xA70F):
'''Set the register controlling the 802.15.4 PHY sync byte.'''
self.capabilities.require(KBCapabilities.SET_SYNC)
if (sync >> 16) > 0:
raise Exception("Sync word (%x) must be 2-bytes or less." % sync)
return self.handle.poke(CC2420_REG_SYNC, sync)
def jammer_off(self, channel=None, page=0):
'''
Not yet implemented.
@return: None
@rtype: None
'''
#TODO implement
raise Exception('Not yet implemented')
| false | true |
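`pnext()` above validates frames by recomputing the FCS over the payload and comparing it with the trailing two bytes. For reference, the IEEE 802.15.4 FCS is the 16-bit ITU-T CRC (polynomial x^16 + x^12 + x^5 + 1, reflected constant 0x8408, zero initial value), appended little-endian; a self-contained sketch of the standard algorithm follows (killerbee's own `kbutils.makeFCS` may differ in representation details):

```python
# Sketch: standard IEEE 802.15.4 FCS (CRC-16, reflected poly 0x8408, init 0).
def make_fcs(data: bytes) -> bytes:
    crc = 0x0000
    for byte in data:
        crc ^= byte
        for _ in range(8):
            crc = (crc >> 1) ^ 0x8408 if crc & 1 else crc >> 1
    return bytes([crc & 0xFF, (crc >> 8) & 0xFF])  # appended little-endian

payload = bytes.fromhex("0108003412")          # arbitrary example MAC header bytes
frame = payload + make_fcs(payload)
print(frame[-2:] == make_fcs(frame[:-2]))      # True: frame passes the CRC check
```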
f7193bb525a1bcd7a4c3765147b5f3469bdd3591 | 1,555 | py | Python | etools/apps/uptime/utils.py | Igelinmist/etools | 26ae66a2ad005a7a173253bc9822a770a3115645 | ["BSD-3-Clause"] | null | null | null | etools/apps/uptime/utils.py | Igelinmist/etools | 26ae66a2ad005a7a173253bc9822a770a3115645 | ["BSD-3-Clause"] | null | null | null | etools/apps/uptime/utils.py | Igelinmist/etools | 26ae66a2ad005a7a173253bc9822a770a3115645 | ["BSD-3-Clause"] | null | null | null |
from datetime import timedelta, date
def req_date(local_date):
if isinstance(local_date, str):
d, m, y = local_date.split('.')
return '{0}-{1}-{2}'.format(y, m, d)
elif isinstance(local_date, date):
return local_date.strftime('%Y-%m-%d')
else:
return local_date
def req_timedelta(arg):
if isinstance(arg, timedelta):
return arg
else:
if isinstance(arg, str):
parts = arg.split(':')
try:
res = timedelta(hours=int(parts[0]), minutes=int(parts[1]))
except ValueError:
res = timedelta(0)
return res
else:
return timedelta(0)
def yesterday_local():
return (date.today() - timedelta(days=1)).strftime("%d.%m.%Y")
def stat_timedelta_for_report(time_delta, round_to_hour=True):
if time_delta:
sec = time_delta.total_seconds()
hours, remainder = divmod(sec, 3600)
if round_to_hour:
if remainder >= 1800:
hours += 1
return str(int(hours))
minutes, remainder = divmod(remainder, 60)
return "{0:,d}:{1:02}".format(int(hours), int(minutes)).replace(',',' ')
else:
return '-'
def custom_redirect(url_name, *args, **kwargs):
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.http import urlencode
url = reverse(url_name, args=args)
params = urlencode(kwargs)
return HttpResponseRedirect(url + "?%s" % params)
| 28.796296 | 80 | 0.595498 |
from datetime import timedelta, date
def req_date(local_date):
if isinstance(local_date, str):
d, m, y = local_date.split('.')
return '{0}-{1}-{2}'.format(y, m, d)
elif isinstance(local_date, date):
return local_date.strftime('%Y-%m-%d')
else:
return local_date
def req_timedelta(arg):
if isinstance(arg, timedelta):
return arg
else:
if isinstance(arg, str):
parts = arg.split(':')
try:
res = timedelta(hours=int(parts[0]), minutes=int(parts[1]))
except ValueError:
res = timedelta(0)
return res
else:
return timedelta(0)
def yesterday_local():
return (date.today() - timedelta(days=1)).strftime("%d.%m.%Y")
def stat_timedelta_for_report(time_delta, round_to_hour=True):
if time_delta:
sec = time_delta.total_seconds()
hours, remainder = divmod(sec, 3600)
if round_to_hour:
if remainder >= 1800:
hours += 1
return str(int(hours))
minutes, remainder = divmod(remainder, 60)
return "{0:,d}:{1:02}".format(int(hours), int(minutes)).replace(',',' ')
else:
return '-'
def custom_redirect(url_name, *args, **kwargs):
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.http import urlencode
url = reverse(url_name, args=args)
params = urlencode(kwargs)
return HttpResponseRedirect(url + "?%s" % params)
| true | true |
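A short usage sketch for the helpers above, illustrating the date-format conversion, the ValueError fallback in `req_timedelta`, and the round-to-hour behavior of `stat_timedelta_for_report` (the import path is an assumption; adjust to wherever utils.py lives on your path):

```python
# Usage sketch for utils.py above (import path is an assumption).
from datetime import timedelta
from utils import req_date, req_timedelta, stat_timedelta_for_report

print(req_date('31.12.2023'))                                      # 2023-12-31
print(req_timedelta('2:45'))                                       # 2:45:00
print(req_timedelta('bogus'))                                      # 0:00:00 (ValueError fallback)
print(stat_timedelta_for_report(timedelta(hours=3, minutes=40)))   # 4 (remainder >= 30 min rounds up)
print(stat_timedelta_for_report(timedelta(hours=3, minutes=20), round_to_hour=False))  # 3:20
print(stat_timedelta_for_report(None))                             # -
```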
f7193dc596182608b60c2744dd8a96f97d37ed2c | 11,229 | py | Python | docs/conf.py | jeromedontdev/discord.py | 42bab370a73440fa8af2380211ad92ccb6bf7f46 | ["MIT"] | 13 | 2020-12-16T06:13:11.000Z | 2021-04-15T12:01:38.000Z | docs/conf.py | RootGC/discord.py | 8bc489dba8b8c7ca9141e4e7f00a0e916a7c0269 | ["MIT"] | 1 | 2021-05-23T16:08:10.000Z | 2021-05-23T16:08:10.000Z | docs/conf.py | RootGC/discord.py | 8bc489dba8b8c7ca9141e4e7f00a0e916a7c0269 | ["MIT"] | 6 | 2020-12-16T00:01:24.000Z | 2021-02-05T12:32:54.000Z |
#
# discord.py documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 21 05:43:30 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import re
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath('extensions'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'builder',
'sphinx.ext.autodoc',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinxcontrib_trio',
'details',
'exception_hierarchy',
'attributetable',
'resourcelinks',
'nitpick_file_ignorer',
]
autodoc_member_order = 'bysource'
autodoc_typehints = 'none'
extlinks = {
'issue': ('https://github.com/Rapptz/discord.py/issues/%s', 'GH-'),
}
# Links used for cross-referencing stuff in other documentation
intersphinx_mapping = {
'py': ('https://docs.python.org/3', None),
'aio': ('https://docs.aiohttp.org/en/stable/', None),
'req': ('https://docs.python-requests.org/en/latest/', None)
}
rst_prolog = """
.. |coro| replace:: This function is a |coroutine_link|_.
.. |maybecoro| replace:: This function *could be a* |coroutine_link|_.
.. |coroutine_link| replace:: *coroutine*
.. _coroutine_link: https://docs.python.org/3/library/asyncio-task.html#coroutine
"""
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'discord.py'
copyright = '2015-present, Rapptz'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
with open('../discord/__init__.py') as f:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)
# The full version, including alpha/beta/rc tags.
release = version
# This assumes a tag is available for final releases
branch = 'master' if version.endswith('a') else 'v' + version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
locale_dirs = ['locale/']
gettext_compact = False
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# Nitpicky mode options
nitpick_ignore_files = [
"migrating_to_async",
"migrating",
"whats_new",
]
# -- Options for HTML output ----------------------------------------------
html_experimental_html5_writer = True
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'basic'
html_context = {
'discord_invite': 'https://discord.gg/r3sSKJJ',
'discord_extensions': [
('discord.ext.commands', 'ext/commands'),
('discord.ext.tasks', 'ext/tasks'),
],
}
resource_links = {
'discord': 'https://discord.gg/r3sSKJJ',
'issues': 'https://github.com/Rapptz/discord.py/issues',
'discussions': 'https://github.com/Rapptz/discord.py/discussions',
'examples': f'https://github.com/Rapptz/discord.py/tree/{branch}/examples',
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# }
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = './images/discord_py_logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
html_search_scorer = '_static/scorer.js'
html_js_files = [
'custom.js',
'settings.js',
'copy.js',
'sidebar.js'
]
# Output file base name for HTML help builder.
htmlhelp_basename = 'discord.pydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'discord.py.tex', 'discord.py Documentation',
'Rapptz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'discord.py', 'discord.py Documentation',
['Rapptz'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'discord.py', 'discord.py Documentation',
'Rapptz', 'discord.py', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
def setup(app):
if app.config.language == 'ja':
app.config.intersphinx_mapping['py'] = ('https://docs.python.org/ja/3', None)
app.config.html_context['discord_invite'] = 'https://discord.gg/nXzj3dg'
app.config.resource_links['discord'] = 'https://discord.gg/nXzj3dg'
| 31.191667 | 99 | 0.708612 |
import sys
import os
import re
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath('extensions'))
extensions = [
'builder',
'sphinx.ext.autodoc',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinxcontrib_trio',
'details',
'exception_hierarchy',
'attributetable',
'resourcelinks',
'nitpick_file_ignorer',
]
autodoc_member_order = 'bysource'
autodoc_typehints = 'none'
extlinks = {
'issue': ('https://github.com/Rapptz/discord.py/issues/%s', 'GH-'),
}
intersphinx_mapping = {
'py': ('https://docs.python.org/3', None),
'aio': ('https://docs.aiohttp.org/en/stable/', None),
'req': ('https://docs.python-requests.org/en/latest/', None)
}
rst_prolog = """
.. |coro| replace:: This function is a |coroutine_link|_.
.. |maybecoro| replace:: This function *could be a* |coroutine_link|_.
.. |coroutine_link| replace:: *coroutine*
.. _coroutine_link: https://docs.python.org/3/library/asyncio-task.html#coroutine
"""
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'discord.py'
copyright = '2015-present, Rapptz'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
with open('../discord/__init__.py') as f:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)
# The full version, including alpha/beta/rc tags.
release = version
# This assumes a tag is available for final releases
branch = 'master' if version.endswith('a') else 'v' + version
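# Worked example of the two expressions above (illustrative only): if
# discord/__init__.py contains __version__ = '1.7.3', the regex captures
# version == '1.7.3' and branch == 'v1.7.3'; an alpha version such as
# '2.0.0a' ends with 'a', so branch falls back to 'master'.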
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
locale_dirs = ['locale/']
gettext_compact = False
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# Nitpicky mode options
nitpick_ignore_files = [
"migrating_to_async",
"migrating",
"whats_new",
]
# -- Options for HTML output ----------------------------------------------
html_experimental_html5_writer = True
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'basic'
html_context = {
'discord_invite': 'https://discord.gg/r3sSKJJ',
'discord_extensions': [
('discord.ext.commands', 'ext/commands'),
('discord.ext.tasks', 'ext/tasks'),
],
}
resource_links = {
'discord': 'https://discord.gg/r3sSKJJ',
'issues': 'https://github.com/Rapptz/discord.py/issues',
'discussions': 'https://github.com/Rapptz/discord.py/discussions',
'examples': f'https://github.com/Rapptz/discord.py/tree/{branch}/examples',
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# }
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = './images/discord_py_logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
html_search_scorer = '_static/scorer.js'
html_js_files = [
'custom.js',
'settings.js',
'copy.js',
'sidebar.js'
]
# Output file base name for HTML help builder.
htmlhelp_basename = 'discord.pydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'discord.py.tex', 'discord.py Documentation',
'Rapptz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'discord.py', 'discord.py Documentation',
['Rapptz'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'discord.py', 'discord.py Documentation',
'Rapptz', 'discord.py', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
def setup(app):
if app.config.language == 'ja':
app.config.intersphinx_mapping['py'] = ('https://docs.python.org/ja/3', None)
app.config.html_context['discord_invite'] = 'https://discord.gg/nXzj3dg'
app.config.resource_links['discord'] = 'https://discord.gg/nXzj3dg'
| true
| true
|
f7193e60bdbc11912523b4e6e6233bec11f0c404
| 11,846
|
py
|
Python
|
synapse/http/proxyagent.py
|
User-green/synapse
|
173ddbbe0b220bb28e67575079e1f775d73f967f
|
[
"Apache-2.0"
] | null | null | null |
synapse/http/proxyagent.py
|
User-green/synapse
|
173ddbbe0b220bb28e67575079e1f775d73f967f
|
[
"Apache-2.0"
] | null | null | null |
synapse/http/proxyagent.py
|
User-green/synapse
|
173ddbbe0b220bb28e67575079e1f775d73f967f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
import re
from typing import Optional, Tuple
from urllib.request import getproxies_environment, proxy_bypass_environment
import attr
from zope.interface import implementer
from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.python.failure import Failure
from twisted.web.client import URI, BrowserLikePolicyForHTTPS, _AgentBase
from twisted.web.error import SchemeNotSupported
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IPolicyForHTTPS
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint
logger = logging.getLogger(__name__)
_VALID_URI = re.compile(br"\A[\x21-\x7e]+\Z")
@attr.s
class ProxyCredentials:
username_password = attr.ib(type=bytes)
def as_proxy_authorization_value(self) -> bytes:
"""
Return the value for a Proxy-Authorization header (i.e. 'Basic abdef==').
Returns:
            A transformation of the authentication string: the encoded value for
            a Proxy-Authorization header.
"""
# Encode as base64 and prepend the authorization type
return b"Basic " + base64.encodebytes(self.username_password)
@implementer(IAgent)
class ProxyAgent(_AgentBase):
"""An Agent implementation which will use an HTTP proxy if one was requested
Args:
reactor: twisted reactor to place outgoing
connections.
proxy_reactor: twisted reactor to use for connections to the proxy server
            reactor might have some blacklisting applied (e.g. for DNS queries),
but we need unblocked access to the proxy.
contextFactory (IPolicyForHTTPS): A factory for TLS contexts, to control the
verification parameters of OpenSSL. The default is to use a
`BrowserLikePolicyForHTTPS`, so unless you have special
requirements you can leave this as-is.
connectTimeout (Optional[float]): The amount of time that this Agent will wait
for the peer to accept a connection, in seconds. If 'None',
HostnameEndpoint's default (30s) will be used.
This is used for connections to both proxies and destination servers.
bindAddress (bytes): The local address for client sockets to bind to.
pool (HTTPConnectionPool|None): connection pool to be used. If None, a
non-persistent pool instance will be created.
use_proxy (bool): Whether proxy settings should be discovered and used
from conventional environment variables.
"""
def __init__(
self,
reactor,
proxy_reactor=None,
contextFactory: Optional[IPolicyForHTTPS] = None,
connectTimeout=None,
bindAddress=None,
pool=None,
use_proxy=False,
):
contextFactory = contextFactory or BrowserLikePolicyForHTTPS()
_AgentBase.__init__(self, reactor, pool)
if proxy_reactor is None:
self.proxy_reactor = reactor
else:
self.proxy_reactor = proxy_reactor
self._endpoint_kwargs = {}
if connectTimeout is not None:
self._endpoint_kwargs["timeout"] = connectTimeout
if bindAddress is not None:
self._endpoint_kwargs["bindAddress"] = bindAddress
http_proxy = None
https_proxy = None
no_proxy = None
if use_proxy:
proxies = getproxies_environment()
http_proxy = proxies["http"].encode() if "http" in proxies else None
https_proxy = proxies["https"].encode() if "https" in proxies else None
no_proxy = proxies["no"] if "no" in proxies else None
# Parse credentials from http and https proxy connection string if present
self.http_proxy_creds, http_proxy = parse_username_password(http_proxy)
self.https_proxy_creds, https_proxy = parse_username_password(https_proxy)
self.http_proxy_endpoint = _http_proxy_endpoint(
http_proxy, self.proxy_reactor, **self._endpoint_kwargs
)
self.https_proxy_endpoint = _http_proxy_endpoint(
https_proxy, self.proxy_reactor, **self._endpoint_kwargs
)
self.no_proxy = no_proxy
self._policy_for_https = contextFactory
self._reactor = reactor
def request(self, method, uri, headers=None, bodyProducer=None):
"""
Issue a request to the server indicated by the given uri.
Supports `http` and `https` schemes.
An existing connection from the connection pool may be used or a new one may be
created.
See also: twisted.web.iweb.IAgent.request
Args:
method (bytes): The request method to use, such as `GET`, `POST`, etc
uri (bytes): The location of the resource to request.
headers (Headers|None): Extra headers to send with the request
bodyProducer (IBodyProducer|None): An object which can generate bytes to
make up the body of this request (for example, the properly encoded
contents of a file for a file upload). Or, None if the request is to
have no body.
Returns:
Deferred[IResponse]: completes when the header of the response has
been received (regardless of the response status code).
Can fail with:
SchemeNotSupported: if the uri is not http or https
twisted.internet.error.TimeoutError if the server we are connecting
to (proxy or destination) does not accept a connection before
connectTimeout.
... other things too.
"""
uri = uri.strip()
if not _VALID_URI.match(uri):
raise ValueError(f"Invalid URI {uri!r}")
parsed_uri = URI.fromBytes(uri)
pool_key = (parsed_uri.scheme, parsed_uri.host, parsed_uri.port)
request_path = parsed_uri.originForm
should_skip_proxy = False
if self.no_proxy is not None:
should_skip_proxy = proxy_bypass_environment(
parsed_uri.host.decode(),
proxies={"no": self.no_proxy},
)
if (
parsed_uri.scheme == b"http"
and self.http_proxy_endpoint
and not should_skip_proxy
):
# Determine whether we need to set Proxy-Authorization headers
if self.http_proxy_creds:
# Set a Proxy-Authorization header
if headers is None:
headers = Headers()
headers.addRawHeader(
b"Proxy-Authorization",
self.http_proxy_creds.as_proxy_authorization_value(),
)
# Cache *all* connections under the same key, since we are only
# connecting to a single destination, the proxy:
pool_key = ("http-proxy", self.http_proxy_endpoint)
endpoint = self.http_proxy_endpoint
request_path = uri
elif (
parsed_uri.scheme == b"https"
and self.https_proxy_endpoint
and not should_skip_proxy
):
connect_headers = Headers()
# Determine whether we need to set Proxy-Authorization headers
if self.https_proxy_creds:
# Set a Proxy-Authorization header
connect_headers.addRawHeader(
b"Proxy-Authorization",
self.https_proxy_creds.as_proxy_authorization_value(),
)
endpoint = HTTPConnectProxyEndpoint(
self.proxy_reactor,
self.https_proxy_endpoint,
parsed_uri.host,
parsed_uri.port,
headers=connect_headers,
)
else:
# not using a proxy
endpoint = HostnameEndpoint(
self._reactor, parsed_uri.host, parsed_uri.port, **self._endpoint_kwargs
)
logger.debug("Requesting %s via %s", uri, endpoint)
if parsed_uri.scheme == b"https":
tls_connection_creator = self._policy_for_https.creatorForNetloc(
parsed_uri.host, parsed_uri.port
)
endpoint = wrapClientTLS(tls_connection_creator, endpoint)
elif parsed_uri.scheme == b"http":
pass
else:
return defer.fail(
Failure(
SchemeNotSupported("Unsupported scheme: %r" % (parsed_uri.scheme,))
)
)
return self._requestWithEndpoint(
pool_key, endpoint, method, parsed_uri, headers, bodyProducer, request_path
)
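# Minimal usage sketch (assumptions: a running Twisted reactor and the
# environment variable below; not part of the original module). The proxy
# string is given without a scheme, matching the parser below:
#
#     # HTTPS_PROXY=user:pass@proxy.example:3128
#     agent = ProxyAgent(reactor, use_proxy=True)
#     d = agent.request(b"GET", b"https://example.com/some/path")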
def _http_proxy_endpoint(proxy: Optional[bytes], reactor, **kwargs):
"""Parses an http proxy setting and returns an endpoint for the proxy
Args:
proxy: the proxy setting in the form: [<username>:<password>@]<host>[:<port>]
Note that compared to other apps, this function currently lacks support
for specifying a protocol schema (i.e. protocol://...).
reactor: reactor to be used to connect to the proxy
kwargs: other args to be passed to HostnameEndpoint
Returns:
interfaces.IStreamClientEndpoint|None: endpoint to use to connect to the proxy,
or None
"""
if proxy is None:
return None
# Parse the connection string
host, port = parse_host_port(proxy, default_port=1080)
return HostnameEndpoint(reactor, host, port, **kwargs)
def parse_username_password(proxy: bytes) -> Tuple[Optional[ProxyCredentials], bytes]:
"""
    Parses the username and password from a proxy declaration, e.g.
username:password@hostname:port.
Args:
proxy: The proxy connection string.
    Returns:
        An instance of ProxyCredentials and the proxy connection string with any credentials
        stripped, i.e. u:p@host:port -> host:port. If no credentials were found, the
ProxyCredentials instance is replaced with None.
"""
if proxy and b"@" in proxy:
# We use rsplit here as the password could contain an @ character
credentials, proxy_without_credentials = proxy.rsplit(b"@", 1)
return ProxyCredentials(credentials), proxy_without_credentials
return None, proxy
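# Worked example (illustrative only): because rsplit splits on the *last*
# b"@", a password containing an @ survives intact:
#
#     >>> parse_username_password(b"user:p@ss@proxy.example:3128")
#     (ProxyCredentials(username_password=b'user:p@ss'), b'proxy.example:3128')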
def parse_host_port(hostport: bytes, default_port: int = None) -> Tuple[bytes, int]:
"""
Parse the hostname and port from a proxy connection byte string.
Args:
hostport: The proxy connection string. Must be in the form 'host[:port]'.
default_port: The default port to return if one is not found in `hostport`.
Returns:
A tuple containing the hostname and port. Uses `default_port` if one was not found.
"""
if b":" in hostport:
host, port = hostport.rsplit(b":", 1)
try:
port = int(port)
return host, port
except ValueError:
# the thing after the : wasn't a valid port; presumably this is an
# IPv6 address.
pass
return hostport, default_port
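# Worked examples (illustrative only):
#
#     >>> parse_host_port(b"proxy.example:3128")
#     (b'proxy.example', 3128)
#     >>> parse_host_port(b"proxy.example", default_port=1080)
#     (b'proxy.example', 1080)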
| 36.674923
| 92
| 0.639878
|
import base64
import logging
import re
from typing import Optional, Tuple
from urllib.request import getproxies_environment, proxy_bypass_environment
import attr
from zope.interface import implementer
from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.python.failure import Failure
from twisted.web.client import URI, BrowserLikePolicyForHTTPS, _AgentBase
from twisted.web.error import SchemeNotSupported
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IPolicyForHTTPS
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint
logger = logging.getLogger(__name__)
_VALID_URI = re.compile(br"\A[\x21-\x7e]+\Z")
@attr.s
class ProxyCredentials:
username_password = attr.ib(type=bytes)
def as_proxy_authorization_value(self) -> bytes:
return b"Basic " + base64.encodebytes(self.username_password)
@implementer(IAgent)
class ProxyAgent(_AgentBase):
def __init__(
self,
reactor,
proxy_reactor=None,
contextFactory: Optional[IPolicyForHTTPS] = None,
connectTimeout=None,
bindAddress=None,
pool=None,
use_proxy=False,
):
contextFactory = contextFactory or BrowserLikePolicyForHTTPS()
_AgentBase.__init__(self, reactor, pool)
if proxy_reactor is None:
self.proxy_reactor = reactor
else:
self.proxy_reactor = proxy_reactor
self._endpoint_kwargs = {}
if connectTimeout is not None:
self._endpoint_kwargs["timeout"] = connectTimeout
if bindAddress is not None:
self._endpoint_kwargs["bindAddress"] = bindAddress
http_proxy = None
https_proxy = None
no_proxy = None
if use_proxy:
proxies = getproxies_environment()
http_proxy = proxies["http"].encode() if "http" in proxies else None
https_proxy = proxies["https"].encode() if "https" in proxies else None
no_proxy = proxies["no"] if "no" in proxies else None
self.http_proxy_creds, http_proxy = parse_username_password(http_proxy)
self.https_proxy_creds, https_proxy = parse_username_password(https_proxy)
self.http_proxy_endpoint = _http_proxy_endpoint(
http_proxy, self.proxy_reactor, **self._endpoint_kwargs
)
self.https_proxy_endpoint = _http_proxy_endpoint(
https_proxy, self.proxy_reactor, **self._endpoint_kwargs
)
self.no_proxy = no_proxy
self._policy_for_https = contextFactory
self._reactor = reactor
def request(self, method, uri, headers=None, bodyProducer=None):
uri = uri.strip()
if not _VALID_URI.match(uri):
raise ValueError(f"Invalid URI {uri!r}")
parsed_uri = URI.fromBytes(uri)
pool_key = (parsed_uri.scheme, parsed_uri.host, parsed_uri.port)
request_path = parsed_uri.originForm
should_skip_proxy = False
if self.no_proxy is not None:
should_skip_proxy = proxy_bypass_environment(
parsed_uri.host.decode(),
proxies={"no": self.no_proxy},
)
if (
parsed_uri.scheme == b"http"
and self.http_proxy_endpoint
and not should_skip_proxy
):
if self.http_proxy_creds:
if headers is None:
headers = Headers()
headers.addRawHeader(
b"Proxy-Authorization",
self.http_proxy_creds.as_proxy_authorization_value(),
)
pool_key = ("http-proxy", self.http_proxy_endpoint)
endpoint = self.http_proxy_endpoint
request_path = uri
elif (
parsed_uri.scheme == b"https"
and self.https_proxy_endpoint
and not should_skip_proxy
):
connect_headers = Headers()
if self.https_proxy_creds:
connect_headers.addRawHeader(
b"Proxy-Authorization",
self.https_proxy_creds.as_proxy_authorization_value(),
)
endpoint = HTTPConnectProxyEndpoint(
self.proxy_reactor,
self.https_proxy_endpoint,
parsed_uri.host,
parsed_uri.port,
headers=connect_headers,
)
else:
endpoint = HostnameEndpoint(
self._reactor, parsed_uri.host, parsed_uri.port, **self._endpoint_kwargs
)
logger.debug("Requesting %s via %s", uri, endpoint)
if parsed_uri.scheme == b"https":
tls_connection_creator = self._policy_for_https.creatorForNetloc(
parsed_uri.host, parsed_uri.port
)
endpoint = wrapClientTLS(tls_connection_creator, endpoint)
elif parsed_uri.scheme == b"http":
pass
else:
return defer.fail(
Failure(
SchemeNotSupported("Unsupported scheme: %r" % (parsed_uri.scheme,))
)
)
return self._requestWithEndpoint(
pool_key, endpoint, method, parsed_uri, headers, bodyProducer, request_path
)
def _http_proxy_endpoint(proxy: Optional[bytes], reactor, **kwargs):
if proxy is None:
return None
host, port = parse_host_port(proxy, default_port=1080)
return HostnameEndpoint(reactor, host, port, **kwargs)
def parse_username_password(proxy: bytes) -> Tuple[Optional[ProxyCredentials], bytes]:
if proxy and b"@" in proxy:
credentials, proxy_without_credentials = proxy.rsplit(b"@", 1)
return ProxyCredentials(credentials), proxy_without_credentials
return None, proxy
def parse_host_port(hostport: bytes, default_port: int = None) -> Tuple[bytes, int]:
if b":" in hostport:
host, port = hostport.rsplit(b":", 1)
try:
port = int(port)
return host, port
except ValueError:
# IPv6 address.
pass
return hostport, default_port
| true
| true
|
f7193e638c0b7630f3bb08df8302e36c5888e4d8
| 889
|
py
|
Python
|
tensorflow_mri/python/layers/__init__.py
|
mrphys/tensorflow-mri
|
46a8929aec4180aba4961f902897e02592f25da6
|
[
"Apache-2.0"
] | 3
|
2021-07-28T17:22:26.000Z
|
2022-03-29T15:17:26.000Z
|
tensorflow_mri/python/layers/__init__.py
|
mrphys/tensorflow-mri
|
46a8929aec4180aba4961f902897e02592f25da6
|
[
"Apache-2.0"
] | 1
|
2021-07-23T01:37:11.000Z
|
2021-07-23T01:37:11.000Z
|
tensorflow_mri/python/layers/__init__.py
|
mrphys/tensorflow-mri
|
46a8929aec4180aba4961f902897e02592f25da6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 University College London. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural network layers."""
from tensorflow_mri.python.layers.conv_blocks import *
from tensorflow_mri.python.layers.conv_endec import *
from tensorflow_mri.python.layers.preproc_layers import *
| 44.45
| 80
| 0.709786
|
from tensorflow_mri.python.layers.conv_blocks import *
from tensorflow_mri.python.layers.conv_endec import *
from tensorflow_mri.python.layers.preproc_layers import *
| true
| true
|
f7193e94de77b2cad9feb7c3c07ac84c618b271a
| 13,089
|
py
|
Python
|
train.py
|
fab464654/SSD_on_ActiveVisionDataset
|
1bc6f0745241d0b45c3f257c6fb09ea0435c993e
|
[
"MIT"
] | null | null | null |
train.py
|
fab464654/SSD_on_ActiveVisionDataset
|
1bc6f0745241d0b45c3f257c6fb09ea0435c993e
|
[
"MIT"
] | null | null | null |
train.py
|
fab464654/SSD_on_ActiveVisionDataset
|
1bc6f0745241d0b45c3f257c6fb09ea0435c993e
|
[
"MIT"
] | null | null | null |
import time
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from model import SSD300, MultiBoxLoss
from datasets import PascalVOCDataset
from utils import *
# Data parameters
data_folder = 'google_drive/MyDrive/ColabNotebooks/Project/GT' # folder with data files
keep_difficult = True # use objects considered difficult to detect?
# Model parameters
# Not too many here since the SSD300 has a very specific structure
n_classes = len(label_map) # number of different types of objects
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Learning parameters
checkpoint = "google_drive/MyDrive/checkpointsIeri/checkpoint_ssd300.pth.tar" # path to model checkpoint, None if none
batch_size = 9 # batch size
iterations = 120000 # number of iterations to train
workers = 4 # number of workers for loading data in the DataLoader
print_freq = 5 # print training status every __ batches
lr = 5e-4 # learning rate
decay_lr_at = [80000, 100000] # decay learning rate after these many iterations
decay_lr_to = 0.1 # decay learning rate to this fraction of the existing learning rate
momentum = 0.9 # momentum
weight_decay = 5e-4 # weight decay
grad_clip = None  # clip if gradients are exploding, which may happen at larger batch sizes (sometimes at 32) - you will recognize it by a sorting error in the MultiBox loss calculation
cudnn.benchmark = True
def main():
"""
Training.
"""
global start_epoch, label_map, epoch, checkpoint, decay_lr_at
# Initialize model or load checkpoint
if checkpoint is None:
start_epoch = 0
model = SSD300(n_classes=n_classes)
# Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
biases = list()
not_biases = list()
for param_name, param in model.named_parameters():
if param.requires_grad:
if param_name.endswith('.bias'):
biases.append(param)
else:
not_biases.append(param)
optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
lr=lr, momentum=momentum, weight_decay=weight_decay)
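        # Illustrative note (not part of the original script): the first
        # param group trains biases at lr = 2 * 5e-4 = 1e-3 while the second
        # inherits the default lr = 5e-4, i.e. biases learn at twice the
        # base learning rate, as in the original Caffe repo.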
else:
checkpoint = torch.load(checkpoint)
start_epoch = checkpoint['epoch'] + 1
print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
model = checkpoint['model']
optimizer = checkpoint['optimizer']
# Move to default device
model = model.to(device)
criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)
#import active_vision_dataset_processing.data_loading
import transforms, active_vision_dataset
#Include all instances
pick_trans = transforms.PickInstances(range(34))
TRAIN_PATH = "./google_drive/MyDrive/ColabNotebooks/Project/trainDataset"
train_dataset = active_vision_dataset.AVD(root=TRAIN_PATH, train=True,
target_transform=pick_trans,
scene_list=['Home_001_1',
'Home_002_1',
'Home_003_1',
'Home_004_1',
'Home_005_1',
'Home_006_1',
'Home_007_1',
'Home_008_1',
'Home_014_1',
'Home_011_1',
'Home_010_1',
'Office_001_1'],
fraction_of_no_box=-1)
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size,
shuffle=True,
collate_fn=active_vision_dataset.collate
)
"""
#I TRY TO USE THE DEFAULT DATASET LOADER::::::::::::::
# Custom dataloaders
train_dataset = PascalVOCDataset(data_folder,
split='train',
keep_difficult=keep_difficult)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
collate_fn=train_dataset.collate_fn, num_workers=workers,
pin_memory=True) # note that we're passing the collate function here
"""
# Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
# To convert iterations to epochs, divide iterations by the number of iterations per epoch
# The paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations
epochs = iterations // (len(train_dataset) // 32)
decay_lr_at = [it // (len(train_dataset) // 32) for it in decay_lr_at]
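    # Worked example (illustrative only): with a hypothetical dataset of
    # 10,000 images there are 10000 // 32 = 312 iterations per epoch, so
    # epochs = 120000 // 312 = 384 and the learning rate decays at epochs
    # 80000 // 312 = 256 and 100000 // 312 = 320.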
# Epochs
for epoch in range(start_epoch, epochs):
# Decay learning rate at particular epochs
if epoch in decay_lr_at:
adjust_learning_rate(optimizer, decay_lr_to)
# One epoch's training
train(train_loader=train_loader,
model=model,
criterion=criterion,
optimizer=optimizer,
epoch=epoch)
# Save checkpoint
save_checkpoint(epoch, model, optimizer)
def train(train_loader, model, criterion, optimizer, epoch):
"""
One epoch's training.
:param train_loader: DataLoader for training data
:param model: model
:param criterion: MultiBox loss
:param optimizer: optimizer
:param epoch: epoch number
"""
model.train() # training mode enables dropout
batch_time = AverageMeter() # forward prop. + back prop. time
data_time = AverageMeter() # data loading time
losses = AverageMeter() # loss
start = time.time()
import numpy as np
# Batches
for i, (images, labels) in enumerate(train_loader):
#CHECK / REMOVE THIS CODE!
data_time.update(time.time() - start)
#print(len(images))
#print(labels)
# Move to default device
data = images
a = np.asarray(data)
#print(a.shape)
#a = np.squeeze(a, axis=1) # shape should now be (L, 224, 224, 3)
#image = torch.from_numpy(a)
#image = image.permute(0,3,1,2)
#print(image.shape)
#Pre-processing:
from torchvision import transforms as transf
preprocess = transf.Compose([
transf.ToPILImage(),
transf.Resize(300),
transf.CenterCrop(300),
transf.ToTensor(),
transf.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
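        # Illustrative note (not part of the original script): each 1920x1080
        # AVD frame is resized so its shorter side is 300, center-cropped to
        # 300x300 and normalized, so preprocess(images[j]) yields a float
        # tensor of shape (3, 300, 300), as SSD300 expects.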
for j in range(batch_size):
if j == 0:
input_tensor = preprocess(images[j])
input_tensor = input_tensor.unsqueeze(0)
input_batch = input_tensor
else:
input_tensor = preprocess(images[j])
#print(input_tensor)
input_tensor = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
#print(input_tensor.shape)
input_batch = torch.cat((input_batch, input_tensor), 0)
#print("shape images: ",input_batch.shape)
# In the Active Vision Dataset we have this formatting:
# [xmin ymin xmax ymax instance_id difficulty]
""" From the Tutorial:
Since the number of objects in any given image can vary, we can't use a fixed
size tensor for storing the bounding boxes for the entire batch of N images.
Therefore, ground truth bounding boxes fed to the model must be a list of
length N, where each element of the list is a Float tensor of dimensions
N_o, 4, where N_o is the number of objects present in that particular image.
Therefore, ground truth labels fed to the model must be a list of length N,
where each element of the list is a Long tensor of dimensions N_o, where N_o
is the number of objects present in that particular image.
"""
#Prints to test
#print(j)
box_id_diff = [b for b in labels[j][0]]
box = [l[0:4] for l in box_id_diff]
#print('before:',box) #To check
#Boundary coordinates as requested
for k in range(len(box)):
box[k][0] = box[k][0]/1920.0
box[k][2] = box[k][2]/1920.0
box[k][1] = box[k][1]/1080.0
box[k][3] = box[k][3]/1080.0
#print('after:',box) #To check
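            # Worked example (illustrative only): with 1920x1080 frames, a
            # pixel box [480, 270, 1440, 810] becomes the fractional
            # boundary coordinates [0.25, 0.25, 0.75, 0.75].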
box_tensor = torch.FloatTensor(box).to(device)
#Done with the parameter in AVD method
"""
#Check if there are objects in the images
if j == 0:
start = True
if len(box_tensor) > 0:
if start == True:
box_list = box_tensor
start = False
elif start == False:
box_list = [box_list, box_tensor]
#box_list = torch.cat((box_list,box_tensor),0)
else:
start = True
"""
#print(box_tensor) #To check
if j == 0:
box_list = [box_tensor]
else:
box_list.append(box_tensor)
label = [l[4] for l in box_id_diff]
label_tensor = torch.LongTensor(label).to(device)
if j == 0:
label_list = [label_tensor]
else:
label_list.append(label_tensor)
#print(box_id_diff[0][0:4])
"""
if len(box_id_diff.size())-1 != 0:
if j == 0:
box = box_id_diff[0][0:4]
print("asad:",box)
#box = box.unsqueeze(0)
boxes = box
else:
box = [l[0:4] for l in box_id_diff]
#box = box.unsqueeze(0) # create a mini-batch as expected by the model
#print(input_tensor.shape)
boxes = torch.cat((boxes, box), 0)
print("boxes:", boxes)
"""
#box = torch.split(box_id_diff, 2)
#print(box)
"""
if not labels[j][0]:
labels = []
print("coasc")
else:
labels = [l.to(device) for l in torch.tensor(labels[j][0][4])]
"""
#print("list of boxes:",box_list)
#print("list of labels:", label_list)
images = input_batch.to(device) # (batch_size (N), 3, 300, 300)
#print(images.shape)
boxes = box_list
labels = label_list
# Forward prop.
predicted_locs, predicted_scores = model(images) # (N, 8732, 4), (N, 8732, n_classes)
#Prints to check the dimensions
#print(predicted_locs.shape) #correct
#print(predicted_scores.shape) #correct
# Loss
loss = criterion(predicted_locs, predicted_scores, boxes, labels) # scalar
# Backward prop.
optimizer.zero_grad()
loss.backward()
# Clip gradients, if necessary
if grad_clip is not None:
clip_gradient(optimizer, grad_clip)
# Update model
optimizer.step()
losses.update(loss.item(), images.size(0))
batch_time.update(time.time() - start)
start = time.time()
# Print status
if i % print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(train_loader), loss=losses))
"""
print('Epoch: [{0}][{1}/{2}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(train_loader),
batch_time=batch_time,
data_time=data_time, loss=losses))
"""
del predicted_locs, predicted_scores, images, boxes, labels # free some memory since their histories may be stored
if __name__ == '__main__':
main()
| 38.049419
| 184
| 0.537627
|
import time
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from model import SSD300, MultiBoxLoss
from datasets import PascalVOCDataset
from utils import *
data_folder = 'google_drive/MyDrive/ColabNotebooks/Project/GT'
keep_difficult = True
n_classes = len(label_map)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
checkpoint = "google_drive/MyDrive/checkpointsIeri/checkpoint_ssd300.pth.tar"
batch_size = 9
iterations = 120000
workers = 4
print_freq = 5
lr = 5e-4
decay_lr_at = [80000, 100000]
decay_lr_to = 0.1
momentum = 0.9
weight_decay = 5e-4
grad_clip = None
cudnn.benchmark = True
def main():
global start_epoch, label_map, epoch, checkpoint, decay_lr_at
if checkpoint is None:
start_epoch = 0
model = SSD300(n_classes=n_classes)
biases = list()
not_biases = list()
for param_name, param in model.named_parameters():
if param.requires_grad:
if param_name.endswith('.bias'):
biases.append(param)
else:
not_biases.append(param)
optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
lr=lr, momentum=momentum, weight_decay=weight_decay)
else:
checkpoint = torch.load(checkpoint)
start_epoch = checkpoint['epoch'] + 1
print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
model = checkpoint['model']
optimizer = checkpoint['optimizer']
model = model.to(device)
criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)
import transforms, active_vision_dataset
pick_trans = transforms.PickInstances(range(34))
TRAIN_PATH = "./google_drive/MyDrive/ColabNotebooks/Project/trainDataset"
train_dataset = active_vision_dataset.AVD(root=TRAIN_PATH, train=True,
target_transform=pick_trans,
scene_list=['Home_001_1',
'Home_002_1',
'Home_003_1',
'Home_004_1',
'Home_005_1',
'Home_006_1',
'Home_007_1',
'Home_008_1',
'Home_014_1',
'Home_011_1',
'Home_010_1',
'Office_001_1'],
fraction_of_no_box=-1)
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size,
shuffle=True,
collate_fn=active_vision_dataset.collate
)
epochs = iterations // (len(train_dataset) // 32)
decay_lr_at = [it // (len(train_dataset) // 32) for it in decay_lr_at]
for epoch in range(start_epoch, epochs):
if epoch in decay_lr_at:
adjust_learning_rate(optimizer, decay_lr_to)
train(train_loader=train_loader,
model=model,
criterion=criterion,
optimizer=optimizer,
epoch=epoch)
# Save checkpoint
save_checkpoint(epoch, model, optimizer)
def train(train_loader, model, criterion, optimizer, epoch):
model.train() # training mode enables dropout
batch_time = AverageMeter() # forward prop. + back prop. time
data_time = AverageMeter() # data loading time
losses = AverageMeter() # loss
start = time.time()
import numpy as np
# Batches
for i, (images, labels) in enumerate(train_loader):
#CHECK / REMOVE THIS CODE!
data_time.update(time.time() - start)
#print(len(images))
#print(labels)
# Move to default device
data = images
a = np.asarray(data)
#print(a.shape)
#a = np.squeeze(a, axis=1) # shape should now be (L, 224, 224, 3)
#image = torch.from_numpy(a)
#image = image.permute(0,3,1,2)
#print(image.shape)
#Pre-processing:
from torchvision import transforms as transf
preprocess = transf.Compose([
transf.ToPILImage(),
transf.Resize(300),
transf.CenterCrop(300),
transf.ToTensor(),
transf.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
for j in range(batch_size):
if j == 0:
input_tensor = preprocess(images[j])
input_tensor = input_tensor.unsqueeze(0)
input_batch = input_tensor
else:
input_tensor = preprocess(images[j])
#print(input_tensor)
input_tensor = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
#print(input_tensor.shape)
input_batch = torch.cat((input_batch, input_tensor), 0)
#print("shape images: ",input_batch.shape)
# In the Active Vision Dataset we have this formatting:
# [xmin ymin xmax ymax instance_id difficulty]
#Prints to test
#print(j)
box_id_diff = [b for b in labels[j][0]]
box = [l[0:4] for l in box_id_diff]
#print('before:',box) #To check
#Boundary coordinates as requested
for k in range(len(box)):
box[k][0] = box[k][0]/1920.0
box[k][2] = box[k][2]/1920.0
box[k][1] = box[k][1]/1080.0
box[k][3] = box[k][3]/1080.0
#print('after:',box) #To check
box_tensor = torch.FloatTensor(box).to(device)
#Done with the parameter in AVD method
#print(box_tensor) #To check
if j == 0:
box_list = [box_tensor]
else:
box_list.append(box_tensor)
label = [l[4] for l in box_id_diff]
label_tensor = torch.LongTensor(label).to(device)
if j == 0:
label_list = [label_tensor]
else:
label_list.append(label_tensor)
#print(box_id_diff[0][0:4])
#box = torch.split(box_id_diff, 2)
#print(box)
#print("list of boxes:",box_list)
#print("list of labels:", label_list)
images = input_batch.to(device) # (batch_size (N), 3, 300, 300)
#print(images.shape)
boxes = box_list
labels = label_list
# Forward prop.
predicted_locs, predicted_scores = model(images) # (N, 8732, 4), (N, 8732, n_classes)
#Prints to check the dimensions
#print(predicted_locs.shape) #correct
#print(predicted_scores.shape) #correct
# Loss
loss = criterion(predicted_locs, predicted_scores, boxes, labels) # scalar
# Backward prop.
optimizer.zero_grad()
loss.backward()
# Clip gradients, if necessary
if grad_clip is not None:
clip_gradient(optimizer, grad_clip)
# Update model
optimizer.step()
losses.update(loss.item(), images.size(0))
batch_time.update(time.time() - start)
start = time.time()
# Print status
if i % print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(train_loader), loss=losses))
del predicted_locs, predicted_scores, images, boxes, labels # free some memory since their histories may be stored
if __name__ == '__main__':
main()
| true
| true
|
f7193ee3518594b970384543fd7069dcd703cf96
| 7,181
|
py
|
Python
|
artikcloud/models/aggregates_histogram_response.py
|
artikcloud/artikcloud-python-dev
|
683cd8304f031913bcd581d1eb78ee0efbc5c113
|
[
"Apache-2.0"
] | null | null | null |
artikcloud/models/aggregates_histogram_response.py
|
artikcloud/artikcloud-python-dev
|
683cd8304f031913bcd581d1eb78ee0efbc5c113
|
[
"Apache-2.0"
] | null | null | null |
artikcloud/models/aggregates_histogram_response.py
|
artikcloud/artikcloud-python-dev
|
683cd8304f031913bcd581d1eb78ee0efbc5c113
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
ARTIK Cloud API
    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class AggregatesHistogramResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, data=None, end_date=None, field=None, interval=None, sdid=None, size=None, start_date=None):
"""
AggregatesHistogramResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'data': 'list[AggregatesHistogramData]',
'end_date': 'int',
'field': 'str',
'interval': 'str',
'sdid': 'str',
'size': 'int',
'start_date': 'int'
}
self.attribute_map = {
'data': 'data',
'end_date': 'endDate',
'field': 'field',
'interval': 'interval',
'sdid': 'sdid',
'size': 'size',
'start_date': 'startDate'
}
self._data = data
self._end_date = end_date
self._field = field
self._interval = interval
self._sdid = sdid
self._size = size
self._start_date = start_date
@property
def data(self):
"""
Gets the data of this AggregatesHistogramResponse.
:return: The data of this AggregatesHistogramResponse.
:rtype: list[AggregatesHistogramData]
"""
return self._data
@data.setter
def data(self, data):
"""
Sets the data of this AggregatesHistogramResponse.
:param data: The data of this AggregatesHistogramResponse.
:type: list[AggregatesHistogramData]
"""
self._data = data
@property
def end_date(self):
"""
Gets the end_date of this AggregatesHistogramResponse.
:return: The end_date of this AggregatesHistogramResponse.
:rtype: int
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""
Sets the end_date of this AggregatesHistogramResponse.
:param end_date: The end_date of this AggregatesHistogramResponse.
:type: int
"""
self._end_date = end_date
@property
def field(self):
"""
Gets the field of this AggregatesHistogramResponse.
:return: The field of this AggregatesHistogramResponse.
:rtype: str
"""
return self._field
@field.setter
def field(self, field):
"""
Sets the field of this AggregatesHistogramResponse.
:param field: The field of this AggregatesHistogramResponse.
:type: str
"""
self._field = field
@property
def interval(self):
"""
Gets the interval of this AggregatesHistogramResponse.
:return: The interval of this AggregatesHistogramResponse.
:rtype: str
"""
return self._interval
@interval.setter
def interval(self, interval):
"""
Sets the interval of this AggregatesHistogramResponse.
:param interval: The interval of this AggregatesHistogramResponse.
:type: str
"""
self._interval = interval
@property
def sdid(self):
"""
Gets the sdid of this AggregatesHistogramResponse.
:return: The sdid of this AggregatesHistogramResponse.
:rtype: str
"""
return self._sdid
@sdid.setter
def sdid(self, sdid):
"""
Sets the sdid of this AggregatesHistogramResponse.
:param sdid: The sdid of this AggregatesHistogramResponse.
:type: str
"""
self._sdid = sdid
@property
def size(self):
"""
Gets the size of this AggregatesHistogramResponse.
:return: The size of this AggregatesHistogramResponse.
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""
Sets the size of this AggregatesHistogramResponse.
:param size: The size of this AggregatesHistogramResponse.
:type: int
"""
self._size = size
@property
def start_date(self):
"""
Gets the start_date of this AggregatesHistogramResponse.
:return: The start_date of this AggregatesHistogramResponse.
:rtype: int
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""
Sets the start_date of this AggregatesHistogramResponse.
:param start_date: The start_date of this AggregatesHistogramResponse.
:type: int
"""
self._start_date = start_date
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
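# Minimal usage sketch (illustrative only; not part of the generated file).
# Note that to_dict() keys use the snake_case attribute names from
# swagger_types, while attribute_map holds the camelCase JSON keys presumably
# used by the client's (de)serializer:
#
#     resp = AggregatesHistogramResponse(field="temperature", size=10)
#     resp.start_date = 1500000000
#     resp.to_dict()["start_date"]   # 1500000000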
| 25.464539
| 115
| 0.576521
|
from pprint import pformat
from six import iteritems
import re
class AggregatesHistogramResponse(object):
def __init__(self, data=None, end_date=None, field=None, interval=None, sdid=None, size=None, start_date=None):
self.swagger_types = {
'data': 'list[AggregatesHistogramData]',
'end_date': 'int',
'field': 'str',
'interval': 'str',
'sdid': 'str',
'size': 'int',
'start_date': 'int'
}
self.attribute_map = {
'data': 'data',
'end_date': 'endDate',
'field': 'field',
'interval': 'interval',
'sdid': 'sdid',
'size': 'size',
'start_date': 'startDate'
}
self._data = data
self._end_date = end_date
self._field = field
self._interval = interval
self._sdid = sdid
self._size = size
self._start_date = start_date
@property
def data(self):
return self._data
@data.setter
def data(self, data):
self._data = data
@property
def end_date(self):
return self._end_date
@end_date.setter
def end_date(self, end_date):
self._end_date = end_date
@property
def field(self):
return self._field
@field.setter
def field(self, field):
self._field = field
@property
def interval(self):
return self._interval
@interval.setter
def interval(self, interval):
self._interval = interval
@property
def sdid(self):
return self._sdid
@sdid.setter
def sdid(self, sdid):
self._sdid = sdid
@property
def size(self):
return self._size
@size.setter
def size(self, size):
self._size = size
@property
def start_date(self):
return self._start_date
@start_date.setter
def start_date(self, start_date):
self._start_date = start_date
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f7193ef954651cd69d7c79d1330decefaa2e8768
| 9,116
|
py
|
Python
|
ucsmsdk/mometa/equipment/EquipmentRackEnclosure.py
|
Kego/ucsmsdk
|
244f283a5c295cf746110bb96686d079b19927ce
|
[
"Apache-2.0"
] | 78
|
2015-11-30T14:10:05.000Z
|
2022-02-13T00:29:08.000Z
|
ucsmsdk/mometa/equipment/EquipmentRackEnclosure.py
|
Kego/ucsmsdk
|
244f283a5c295cf746110bb96686d079b19927ce
|
[
"Apache-2.0"
] | 113
|
2015-11-20T09:42:46.000Z
|
2022-03-16T16:53:29.000Z
|
ucsmsdk/mometa/equipment/EquipmentRackEnclosure.py
|
Kego/ucsmsdk
|
244f283a5c295cf746110bb96686d079b19927ce
|
[
"Apache-2.0"
] | 86
|
2015-12-12T08:22:18.000Z
|
2022-01-23T03:56:34.000Z
|
"""This module contains the general information for EquipmentRackEnclosure ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class EquipmentRackEnclosureConsts:
MFG_TIME_NOT_APPLICABLE = "not-applicable"
OPERABILITY_ACCESSIBILITY_PROBLEM = "accessibility-problem"
OPERABILITY_AUTO_UPGRADE = "auto-upgrade"
OPERABILITY_BACKPLANE_PORT_PROBLEM = "backplane-port-problem"
OPERABILITY_BIOS_POST_TIMEOUT = "bios-post-timeout"
OPERABILITY_CHASSIS_INTRUSION = "chassis-intrusion"
OPERABILITY_CHASSIS_LIMIT_EXCEEDED = "chassis-limit-exceeded"
OPERABILITY_CONFIG = "config"
OPERABILITY_DECOMISSIONING = "decomissioning"
OPERABILITY_DEGRADED = "degraded"
OPERABILITY_DISABLED = "disabled"
OPERABILITY_DISCOVERY = "discovery"
OPERABILITY_DISCOVERY_FAILED = "discovery-failed"
OPERABILITY_EQUIPMENT_PROBLEM = "equipment-problem"
OPERABILITY_FABRIC_CONN_PROBLEM = "fabric-conn-problem"
OPERABILITY_FABRIC_UNSUPPORTED_CONN = "fabric-unsupported-conn"
OPERABILITY_IDENTIFY = "identify"
OPERABILITY_IDENTITY_UNESTABLISHABLE = "identity-unestablishable"
OPERABILITY_INOPERABLE = "inoperable"
OPERABILITY_LINK_ACTIVATE_BLOCKED = "link-activate-blocked"
OPERABILITY_MALFORMED_FRU = "malformed-fru"
OPERABILITY_NON_OPTIMAL = "non-optimal"
OPERABILITY_NON_OPTIMAL_SEVERE = "non-optimal-severe"
OPERABILITY_NOT_SUPPORTED = "not-supported"
OPERABILITY_OPERABLE = "operable"
OPERABILITY_PEER_COMM_PROBLEM = "peer-comm-problem"
OPERABILITY_PERFORMANCE_PROBLEM = "performance-problem"
OPERABILITY_POST_FAILURE = "post-failure"
OPERABILITY_POWER_PROBLEM = "power-problem"
OPERABILITY_POWERED_OFF = "powered-off"
OPERABILITY_REMOVED = "removed"
OPERABILITY_THERMAL_PROBLEM = "thermal-problem"
OPERABILITY_UNKNOWN = "unknown"
OPERABILITY_UNSUPPORTED_CONFIG = "unsupported-config"
OPERABILITY_UPGRADE_PROBLEM = "upgrade-problem"
OPERABILITY_VOLTAGE_PROBLEM = "voltage-problem"
PRESENCE_EMPTY = "empty"
PRESENCE_EQUIPPED = "equipped"
PRESENCE_EQUIPPED_DEPRECATED = "equipped-deprecated"
PRESENCE_EQUIPPED_DISC_ERROR = "equipped-disc-error"
PRESENCE_EQUIPPED_DISC_IN_PROGRESS = "equipped-disc-in-progress"
PRESENCE_EQUIPPED_DISC_NOT_STARTED = "equipped-disc-not-started"
PRESENCE_EQUIPPED_DISC_UNKNOWN = "equipped-disc-unknown"
PRESENCE_EQUIPPED_IDENTITY_UNESTABLISHABLE = "equipped-identity-unestablishable"
PRESENCE_EQUIPPED_NOT_PRIMARY = "equipped-not-primary"
PRESENCE_EQUIPPED_SLAVE = "equipped-slave"
PRESENCE_EQUIPPED_UNSUPPORTED = "equipped-unsupported"
PRESENCE_EQUIPPED_WITH_MALFORMED_FRU = "equipped-with-malformed-fru"
PRESENCE_INACCESSIBLE = "inaccessible"
PRESENCE_MISMATCH = "mismatch"
PRESENCE_MISMATCH_IDENTITY_UNESTABLISHABLE = "mismatch-identity-unestablishable"
PRESENCE_MISMATCH_SLAVE = "mismatch-slave"
PRESENCE_MISSING = "missing"
PRESENCE_MISSING_SLAVE = "missing-slave"
PRESENCE_NOT_SUPPORTED = "not-supported"
PRESENCE_UNAUTHORIZED = "unauthorized"
PRESENCE_UNKNOWN = "unknown"
class EquipmentRackEnclosure(ManagedObject):
"""This is EquipmentRackEnclosure class."""
consts = EquipmentRackEnclosureConsts()
naming_props = set(['id'])
mo_meta = MoMeta("EquipmentRackEnclosure", "equipmentRackEnclosure", "rack-enclosure-[id]", VersionMeta.Version401a, "InputOutput", 0x3f, [], ["admin", "pn-equipment", "pn-maintenance", "pn-policy"], ['topSystem'], ['equipmentFanModule', 'equipmentPsu', 'equipmentSlotEp'], [None])
prop_meta = {
"asset_tag": MoPropertyMeta("asset_tag", "assetTag", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,32}""", [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version401a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"flt_aggr": MoPropertyMeta("flt_aggr", "fltAggr", "ulong", VersionMeta.Version401a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version401a, MoPropertyMeta.NAMING, 0x8, None, None, None, [], []),
"mfg_time": MoPropertyMeta("mfg_time", "mfgTime", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", ["not-applicable"], []),
"model": MoPropertyMeta("model", "model", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"oper_qualifier_reason": MoPropertyMeta("oper_qualifier_reason", "operQualifierReason", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"operability": MoPropertyMeta("operability", "operability", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["accessibility-problem", "auto-upgrade", "backplane-port-problem", "bios-post-timeout", "chassis-intrusion", "chassis-limit-exceeded", "config", "decomissioning", "degraded", "disabled", "discovery", "discovery-failed", "equipment-problem", "fabric-conn-problem", "fabric-unsupported-conn", "identify", "identity-unestablishable", "inoperable", "link-activate-blocked", "malformed-fru", "non-optimal", "non-optimal-severe", "not-supported", "operable", "peer-comm-problem", "performance-problem", "post-failure", "power-problem", "powered-off", "removed", "thermal-problem", "unknown", "unsupported-config", "upgrade-problem", "voltage-problem"], []),
"part_number": MoPropertyMeta("part_number", "partNumber", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"presence": MoPropertyMeta("presence", "presence", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["empty", "equipped", "equipped-deprecated", "equipped-disc-error", "equipped-disc-in-progress", "equipped-disc-not-started", "equipped-disc-unknown", "equipped-identity-unestablishable", "equipped-not-primary", "equipped-slave", "equipped-unsupported", "equipped-with-malformed-fru", "inaccessible", "mismatch", "mismatch-identity-unestablishable", "mismatch-slave", "missing", "missing-slave", "not-supported", "unauthorized", "unknown"], []),
"revision": MoPropertyMeta("revision", "revision", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"serial": MoPropertyMeta("serial", "serial", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version401a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"vid": MoPropertyMeta("vid", "vid", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
}
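    # Editor's note: MoPropertyMeta arguments are positional. Inferred from the
    # entries above (not taken from SDK documentation), they read as: python
    # name, XML attribute, type, version introduced, access, dirty mask, min
    # length, max length, validation regex, allowed values, range list.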
prop_map = {
"assetTag": "asset_tag",
"childAction": "child_action",
"dn": "dn",
"fltAggr": "flt_aggr",
"id": "id",
"mfgTime": "mfg_time",
"model": "model",
"operQualifierReason": "oper_qualifier_reason",
"operability": "operability",
"partNumber": "part_number",
"presence": "presence",
"revision": "revision",
"rn": "rn",
"sacl": "sacl",
"serial": "serial",
"status": "status",
"vendor": "vendor",
"vid": "vid",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.asset_tag = None
self.child_action = None
self.flt_aggr = None
self.mfg_time = None
self.model = None
self.oper_qualifier_reason = None
self.operability = None
self.part_number = None
self.presence = None
self.revision = None
self.sacl = None
self.serial = None
self.status = None
self.vendor = None
self.vid = None
ManagedObject.__init__(self, "EquipmentRackEnclosure", parent_mo_or_dn, **kwargs)
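# Editor's note (hypothetical usage sketch, not part of the generated module):
# per mo_meta above the rn is "rack-enclosure-[id]" under topSystem, so with a
# ucsmsdk UcsHandle one would typically read this mostly READ_ONLY MO via:
#
#     enclosure = handle.query_dn("sys/rack-enclosure-1")
#     print(enclosure.operability, enclosure.presence)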
| 66.057971
| 805
| 0.693725
|
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class EquipmentRackEnclosureConsts:
MFG_TIME_NOT_APPLICABLE = "not-applicable"
OPERABILITY_ACCESSIBILITY_PROBLEM = "accessibility-problem"
OPERABILITY_AUTO_UPGRADE = "auto-upgrade"
OPERABILITY_BACKPLANE_PORT_PROBLEM = "backplane-port-problem"
OPERABILITY_BIOS_POST_TIMEOUT = "bios-post-timeout"
OPERABILITY_CHASSIS_INTRUSION = "chassis-intrusion"
OPERABILITY_CHASSIS_LIMIT_EXCEEDED = "chassis-limit-exceeded"
OPERABILITY_CONFIG = "config"
OPERABILITY_DECOMISSIONING = "decomissioning"
OPERABILITY_DEGRADED = "degraded"
OPERABILITY_DISABLED = "disabled"
OPERABILITY_DISCOVERY = "discovery"
OPERABILITY_DISCOVERY_FAILED = "discovery-failed"
OPERABILITY_EQUIPMENT_PROBLEM = "equipment-problem"
OPERABILITY_FABRIC_CONN_PROBLEM = "fabric-conn-problem"
OPERABILITY_FABRIC_UNSUPPORTED_CONN = "fabric-unsupported-conn"
OPERABILITY_IDENTIFY = "identify"
OPERABILITY_IDENTITY_UNESTABLISHABLE = "identity-unestablishable"
OPERABILITY_INOPERABLE = "inoperable"
OPERABILITY_LINK_ACTIVATE_BLOCKED = "link-activate-blocked"
OPERABILITY_MALFORMED_FRU = "malformed-fru"
OPERABILITY_NON_OPTIMAL = "non-optimal"
OPERABILITY_NON_OPTIMAL_SEVERE = "non-optimal-severe"
OPERABILITY_NOT_SUPPORTED = "not-supported"
OPERABILITY_OPERABLE = "operable"
OPERABILITY_PEER_COMM_PROBLEM = "peer-comm-problem"
OPERABILITY_PERFORMANCE_PROBLEM = "performance-problem"
OPERABILITY_POST_FAILURE = "post-failure"
OPERABILITY_POWER_PROBLEM = "power-problem"
OPERABILITY_POWERED_OFF = "powered-off"
OPERABILITY_REMOVED = "removed"
OPERABILITY_THERMAL_PROBLEM = "thermal-problem"
OPERABILITY_UNKNOWN = "unknown"
OPERABILITY_UNSUPPORTED_CONFIG = "unsupported-config"
OPERABILITY_UPGRADE_PROBLEM = "upgrade-problem"
OPERABILITY_VOLTAGE_PROBLEM = "voltage-problem"
PRESENCE_EMPTY = "empty"
PRESENCE_EQUIPPED = "equipped"
PRESENCE_EQUIPPED_DEPRECATED = "equipped-deprecated"
PRESENCE_EQUIPPED_DISC_ERROR = "equipped-disc-error"
PRESENCE_EQUIPPED_DISC_IN_PROGRESS = "equipped-disc-in-progress"
PRESENCE_EQUIPPED_DISC_NOT_STARTED = "equipped-disc-not-started"
PRESENCE_EQUIPPED_DISC_UNKNOWN = "equipped-disc-unknown"
PRESENCE_EQUIPPED_IDENTITY_UNESTABLISHABLE = "equipped-identity-unestablishable"
PRESENCE_EQUIPPED_NOT_PRIMARY = "equipped-not-primary"
PRESENCE_EQUIPPED_SLAVE = "equipped-slave"
PRESENCE_EQUIPPED_UNSUPPORTED = "equipped-unsupported"
PRESENCE_EQUIPPED_WITH_MALFORMED_FRU = "equipped-with-malformed-fru"
PRESENCE_INACCESSIBLE = "inaccessible"
PRESENCE_MISMATCH = "mismatch"
PRESENCE_MISMATCH_IDENTITY_UNESTABLISHABLE = "mismatch-identity-unestablishable"
PRESENCE_MISMATCH_SLAVE = "mismatch-slave"
PRESENCE_MISSING = "missing"
PRESENCE_MISSING_SLAVE = "missing-slave"
PRESENCE_NOT_SUPPORTED = "not-supported"
PRESENCE_UNAUTHORIZED = "unauthorized"
PRESENCE_UNKNOWN = "unknown"
class EquipmentRackEnclosure(ManagedObject):
consts = EquipmentRackEnclosureConsts()
naming_props = set(['id'])
mo_meta = MoMeta("EquipmentRackEnclosure", "equipmentRackEnclosure", "rack-enclosure-[id]", VersionMeta.Version401a, "InputOutput", 0x3f, [], ["admin", "pn-equipment", "pn-maintenance", "pn-policy"], ['topSystem'], ['equipmentFanModule', 'equipmentPsu', 'equipmentSlotEp'], [None])
prop_meta = {
"asset_tag": MoPropertyMeta("asset_tag", "assetTag", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,32}""", [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version401a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"flt_aggr": MoPropertyMeta("flt_aggr", "fltAggr", "ulong", VersionMeta.Version401a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version401a, MoPropertyMeta.NAMING, 0x8, None, None, None, [], []),
"mfg_time": MoPropertyMeta("mfg_time", "mfgTime", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", ["not-applicable"], []),
"model": MoPropertyMeta("model", "model", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"oper_qualifier_reason": MoPropertyMeta("oper_qualifier_reason", "operQualifierReason", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"operability": MoPropertyMeta("operability", "operability", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["accessibility-problem", "auto-upgrade", "backplane-port-problem", "bios-post-timeout", "chassis-intrusion", "chassis-limit-exceeded", "config", "decomissioning", "degraded", "disabled", "discovery", "discovery-failed", "equipment-problem", "fabric-conn-problem", "fabric-unsupported-conn", "identify", "identity-unestablishable", "inoperable", "link-activate-blocked", "malformed-fru", "non-optimal", "non-optimal-severe", "not-supported", "operable", "peer-comm-problem", "performance-problem", "post-failure", "power-problem", "powered-off", "removed", "thermal-problem", "unknown", "unsupported-config", "upgrade-problem", "voltage-problem"], []),
"part_number": MoPropertyMeta("part_number", "partNumber", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"presence": MoPropertyMeta("presence", "presence", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["empty", "equipped", "equipped-deprecated", "equipped-disc-error", "equipped-disc-in-progress", "equipped-disc-not-started", "equipped-disc-unknown", "equipped-identity-unestablishable", "equipped-not-primary", "equipped-slave", "equipped-unsupported", "equipped-with-malformed-fru", "inaccessible", "mismatch", "mismatch-identity-unestablishable", "mismatch-slave", "missing", "missing-slave", "not-supported", "unauthorized", "unknown"], []),
"revision": MoPropertyMeta("revision", "revision", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"serial": MoPropertyMeta("serial", "serial", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version401a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"vid": MoPropertyMeta("vid", "vid", "string", VersionMeta.Version401a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
}
prop_map = {
"assetTag": "asset_tag",
"childAction": "child_action",
"dn": "dn",
"fltAggr": "flt_aggr",
"id": "id",
"mfgTime": "mfg_time",
"model": "model",
"operQualifierReason": "oper_qualifier_reason",
"operability": "operability",
"partNumber": "part_number",
"presence": "presence",
"revision": "revision",
"rn": "rn",
"sacl": "sacl",
"serial": "serial",
"status": "status",
"vendor": "vendor",
"vid": "vid",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.asset_tag = None
self.child_action = None
self.flt_aggr = None
self.mfg_time = None
self.model = None
self.oper_qualifier_reason = None
self.operability = None
self.part_number = None
self.presence = None
self.revision = None
self.sacl = None
self.serial = None
self.status = None
self.vendor = None
self.vid = None
ManagedObject.__init__(self, "EquipmentRackEnclosure", parent_mo_or_dn, **kwargs)
| true
| true
|
f7193f652b00cfdbac8c192602a1299716aac80a
| 1,690
|
py
|
Python
|
service/tests/test_auth.py
|
SWE-AGGERS/reactions_service
|
eb8e4bcb9f9e69c03a89da82f3c71a3454fc285c
|
[
"MIT"
] | null | null | null |
service/tests/test_auth.py
|
SWE-AGGERS/reactions_service
|
eb8e4bcb9f9e69c03a89da82f3c71a3454fc285c
|
[
"MIT"
] | null | null | null |
service/tests/test_auth.py
|
SWE-AGGERS/reactions_service
|
eb8e4bcb9f9e69c03a89da82f3c71a3454fc285c
|
[
"MIT"
] | null | null | null |
import json
import unittest
import mock
from service.app import create_app
from service.auth import encode_auth_token
from service.database import empty_db
class TestAuth(unittest.TestCase):
def test0(self):
user_id = 1
# create token
new_token = encode_auth_token(user_id)
_app = create_app(debug=True)
empty_db(_app)
with _app.test_client() as client:
with mock.patch('service.views.reactions.exist_story') as exist_story_mock:
exist_story_mock.return_value = True
reply = client.post('/reactions/1/1/1', headers={'Authorization': 'Bearer ' + new_token})
body = json.loads(str(reply.data, 'utf8'))
self.assertEqual(int(body['reaction']), 1)
self.assertEqual(body['reply'], 'Reaction created!')
self.assertEqual(int(body['story_id']), 1)
# wrong token
reply = client.post('/reactions/1/1/1', headers={'Authorization': 'Bearer ' + 'a'})
body = json.loads(str(reply.data, 'utf8'))
self.assertEqual(int(body['reaction']), 1)
self.assertEqual(body['reply'], 'Provide a valid auth token!')
self.assertEqual(int(body['story_id']), 1)
# wrong token: 'Bearer token malformed!'
reply = client.post('/reactions/1/1/1', headers={'Authorization': 'a'})
body = json.loads(str(reply.data, 'utf8'))
self.assertEqual(int(body['reaction']), 1)
self.assertEqual(body['reply'], 'Bearer token malformed!')
self.assertEqual(int(body['story_id']), 1)
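# Editor's note (hypothetical sketch): encode_auth_token is not shown in this
# file. A Flask service like this one typically mints a JWT; with PyJWT and an
# assumed SECRET_KEY it might look like the following (illustrative names only,
# not this service's actual API):
#
#     import jwt
#
#     def encode_auth_token(user_id):
#         return jwt.encode({"sub": user_id}, "SECRET_KEY", algorithm="HS256")
#
#     def decode_auth_token(token):
#         return jwt.decode(token, "SECRET_KEY", algorithms=["HS256"])["sub"]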
| 40.238095
| 105
| 0.586391
|
import json
import unittest
import mock
from service.app import create_app
from service.auth import encode_auth_token
from service.database import empty_db
class TestAuth(unittest.TestCase):
def test0(self):
user_id = 1
new_token = encode_auth_token(user_id)
_app = create_app(debug=True)
empty_db(_app)
with _app.test_client() as client:
with mock.patch('service.views.reactions.exist_story') as exist_story_mock:
exist_story_mock.return_value = True
reply = client.post('/reactions/1/1/1', headers={'Authorization': 'Bearer ' + new_token})
body = json.loads(str(reply.data, 'utf8'))
self.assertEqual(int(body['reaction']), 1)
self.assertEqual(body['reply'], 'Reaction created!')
self.assertEqual(int(body['story_id']), 1)
reply = client.post('/reactions/1/1/1', headers={'Authorization': 'Bearer ' + 'a'})
body = json.loads(str(reply.data, 'utf8'))
self.assertEqual(int(body['reaction']), 1)
self.assertEqual(body['reply'], 'Provide a valid auth token!')
self.assertEqual(int(body['story_id']), 1)
reply = client.post('/reactions/1/1/1', headers={'Authorization': 'a'})
body = json.loads(str(reply.data, 'utf8'))
self.assertEqual(int(body['reaction']), 1)
self.assertEqual(body['reply'], 'Bearer token malformed!')
self.assertEqual(int(body['story_id']), 1)
| true
| true
|
f7193f6e01897e3e8a2c39a33cc6cfcfad2301fb
| 63,187
|
py
|
Python
|
tests/test_h3.py
|
SouvikGhosh05/aioquic
|
da566b8ee616b9c83d51f0f5ad0521393119f40f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_h3.py
|
SouvikGhosh05/aioquic
|
da566b8ee616b9c83d51f0f5ad0521393119f40f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_h3.py
|
SouvikGhosh05/aioquic
|
da566b8ee616b9c83d51f0f5ad0521393119f40f
|
[
"BSD-3-Clause"
] | null | null | null |
import binascii
import contextlib
import copy
from unittest import TestCase
from aioquic.buffer import Buffer, encode_uint_var
from aioquic.h3.connection import (
H3_ALPN,
ErrorCode,
FrameType,
FrameUnexpected,
H3Connection,
MessageError,
Setting,
SettingsError,
StreamType,
encode_frame,
encode_settings,
parse_settings,
validate_push_promise_headers,
validate_request_headers,
validate_response_headers,
validate_trailers,
)
from aioquic.h3.events import DataReceived, HeadersReceived, PushPromiseReceived
from aioquic.h3.exceptions import NoAvailablePushIDError
from aioquic.quic.configuration import QuicConfiguration
from aioquic.quic.events import StreamDataReceived
from aioquic.quic.logger import QuicLogger
from .test_connection import client_and_server, transfer
DUMMY_SETTINGS = {
Setting.QPACK_MAX_TABLE_CAPACITY: 4096,
Setting.QPACK_BLOCKED_STREAMS: 16,
Setting.DUMMY: 1,
}
QUIC_CONFIGURATION_OPTIONS = {"alpn_protocols": H3_ALPN}
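# Editor's note (reference sketch, not part of the original suite): the tests
# below hand-craft HTTP/3 wire data. A frame is encoded as two variable-length
# integers, type and payload length, followed by the payload, which is what the
# encode_frame import above produces:
#
#     def encode_frame(frame_type: int, frame_data: bytes) -> bytes:
#         return encode_uint_var(frame_type) + encode_uint_var(len(frame_data)) + frame_data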
def h3_client_and_server(options=QUIC_CONFIGURATION_OPTIONS):
return client_and_server(
client_options=options,
server_options=options,
)
@contextlib.contextmanager
def h3_fake_client_and_server(options=QUIC_CONFIGURATION_OPTIONS):
quic_client = FakeQuicConnection(
configuration=QuicConfiguration(is_client=True, **options)
)
quic_server = FakeQuicConnection(
configuration=QuicConfiguration(is_client=False, **options)
)
# exchange transport parameters
quic_client._remote_max_datagram_frame_size = (
quic_server.configuration.max_datagram_frame_size
)
quic_server._remote_max_datagram_frame_size = (
quic_client.configuration.max_datagram_frame_size
)
yield quic_client, quic_server
def h3_transfer(quic_sender, h3_receiver):
quic_receiver = h3_receiver._quic
if hasattr(quic_sender, "stream_queue"):
quic_receiver._events.extend(quic_sender.stream_queue)
quic_sender.stream_queue.clear()
else:
transfer(quic_sender, quic_receiver)
# process QUIC events
http_events = []
event = quic_receiver.next_event()
while event is not None:
http_events.extend(h3_receiver.handle_event(event))
event = quic_receiver.next_event()
return http_events
class FakeQuicConnection:
def __init__(self, configuration):
self.closed = None
self.configuration = configuration
self.stream_queue = []
self._events = []
self._next_stream_bidi = 0 if configuration.is_client else 1
self._next_stream_uni = 2 if configuration.is_client else 3
self._quic_logger = QuicLogger().start_trace(
is_client=configuration.is_client, odcid=b""
)
self._remote_max_datagram_frame_size = None
def close(self, error_code, reason_phrase):
self.closed = (error_code, reason_phrase)
def get_next_available_stream_id(self, is_unidirectional=False):
if is_unidirectional:
stream_id = self._next_stream_uni
self._next_stream_uni += 4
else:
stream_id = self._next_stream_bidi
self._next_stream_bidi += 4
return stream_id
def next_event(self):
try:
return self._events.pop(0)
except IndexError:
return None
def send_stream_data(self, stream_id, data, end_stream=False):
# chop up data into individual bytes
for c in data:
self.stream_queue.append(
StreamDataReceived(
data=bytes([c]), end_stream=False, stream_id=stream_id
)
)
if end_stream:
self.stream_queue.append(
StreamDataReceived(data=b"", end_stream=end_stream, stream_id=stream_id)
)
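# Editor's note: send_stream_data above delivers data one byte at a time on
# purpose; replaying those events through h3_transfer exercises H3Connection's
# incremental parsing, where frame headers and bodies span arbitrarily small
# reads instead of arriving as whole frames.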
class H3ConnectionTest(TestCase):
maxDiff = None
def _make_request(self, h3_client, h3_server):
quic_client = h3_client._quic
quic_server = h3_server._quic
# send request
stream_id = quic_client.get_next_available_stream_id()
h3_client.send_headers(
stream_id=stream_id,
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/"),
(b"x-foo", b"client"),
],
)
h3_client.send_data(stream_id=stream_id, data=b"", end_stream=True)
# receive request
events = h3_transfer(quic_client, h3_server)
self.assertEqual(
events,
[
HeadersReceived(
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/"),
(b"x-foo", b"client"),
],
stream_id=stream_id,
stream_ended=False,
),
DataReceived(data=b"", stream_id=stream_id, stream_ended=True),
],
)
# send response
h3_server.send_headers(
stream_id=stream_id,
headers=[
(b":status", b"200"),
(b"content-type", b"text/html; charset=utf-8"),
(b"x-foo", b"server"),
],
)
h3_server.send_data(
stream_id=stream_id,
data=b"<html><body>hello</body></html>",
end_stream=True,
)
# receive response
events = h3_transfer(quic_server, h3_client)
self.assertEqual(
events,
[
HeadersReceived(
headers=[
(b":status", b"200"),
(b"content-type", b"text/html; charset=utf-8"),
(b"x-foo", b"server"),
],
stream_id=stream_id,
stream_ended=False,
),
DataReceived(
data=b"<html><body>hello</body></html>",
stream_id=stream_id,
stream_ended=True,
),
],
)
def test_handle_control_frame_headers(self):
"""
We should not receive HEADERS on the control stream.
"""
quic_server = FakeQuicConnection(
configuration=QuicConfiguration(is_client=False)
)
h3_server = H3Connection(quic_server)
self.assertIsNotNone(h3_server.sent_settings)
self.assertIsNone(h3_server.received_settings)
# receive SETTINGS
h3_server.handle_event(
StreamDataReceived(
stream_id=2,
data=encode_uint_var(StreamType.CONTROL)
+ encode_frame(FrameType.SETTINGS, encode_settings(DUMMY_SETTINGS)),
end_stream=False,
)
)
self.assertIsNone(quic_server.closed)
self.assertIsNotNone(h3_server.sent_settings)
self.assertEqual(h3_server.received_settings, DUMMY_SETTINGS)
# receive unexpected HEADERS
h3_server.handle_event(
StreamDataReceived(
stream_id=2,
data=encode_frame(FrameType.HEADERS, b""),
end_stream=False,
)
)
self.assertEqual(
quic_server.closed,
(ErrorCode.H3_FRAME_UNEXPECTED, "Invalid frame type on control stream"),
)
def test_handle_control_frame_max_push_id_from_client_before_settings(self):
"""
A server should not receive MAX_PUSH_ID before SETTINGS.
"""
quic_server = FakeQuicConnection(
configuration=QuicConfiguration(is_client=False)
)
h3_server = H3Connection(quic_server)
# receive unexpected MAX_PUSH_ID
h3_server.handle_event(
StreamDataReceived(
stream_id=2,
data=encode_uint_var(StreamType.CONTROL)
+ encode_frame(FrameType.MAX_PUSH_ID, b""),
end_stream=False,
)
)
self.assertEqual(
quic_server.closed,
(ErrorCode.H3_MISSING_SETTINGS, ""),
)
def test_handle_control_frame_max_push_id_from_server(self):
"""
A client should not receive MAX_PUSH_ID on the control stream.
"""
quic_client = FakeQuicConnection(
configuration=QuicConfiguration(is_client=True)
)
h3_client = H3Connection(quic_client)
# receive SETTINGS
h3_client.handle_event(
StreamDataReceived(
stream_id=3,
data=encode_uint_var(StreamType.CONTROL)
+ encode_frame(FrameType.SETTINGS, encode_settings(DUMMY_SETTINGS)),
end_stream=False,
)
)
self.assertIsNone(quic_client.closed)
# receive unexpected MAX_PUSH_ID
h3_client.handle_event(
StreamDataReceived(
stream_id=3,
data=encode_frame(FrameType.MAX_PUSH_ID, b""),
end_stream=False,
)
)
self.assertEqual(
quic_client.closed,
(ErrorCode.H3_FRAME_UNEXPECTED, "Servers must not send MAX_PUSH_ID"),
)
def test_handle_control_settings_twice(self):
"""
        We should not receive SETTINGS twice on the control stream.
"""
quic_server = FakeQuicConnection(
configuration=QuicConfiguration(is_client=False)
)
h3_server = H3Connection(quic_server)
# receive SETTINGS
h3_server.handle_event(
StreamDataReceived(
stream_id=2,
data=encode_uint_var(StreamType.CONTROL)
+ encode_frame(FrameType.SETTINGS, encode_settings(DUMMY_SETTINGS)),
end_stream=False,
)
)
self.assertIsNone(quic_server.closed)
# receive unexpected SETTINGS
h3_server.handle_event(
StreamDataReceived(
stream_id=2,
data=encode_frame(FrameType.SETTINGS, encode_settings(DUMMY_SETTINGS)),
end_stream=False,
)
)
self.assertEqual(
quic_server.closed,
(ErrorCode.H3_FRAME_UNEXPECTED, "SETTINGS have already been received"),
)
def test_handle_control_stream_close(self):
"""
Closing the control stream is not allowed.
"""
quic_client = FakeQuicConnection(
configuration=QuicConfiguration(is_client=True)
)
h3_client = H3Connection(quic_client)
# receive SETTINGS
h3_client.handle_event(
StreamDataReceived(
stream_id=3,
data=encode_uint_var(StreamType.CONTROL)
+ encode_frame(FrameType.SETTINGS, encode_settings(DUMMY_SETTINGS)),
end_stream=False,
)
)
self.assertIsNone(quic_client.closed)
# receive unexpected FIN
h3_client.handle_event(
StreamDataReceived(
stream_id=3,
data=b"",
end_stream=True,
)
)
self.assertEqual(
quic_client.closed,
(
ErrorCode.H3_CLOSED_CRITICAL_STREAM,
"Closing control stream is not allowed",
),
)
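    # Editor's note: the control stream and both QPACK streams are "critical
    # streams" (RFC 9114, section 6.2.1); closing any of them is a connection
    # error of type H3_CLOSED_CRITICAL_STREAM, as asserted above.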
def test_handle_control_stream_duplicate(self):
"""
We must only receive a single control stream.
"""
quic_server = FakeQuicConnection(
configuration=QuicConfiguration(is_client=False)
)
h3_server = H3Connection(quic_server)
# receive a first control stream
h3_server.handle_event(
StreamDataReceived(
stream_id=2, data=encode_uint_var(StreamType.CONTROL), end_stream=False
)
)
# receive a second control stream
h3_server.handle_event(
StreamDataReceived(
stream_id=6, data=encode_uint_var(StreamType.CONTROL), end_stream=False
)
)
self.assertEqual(
quic_server.closed,
(
ErrorCode.H3_STREAM_CREATION_ERROR,
"Only one control stream is allowed",
),
)
def test_handle_push_frame_wrong_frame_type(self):
"""
        We should not receive SETTINGS on a push stream.
"""
quic_client = FakeQuicConnection(
configuration=QuicConfiguration(is_client=True)
)
h3_client = H3Connection(quic_client)
h3_client.handle_event(
StreamDataReceived(
stream_id=15,
data=encode_uint_var(StreamType.PUSH)
+ encode_uint_var(0) # push ID
+ encode_frame(FrameType.SETTINGS, b""),
end_stream=False,
)
)
self.assertEqual(
quic_client.closed,
(ErrorCode.H3_FRAME_UNEXPECTED, "Invalid frame type on push stream"),
)
def test_handle_qpack_decoder_duplicate(self):
"""
We must only receive a single QPACK decoder stream.
"""
quic_client = FakeQuicConnection(
configuration=QuicConfiguration(is_client=True)
)
h3_client = H3Connection(quic_client)
# receive a first decoder stream
h3_client.handle_event(
StreamDataReceived(
stream_id=11,
data=encode_uint_var(StreamType.QPACK_DECODER),
end_stream=False,
)
)
# receive a second decoder stream
h3_client.handle_event(
StreamDataReceived(
stream_id=15,
data=encode_uint_var(StreamType.QPACK_DECODER),
end_stream=False,
)
)
self.assertEqual(
quic_client.closed,
(
ErrorCode.H3_STREAM_CREATION_ERROR,
"Only one QPACK decoder stream is allowed",
),
)
def test_handle_qpack_decoder_stream_error(self):
"""
Receiving garbage on the QPACK decoder stream triggers an exception.
"""
quic_client = FakeQuicConnection(
configuration=QuicConfiguration(is_client=True)
)
h3_client = H3Connection(quic_client)
h3_client.handle_event(
StreamDataReceived(
stream_id=11,
data=encode_uint_var(StreamType.QPACK_DECODER) + b"\x00",
end_stream=False,
)
)
self.assertEqual(quic_client.closed, (ErrorCode.QPACK_DECODER_STREAM_ERROR, ""))
def test_handle_qpack_encoder_duplicate(self):
"""
We must only receive a single QPACK encoder stream.
"""
quic_client = FakeQuicConnection(
configuration=QuicConfiguration(is_client=True)
)
h3_client = H3Connection(quic_client)
# receive a first encoder stream
h3_client.handle_event(
StreamDataReceived(
stream_id=11,
data=encode_uint_var(StreamType.QPACK_ENCODER),
end_stream=False,
)
)
# receive a second encoder stream
h3_client.handle_event(
StreamDataReceived(
stream_id=15,
data=encode_uint_var(StreamType.QPACK_ENCODER),
end_stream=False,
)
)
self.assertEqual(
quic_client.closed,
(
ErrorCode.H3_STREAM_CREATION_ERROR,
"Only one QPACK encoder stream is allowed",
),
)
def test_handle_qpack_encoder_stream_error(self):
"""
Receiving garbage on the QPACK encoder stream triggers an exception.
"""
quic_client = FakeQuicConnection(
configuration=QuicConfiguration(is_client=True)
)
h3_client = H3Connection(quic_client)
h3_client.handle_event(
StreamDataReceived(
stream_id=7,
data=encode_uint_var(StreamType.QPACK_ENCODER) + b"\x00",
end_stream=False,
)
)
self.assertEqual(quic_client.closed, (ErrorCode.QPACK_ENCODER_STREAM_ERROR, ""))
def test_handle_request_frame_bad_headers(self):
"""
We should not receive HEADERS which cannot be decoded.
"""
quic_server = FakeQuicConnection(
configuration=QuicConfiguration(is_client=False)
)
h3_server = H3Connection(quic_server)
h3_server.handle_event(
StreamDataReceived(
stream_id=0, data=encode_frame(FrameType.HEADERS, b""), end_stream=False
)
)
self.assertEqual(quic_server.closed, (ErrorCode.QPACK_DECOMPRESSION_FAILED, ""))
def test_handle_request_frame_data_before_headers(self):
"""
We should not receive DATA before receiving headers.
"""
quic_server = FakeQuicConnection(
configuration=QuicConfiguration(is_client=False)
)
h3_server = H3Connection(quic_server)
h3_server.handle_event(
StreamDataReceived(
stream_id=0, data=encode_frame(FrameType.DATA, b""), end_stream=False
)
)
self.assertEqual(
quic_server.closed,
(
ErrorCode.H3_FRAME_UNEXPECTED,
"DATA frame is not allowed in this state",
),
)
def test_handle_request_frame_headers_after_trailers(self):
"""
We should not receive HEADERS after receiving trailers.
"""
with h3_fake_client_and_server() as (quic_client, quic_server):
h3_client = H3Connection(quic_client)
h3_server = H3Connection(quic_server)
stream_id = quic_client.get_next_available_stream_id()
h3_client.send_headers(
stream_id=stream_id,
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/"),
],
)
h3_client.send_headers(
stream_id=stream_id,
headers=[(b"x-some-trailer", b"foo")],
end_stream=True,
)
h3_transfer(quic_client, h3_server)
h3_server.handle_event(
StreamDataReceived(
stream_id=0,
data=encode_frame(FrameType.HEADERS, b""),
end_stream=False,
)
)
self.assertEqual(
quic_server.closed,
(
ErrorCode.H3_FRAME_UNEXPECTED,
"HEADERS frame is not allowed in this state",
),
)
def test_handle_request_frame_push_promise_from_client(self):
"""
A server should not receive PUSH_PROMISE on a request stream.
"""
quic_server = FakeQuicConnection(
configuration=QuicConfiguration(is_client=False)
)
h3_server = H3Connection(quic_server)
h3_server.handle_event(
StreamDataReceived(
stream_id=0,
data=encode_frame(FrameType.PUSH_PROMISE, b""),
end_stream=False,
)
)
self.assertEqual(
quic_server.closed,
(ErrorCode.H3_FRAME_UNEXPECTED, "Clients must not send PUSH_PROMISE"),
)
def test_handle_request_frame_wrong_frame_type(self):
quic_server = FakeQuicConnection(
configuration=QuicConfiguration(is_client=False)
)
h3_server = H3Connection(quic_server)
h3_server.handle_event(
StreamDataReceived(
stream_id=0,
data=encode_frame(FrameType.SETTINGS, b""),
end_stream=False,
)
)
self.assertEqual(
quic_server.closed,
(ErrorCode.H3_FRAME_UNEXPECTED, "Invalid frame type on request stream"),
)
def test_request(self):
with h3_client_and_server() as (quic_client, quic_server):
h3_client = H3Connection(quic_client)
h3_server = H3Connection(quic_server)
# make first request
self._make_request(h3_client, h3_server)
# make second request
self._make_request(h3_client, h3_server)
# make third request -> dynamic table
self._make_request(h3_client, h3_server)
def test_request_headers_only(self):
with h3_client_and_server() as (quic_client, quic_server):
h3_client = H3Connection(quic_client)
h3_server = H3Connection(quic_server)
# send request
stream_id = quic_client.get_next_available_stream_id()
h3_client.send_headers(
stream_id=stream_id,
headers=[
(b":method", b"HEAD"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/"),
(b"x-foo", b"client"),
],
end_stream=True,
)
# receive request
events = h3_transfer(quic_client, h3_server)
self.assertEqual(
events,
[
HeadersReceived(
headers=[
(b":method", b"HEAD"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/"),
(b"x-foo", b"client"),
],
stream_id=stream_id,
stream_ended=True,
)
],
)
# send response
h3_server.send_headers(
stream_id=stream_id,
headers=[
(b":status", b"200"),
(b"content-type", b"text/html; charset=utf-8"),
(b"x-foo", b"server"),
],
end_stream=True,
)
# receive response
events = h3_transfer(quic_server, h3_client)
self.assertEqual(
events,
[
HeadersReceived(
headers=[
(b":status", b"200"),
(b"content-type", b"text/html; charset=utf-8"),
(b"x-foo", b"server"),
],
stream_id=stream_id,
stream_ended=True,
)
],
)
def test_request_fragmented_frame(self):
with h3_fake_client_and_server() as (quic_client, quic_server):
h3_client = H3Connection(quic_client)
h3_server = H3Connection(quic_server)
# send request
stream_id = quic_client.get_next_available_stream_id()
h3_client.send_headers(
stream_id=stream_id,
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/"),
(b"x-foo", b"client"),
],
)
h3_client.send_data(stream_id=stream_id, data=b"hello", end_stream=True)
# receive request
events = h3_transfer(quic_client, h3_server)
self.assertEqual(
events,
[
HeadersReceived(
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/"),
(b"x-foo", b"client"),
],
stream_id=stream_id,
stream_ended=False,
),
DataReceived(data=b"h", stream_id=0, stream_ended=False),
DataReceived(data=b"e", stream_id=0, stream_ended=False),
DataReceived(data=b"l", stream_id=0, stream_ended=False),
DataReceived(data=b"l", stream_id=0, stream_ended=False),
DataReceived(data=b"o", stream_id=0, stream_ended=False),
DataReceived(data=b"", stream_id=0, stream_ended=True),
],
)
# send push promise
push_stream_id = h3_server.send_push_promise(
stream_id=stream_id,
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/app.txt"),
],
)
self.assertEqual(push_stream_id, 15)
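            # Editor's note: server-initiated unidirectional streams are
            # numbered 3, 7, 11, 15, ...; the server's control (3), QPACK
            # encoder (7) and QPACK decoder (11) streams are opened first, so
            # the first push stream lands on ID 15.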
# send response
h3_server.send_headers(
stream_id=stream_id,
headers=[
(b":status", b"200"),
(b"content-type", b"text/html; charset=utf-8"),
],
end_stream=False,
)
h3_server.send_data(stream_id=stream_id, data=b"html", end_stream=True)
# fulfill push promise
h3_server.send_headers(
stream_id=push_stream_id,
headers=[(b":status", b"200"), (b"content-type", b"text/plain")],
end_stream=False,
)
h3_server.send_data(stream_id=push_stream_id, data=b"text", end_stream=True)
            # receive push promise / response
events = h3_transfer(quic_server, h3_client)
self.assertEqual(
events,
[
PushPromiseReceived(
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/app.txt"),
],
push_id=0,
stream_id=stream_id,
),
HeadersReceived(
headers=[
(b":status", b"200"),
(b"content-type", b"text/html; charset=utf-8"),
],
stream_id=0,
stream_ended=False,
),
DataReceived(data=b"h", stream_id=0, stream_ended=False),
DataReceived(data=b"t", stream_id=0, stream_ended=False),
DataReceived(data=b"m", stream_id=0, stream_ended=False),
DataReceived(data=b"l", stream_id=0, stream_ended=False),
DataReceived(data=b"", stream_id=0, stream_ended=True),
HeadersReceived(
headers=[
(b":status", b"200"),
(b"content-type", b"text/plain"),
],
stream_id=15,
stream_ended=False,
push_id=0,
),
DataReceived(
data=b"t", stream_id=15, stream_ended=False, push_id=0
),
DataReceived(
data=b"e", stream_id=15, stream_ended=False, push_id=0
),
DataReceived(
data=b"x", stream_id=15, stream_ended=False, push_id=0
),
DataReceived(
data=b"t", stream_id=15, stream_ended=False, push_id=0
),
DataReceived(data=b"", stream_id=15, stream_ended=True, push_id=0),
],
)
def test_request_with_server_push(self):
with h3_client_and_server() as (quic_client, quic_server):
h3_client = H3Connection(quic_client)
h3_server = H3Connection(quic_server)
# send request
stream_id = quic_client.get_next_available_stream_id()
h3_client.send_headers(
stream_id=stream_id,
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/"),
],
end_stream=True,
)
# receive request
events = h3_transfer(quic_client, h3_server)
self.assertEqual(
events,
[
HeadersReceived(
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/"),
],
stream_id=stream_id,
stream_ended=True,
)
],
)
# send push promises
push_stream_id_css = h3_server.send_push_promise(
stream_id=stream_id,
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/app.css"),
],
)
self.assertEqual(push_stream_id_css, 15)
push_stream_id_js = h3_server.send_push_promise(
stream_id=stream_id,
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/app.js"),
],
)
self.assertEqual(push_stream_id_js, 19)
# send response
h3_server.send_headers(
stream_id=stream_id,
headers=[
(b":status", b"200"),
(b"content-type", b"text/html; charset=utf-8"),
],
end_stream=False,
)
h3_server.send_data(
stream_id=stream_id,
data=b"<html><body>hello</body></html>",
end_stream=True,
)
# fulfill push promises
h3_server.send_headers(
stream_id=push_stream_id_css,
headers=[(b":status", b"200"), (b"content-type", b"text/css")],
end_stream=False,
)
h3_server.send_data(
stream_id=push_stream_id_css,
data=b"body { color: pink }",
end_stream=True,
)
h3_server.send_headers(
stream_id=push_stream_id_js,
headers=[
(b":status", b"200"),
(b"content-type", b"application/javascript"),
],
end_stream=False,
)
h3_server.send_data(
stream_id=push_stream_id_js, data=b"alert('howdee');", end_stream=True
)
# receive push promises, response and push responses
events = h3_transfer(quic_server, h3_client)
self.assertEqual(
events,
[
PushPromiseReceived(
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/app.css"),
],
push_id=0,
stream_id=stream_id,
),
PushPromiseReceived(
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/app.js"),
],
push_id=1,
stream_id=stream_id,
),
HeadersReceived(
headers=[
(b":status", b"200"),
(b"content-type", b"text/html; charset=utf-8"),
],
stream_id=stream_id,
stream_ended=False,
),
DataReceived(
data=b"<html><body>hello</body></html>",
stream_id=stream_id,
stream_ended=True,
),
HeadersReceived(
headers=[(b":status", b"200"), (b"content-type", b"text/css")],
push_id=0,
stream_id=push_stream_id_css,
stream_ended=False,
),
DataReceived(
data=b"body { color: pink }",
push_id=0,
stream_id=push_stream_id_css,
stream_ended=True,
),
HeadersReceived(
headers=[
(b":status", b"200"),
(b"content-type", b"application/javascript"),
],
push_id=1,
stream_id=push_stream_id_js,
stream_ended=False,
),
DataReceived(
data=b"alert('howdee');",
push_id=1,
stream_id=push_stream_id_js,
stream_ended=True,
),
],
)
def test_request_with_server_push_max_push_id(self):
with h3_client_and_server() as (quic_client, quic_server):
h3_client = H3Connection(quic_client)
h3_server = H3Connection(quic_server)
# send request
stream_id = quic_client.get_next_available_stream_id()
h3_client.send_headers(
stream_id=stream_id,
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/"),
],
end_stream=True,
)
# receive request
events = h3_transfer(quic_client, h3_server)
self.assertEqual(
events,
[
HeadersReceived(
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/"),
],
stream_id=stream_id,
stream_ended=True,
)
],
)
# send push promises
for i in range(0, 8):
h3_server.send_push_promise(
stream_id=stream_id,
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", "/{}.css".format(i).encode("ascii")),
],
)
# send one too many
with self.assertRaises(NoAvailablePushIDError):
h3_server.send_push_promise(
stream_id=stream_id,
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/8.css"),
],
)
def test_send_data_after_trailers(self):
"""
We should not send DATA after trailers.
"""
quic_client = FakeQuicConnection(
configuration=QuicConfiguration(is_client=True)
)
h3_client = H3Connection(quic_client)
stream_id = quic_client.get_next_available_stream_id()
h3_client.send_headers(
stream_id=stream_id,
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/"),
],
)
h3_client.send_headers(
stream_id=stream_id, headers=[(b"x-some-trailer", b"foo")], end_stream=False
)
with self.assertRaises(FrameUnexpected):
h3_client.send_data(stream_id=stream_id, data=b"hello", end_stream=False)
def test_send_data_before_headers(self):
"""
We should not send DATA before headers.
"""
quic_client = FakeQuicConnection(
configuration=QuicConfiguration(is_client=True)
)
h3_client = H3Connection(quic_client)
stream_id = quic_client.get_next_available_stream_id()
with self.assertRaises(FrameUnexpected):
h3_client.send_data(stream_id=stream_id, data=b"hello", end_stream=False)
def test_send_headers_after_trailers(self):
"""
We should not send HEADERS after trailers.
"""
quic_client = FakeQuicConnection(
configuration=QuicConfiguration(is_client=True)
)
h3_client = H3Connection(quic_client)
stream_id = quic_client.get_next_available_stream_id()
h3_client.send_headers(
stream_id=stream_id,
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/"),
],
)
h3_client.send_headers(
stream_id=stream_id, headers=[(b"x-some-trailer", b"foo")], end_stream=False
)
with self.assertRaises(FrameUnexpected):
h3_client.send_headers(
stream_id=stream_id,
headers=[(b"x-other-trailer", b"foo")],
end_stream=False,
)
def test_blocked_stream(self):
quic_client = FakeQuicConnection(
configuration=QuicConfiguration(is_client=True)
)
h3_client = H3Connection(quic_client)
h3_client.handle_event(
StreamDataReceived(
stream_id=3,
data=binascii.unhexlify(
"0004170150000680020000074064091040bcc0000000faceb00c"
),
end_stream=False,
)
)
h3_client.handle_event(
StreamDataReceived(stream_id=7, data=b"\x02", end_stream=False)
)
h3_client.handle_event(
StreamDataReceived(stream_id=11, data=b"\x03", end_stream=False)
)
h3_client.handle_event(
StreamDataReceived(
stream_id=0, data=binascii.unhexlify("01040280d910"), end_stream=False
)
)
h3_client.handle_event(
StreamDataReceived(
stream_id=0,
data=binascii.unhexlify(
"00408d796f752072656163686564206d766673742e6e65742c20726561636820"
"746865202f6563686f20656e64706f696e7420666f7220616e206563686f2072"
"6573706f6e7365207175657279202f3c6e756d6265723e20656e64706f696e74"
"7320666f722061207661726961626c652073697a6520726573706f6e73652077"
"6974682072616e646f6d206279746573"
),
end_stream=True,
)
)
self.assertEqual(
h3_client.handle_event(
StreamDataReceived(
stream_id=7,
data=binascii.unhexlify(
"3fe101c696d07abe941094cb6d0a08017d403971966e32ca98b46f"
),
end_stream=False,
)
),
[
HeadersReceived(
headers=[
(b":status", b"200"),
(b"date", b"Mon, 22 Jul 2019 06:33:33 GMT"),
],
stream_id=0,
stream_ended=False,
),
DataReceived(
data=(
b"you reached mvfst.net, reach the /echo endpoint for an "
b"echo response query /<number> endpoints for a variable "
b"size response with random bytes"
),
stream_id=0,
stream_ended=True,
),
],
)
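    # Editor's note: the hex blobs above replay a capture from mvfst.net. The
    # header block on stream 0 references QPACK dynamic-table entries that only
    # arrive later on the encoder stream (stream 7), so stream 0 is "blocked":
    # no events are emitted until the encoder data lands, after which the
    # buffered HEADERS and DATA surface together in one batch.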
def test_blocked_stream_trailer(self):
quic_client = FakeQuicConnection(
configuration=QuicConfiguration(is_client=True)
)
h3_client = H3Connection(quic_client)
h3_client.handle_event(
StreamDataReceived(
stream_id=3,
data=binascii.unhexlify(
"0004170150000680020000074064091040bcc0000000faceb00c"
),
end_stream=False,
)
)
h3_client.handle_event(
StreamDataReceived(stream_id=7, data=b"\x02", end_stream=False)
)
h3_client.handle_event(
StreamDataReceived(stream_id=11, data=b"\x03", end_stream=False)
)
self.assertEqual(
h3_client.handle_event(
StreamDataReceived(
stream_id=0,
data=binascii.unhexlify(
"011b0000d95696d07abe941094cb6d0a08017d403971966e32ca98b46f"
),
end_stream=False,
)
),
[
HeadersReceived(
headers=[
(b":status", b"200"),
(b"date", b"Mon, 22 Jul 2019 06:33:33 GMT"),
],
stream_id=0,
stream_ended=False,
)
],
)
self.assertEqual(
h3_client.handle_event(
StreamDataReceived(
stream_id=0,
data=binascii.unhexlify(
"00408d796f752072656163686564206d766673742e6e65742c20726561636820"
"746865202f6563686f20656e64706f696e7420666f7220616e206563686f2072"
"6573706f6e7365207175657279202f3c6e756d6265723e20656e64706f696e74"
"7320666f722061207661726961626c652073697a6520726573706f6e73652077"
"6974682072616e646f6d206279746573"
),
end_stream=False,
)
),
[
DataReceived(
data=(
b"you reached mvfst.net, reach the /echo endpoint for an "
b"echo response query /<number> endpoints for a variable "
b"size response with random bytes"
),
stream_id=0,
stream_ended=False,
)
],
)
self.assertEqual(
h3_client.handle_event(
StreamDataReceived(
stream_id=0, data=binascii.unhexlify("0103028010"), end_stream=True
)
),
[],
)
self.assertEqual(
h3_client.handle_event(
StreamDataReceived(
stream_id=7,
data=binascii.unhexlify("6af2b20f49564d833505b38294e7"),
end_stream=False,
)
),
[
HeadersReceived(
headers=[(b"x-some-trailer", b"foo")],
stream_id=0,
stream_ended=True,
push_id=None,
)
],
)
def test_uni_stream_grease(self):
with h3_client_and_server() as (quic_client, quic_server):
h3_server = H3Connection(quic_server)
quic_client.send_stream_data(
14, b"\xff\xff\xff\xff\xff\xff\xff\xfeGREASE is the word"
)
self.assertEqual(h3_transfer(quic_client, h3_server), [])
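    # Editor's note: the leading bytes decode to a varint in the reserved
    # stream-type space 0x1f * N + 0x21 (RFC 9114, section 6.2.3). Such
    # "GREASE" streams must be ignored rather than treated as an error, hence
    # no events and no connection close.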
def test_request_with_trailers(self):
with h3_client_and_server() as (quic_client, quic_server):
h3_client = H3Connection(quic_client)
h3_server = H3Connection(quic_server)
# send request with trailers
stream_id = quic_client.get_next_available_stream_id()
h3_client.send_headers(
stream_id=stream_id,
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/"),
],
end_stream=False,
)
h3_client.send_headers(
stream_id=stream_id,
headers=[(b"x-some-trailer", b"foo")],
end_stream=True,
)
# receive request
events = h3_transfer(quic_client, h3_server)
self.assertEqual(
events,
[
HeadersReceived(
headers=[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":authority", b"localhost"),
(b":path", b"/"),
],
stream_id=stream_id,
stream_ended=False,
),
HeadersReceived(
headers=[(b"x-some-trailer", b"foo")],
stream_id=stream_id,
stream_ended=True,
),
],
)
# send response
h3_server.send_headers(
stream_id=stream_id,
headers=[
(b":status", b"200"),
(b"content-type", b"text/html; charset=utf-8"),
],
end_stream=False,
)
h3_server.send_data(
stream_id=stream_id,
data=b"<html><body>hello</body></html>",
end_stream=False,
)
h3_server.send_headers(
stream_id=stream_id,
headers=[(b"x-some-trailer", b"bar")],
end_stream=True,
)
# receive response
events = h3_transfer(quic_server, h3_client)
self.assertEqual(
events,
[
HeadersReceived(
headers=[
(b":status", b"200"),
(b"content-type", b"text/html; charset=utf-8"),
],
stream_id=stream_id,
stream_ended=False,
),
DataReceived(
data=b"<html><body>hello</body></html>",
stream_id=stream_id,
stream_ended=False,
),
HeadersReceived(
headers=[(b"x-some-trailer", b"bar")],
stream_id=stream_id,
stream_ended=True,
),
],
)
def test_uni_stream_type(self):
with h3_client_and_server() as (quic_client, quic_server):
h3_server = H3Connection(quic_server)
# unknown stream type 9
stream_id = quic_client.get_next_available_stream_id(is_unidirectional=True)
self.assertEqual(stream_id, 2)
quic_client.send_stream_data(stream_id, b"\x09")
self.assertEqual(h3_transfer(quic_client, h3_server), [])
self.assertEqual(list(h3_server._stream.keys()), [2])
self.assertEqual(h3_server._stream[2].buffer, b"")
self.assertEqual(h3_server._stream[2].stream_type, 9)
# unknown stream type 64, one byte at a time
stream_id = quic_client.get_next_available_stream_id(is_unidirectional=True)
self.assertEqual(stream_id, 6)
quic_client.send_stream_data(stream_id, b"\x40")
self.assertEqual(h3_transfer(quic_client, h3_server), [])
self.assertEqual(list(h3_server._stream.keys()), [2, 6])
self.assertEqual(h3_server._stream[2].buffer, b"")
self.assertEqual(h3_server._stream[2].stream_type, 9)
self.assertEqual(h3_server._stream[6].buffer, b"\x40")
self.assertEqual(h3_server._stream[6].stream_type, None)
quic_client.send_stream_data(stream_id, b"\x40")
self.assertEqual(h3_transfer(quic_client, h3_server), [])
self.assertEqual(list(h3_server._stream.keys()), [2, 6])
self.assertEqual(h3_server._stream[2].buffer, b"")
self.assertEqual(h3_server._stream[2].stream_type, 9)
self.assertEqual(h3_server._stream[6].buffer, b"")
self.assertEqual(h3_server._stream[6].stream_type, 64)
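    # Editor's note: b"\x40" alone is an incomplete QUIC varint, since the two
    # high bits 0b01 announce a 2-byte encoding; the stream type therefore
    # stays None until the second byte arrives:
    # ((0x40 & 0x3f) << 8) | 0x40 = 64.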
def test_validate_settings_h3_datagram_invalid_value(self):
quic_server = FakeQuicConnection(
configuration=QuicConfiguration(is_client=False)
)
h3_server = H3Connection(quic_server)
# receive SETTINGS with an invalid H3_DATAGRAM value
settings = copy.copy(DUMMY_SETTINGS)
settings[Setting.H3_DATAGRAM] = 2
h3_server.handle_event(
StreamDataReceived(
stream_id=2,
data=encode_uint_var(StreamType.CONTROL)
+ encode_frame(FrameType.SETTINGS, encode_settings(settings)),
end_stream=False,
)
)
self.assertEqual(
quic_server.closed,
(
ErrorCode.H3_SETTINGS_ERROR,
"H3_DATAGRAM setting must be 0 or 1",
),
)
def test_validate_settings_h3_datagram_without_transport_parameter(self):
quic_server = FakeQuicConnection(
configuration=QuicConfiguration(is_client=False)
)
h3_server = H3Connection(quic_server)
# receive SETTINGS with H3_DATAGRAM=1 but no max_datagram_frame_size TP
settings = copy.copy(DUMMY_SETTINGS)
settings[Setting.H3_DATAGRAM] = 1
h3_server.handle_event(
StreamDataReceived(
stream_id=2,
data=encode_uint_var(StreamType.CONTROL)
+ encode_frame(FrameType.SETTINGS, encode_settings(settings)),
end_stream=False,
)
)
self.assertEqual(
quic_server.closed,
(
ErrorCode.H3_SETTINGS_ERROR,
"H3_DATAGRAM requires max_datagram_frame_size transport parameter",
),
)
def test_validate_settings_enable_connect_protocol_invalid_value(self):
quic_server = FakeQuicConnection(
configuration=QuicConfiguration(is_client=False)
)
h3_server = H3Connection(quic_server)
# receive SETTINGS with an invalid ENABLE_CONNECT_PROTOCOL value
settings = copy.copy(DUMMY_SETTINGS)
settings[Setting.ENABLE_CONNECT_PROTOCOL] = 2
h3_server.handle_event(
StreamDataReceived(
stream_id=2,
data=encode_uint_var(StreamType.CONTROL)
+ encode_frame(FrameType.SETTINGS, encode_settings(settings)),
end_stream=False,
)
)
self.assertEqual(
quic_server.closed,
(
ErrorCode.H3_SETTINGS_ERROR,
"ENABLE_CONNECT_PROTOCOL setting must be 0 or 1",
),
)
def test_validate_settings_enable_webtransport_invalid_value(self):
quic_server = FakeQuicConnection(
configuration=QuicConfiguration(is_client=False)
)
h3_server = H3Connection(quic_server)
# receive SETTINGS with an invalid ENABLE_WEBTRANSPORT value
settings = copy.copy(DUMMY_SETTINGS)
settings[Setting.ENABLE_WEBTRANSPORT] = 2
h3_server.handle_event(
StreamDataReceived(
stream_id=2,
data=encode_uint_var(StreamType.CONTROL)
+ encode_frame(FrameType.SETTINGS, encode_settings(settings)),
end_stream=False,
)
)
self.assertEqual(
quic_server.closed,
(
ErrorCode.H3_SETTINGS_ERROR,
"ENABLE_WEBTRANSPORT setting must be 0 or 1",
),
)
def test_validate_settings_enable_webtransport_without_h3_datagram(self):
quic_server = FakeQuicConnection(
configuration=QuicConfiguration(is_client=False)
)
h3_server = H3Connection(quic_server)
# receive SETTINGS requesting WebTransport, but DATAGRAM was not offered
settings = copy.copy(DUMMY_SETTINGS)
settings[Setting.ENABLE_WEBTRANSPORT] = 1
h3_server.handle_event(
StreamDataReceived(
stream_id=2,
data=encode_uint_var(StreamType.CONTROL)
+ encode_frame(FrameType.SETTINGS, encode_settings(settings)),
end_stream=False,
)
)
self.assertEqual(
quic_server.closed,
(
ErrorCode.H3_SETTINGS_ERROR,
"ENABLE_WEBTRANSPORT requires H3_DATAGRAM",
),
)
class H3ParserTest(TestCase):
def test_parse_settings_duplicate_identifier(self):
buf = Buffer(capacity=1024)
buf.push_uint_var(1)
buf.push_uint_var(123)
buf.push_uint_var(1)
buf.push_uint_var(456)
with self.assertRaises(SettingsError) as cm:
parse_settings(buf.data)
self.assertEqual(
cm.exception.reason_phrase, "Setting identifier 0x1 is included twice"
)
def test_parse_settings_reserved_identifier(self):
buf = Buffer(capacity=1024)
buf.push_uint_var(0)
buf.push_uint_var(123)
with self.assertRaises(SettingsError) as cm:
parse_settings(buf.data)
self.assertEqual(
cm.exception.reason_phrase, "Setting identifier 0x0 is reserved"
)
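    # Editor's note: HTTP/3 reserves the setting identifiers 0x00 and
    # 0x02-0x05 that were defined for HTTP/2 (RFC 9114); parse_settings
    # surfaces receipt of one of these as a SettingsError.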
def test_validate_push_promise_headers(self):
# OK
validate_push_promise_headers(
[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":path", b"/"),
(b":authority", b"localhost"),
]
)
validate_push_promise_headers(
[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":path", b"/"),
(b":authority", b"localhost"),
(b"x-foo", b"bar"),
]
)
# invalid pseudo-header
with self.assertRaises(MessageError) as cm:
validate_push_promise_headers([(b":status", b"foo")])
self.assertEqual(
cm.exception.reason_phrase, "Pseudo-header b':status' is not valid"
)
# duplicate pseudo-header
with self.assertRaises(MessageError) as cm:
validate_push_promise_headers(
[
(b":method", b"GET"),
(b":method", b"POST"),
]
)
self.assertEqual(
cm.exception.reason_phrase, "Pseudo-header b':method' is included twice"
)
# pseudo-header after regular headers
with self.assertRaises(MessageError) as cm:
validate_push_promise_headers(
[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":path", b"/"),
(b"x-foo", b"bar"),
(b":authority", b"foo"),
]
)
self.assertEqual(
cm.exception.reason_phrase,
"Pseudo-header b':authority' is not allowed after regular headers",
)
# missing pseudo-headers
with self.assertRaises(MessageError) as cm:
validate_push_promise_headers(
[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":path", b"/"),
]
)
self.assertEqual(
cm.exception.reason_phrase,
"Pseudo-headers [b':authority'] are missing",
)
def test_validate_request_headers(self):
# OK
validate_request_headers(
[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":path", b"/"),
(b":authority", b"localhost"),
]
)
validate_request_headers(
[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":path", b"/"),
(b":authority", b"localhost"),
(b"x-foo", b"bar"),
]
)
# uppercase header
with self.assertRaises(MessageError) as cm:
validate_request_headers([(b"X-Foo", b"foo")])
self.assertEqual(
cm.exception.reason_phrase, "Header b'X-Foo' contains uppercase letters"
)
# invalid pseudo-header
with self.assertRaises(MessageError) as cm:
validate_request_headers([(b":status", b"foo")])
self.assertEqual(
cm.exception.reason_phrase, "Pseudo-header b':status' is not valid"
)
# duplicate pseudo-header
with self.assertRaises(MessageError) as cm:
validate_request_headers(
[
(b":method", b"GET"),
(b":method", b"POST"),
]
)
self.assertEqual(
cm.exception.reason_phrase, "Pseudo-header b':method' is included twice"
)
# pseudo-header after regular headers
with self.assertRaises(MessageError) as cm:
validate_request_headers(
[
(b":method", b"GET"),
(b":scheme", b"https"),
(b":path", b"/"),
(b"x-foo", b"bar"),
(b":authority", b"foo"),
]
)
self.assertEqual(
cm.exception.reason_phrase,
"Pseudo-header b':authority' is not allowed after regular headers",
)
# missing pseudo-headers
with self.assertRaises(MessageError) as cm:
validate_request_headers([(b":method", b"GET")])
self.assertEqual(
cm.exception.reason_phrase,
"Pseudo-headers [b':authority'] are missing",
)
# empty :authority pseudo-header for http/https
for scheme in [b"http", b"https"]:
with self.assertRaises(MessageError) as cm:
validate_request_headers(
[
(b":method", b"GET"),
(b":scheme", scheme),
(b":authority", b""),
(b":path", b"/"),
]
)
self.assertEqual(
cm.exception.reason_phrase,
"Pseudo-header b':authority' cannot be empty",
)
# empty :path pseudo-header for http/https
for scheme in [b"http", b"https"]:
with self.assertRaises(MessageError) as cm:
validate_request_headers(
[
(b":method", b"GET"),
(b":scheme", scheme),
(b":authority", b"localhost"),
(b":path", b""),
]
)
self.assertEqual(
cm.exception.reason_phrase, "Pseudo-header b':path' cannot be empty"
)
def test_validate_response_headers(self):
# OK
validate_response_headers([(b":status", b"200")])
validate_response_headers(
[
(b":status", b"200"),
(b"x-foo", b"bar"),
]
)
# invalid pseudo-header
with self.assertRaises(MessageError) as cm:
validate_response_headers([(b":method", b"GET")])
self.assertEqual(
cm.exception.reason_phrase, "Pseudo-header b':method' is not valid"
)
# duplicate pseudo-header
with self.assertRaises(MessageError) as cm:
validate_response_headers(
[
(b":status", b"200"),
(b":status", b"501"),
]
)
self.assertEqual(
cm.exception.reason_phrase, "Pseudo-header b':status' is included twice"
)
def test_validate_trailers(self):
# OK
validate_trailers([(b"x-foo", b"bar")])
# invalid pseudo-header
with self.assertRaises(MessageError) as cm:
validate_trailers([(b":status", b"foo")])
self.assertEqual(
cm.exception.reason_phrase, "Pseudo-header b':status' is not valid"
)
# pseudo-header after regular headers
with self.assertRaises(MessageError) as cm:
validate_trailers(
[
(b"x-foo", b"bar"),
(b":authority", b"foo"),
]
)
self.assertEqual(
cm.exception.reason_phrase,
"Pseudo-header b':authority' is not allowed after regular headers",
)
| 34.699066
| 90
| 0.500593
|
| true
| true
|
f7193fce1fa7b0ba46bf2b1d2e9a114b6453e540
| 2,901
|
py
|
Python
|
scripts/md_to_html.py
|
fossabot/granite
|
7eab82126d0cddb4fdad0c3ba2c6f431eea19cfb
|
[
"MIT"
] | null | null | null |
scripts/md_to_html.py
|
fossabot/granite
|
7eab82126d0cddb4fdad0c3ba2c6f431eea19cfb
|
[
"MIT"
] | 3
|
2021-02-06T17:29:31.000Z
|
2021-05-27T20:48:58.000Z
|
scripts/md_to_html.py
|
fossabot/granite
|
7eab82126d0cddb4fdad0c3ba2c6f431eea19cfb
|
[
"MIT"
] | 2
|
2021-02-02T23:07:50.000Z
|
2021-03-27T22:06:27.000Z
|
#! /usr/bin/env python3
# Script from https://gist.github.com/jiffyclub/5015986
# This script turns Markdown into HTML using the Python markdown library and wraps the result in a
# complete HTML document with default Bootstrap styling so that it's immediately printable.
# Requires the Python libraries jinja2 and markdown (the 'smarty' extension used below ships with markdown).
import argparse
import sys
import jinja2
import markdown
# To install dependencies in a virtualenv:
# $ py -3 -m venv .venv
# $ .venv/Scripts/activate
# $ pip install jinja2
# $ pip install markdown
#
# To install dependencies on Ubuntu:
# $ sudo apt-get install python3-jinja2 python3-markdown
TEMPLATE = """<!DOCTYPE html>
<html>
<head>
<link href="http://netdna.bootstrapcdn.com/twitter-bootstrap/2.3.0/css/bootstrap-combined.min.css" rel="stylesheet">
<style>
body {
font-family: sans-serif;
}
code, pre {
font-family: monospace;
}
h1 code,
h2 code,
h3 code,
h4 code,
h5 code,
h6 code {
font-size: inherit;
}
</style>
</head>
<body>
<div class="container">
{{content}}
</div>
</body>
</html>
"""
# TEMPLATE = """<!DOCTYPE html>
# <html>
# <head>
# <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
# <meta name="referrer" content="no-referrer" />
# <meta name="referrer" content="unsafe-url" />
# <meta name="referrer" content="origin" />
# <meta name="referrer" content="no-referrer-when-downgrade" />
# <meta name="referrer" content="origin-when-cross-origin" />
# <title>Page Title</title>
# <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" rel="stylesheet">
# <style>
# body {
# font-family: Helvetica,Arial,sans-serif;
# }
# code, pre {
# font-family: monospace;
# }
# </style>
# </head>
# <body>
# <div class="container">
# {{content}}
# </div>
# </body>
# </html>
# """
def parse_args(args=None):
d = 'Make a complete, styled HTML document from a Markdown file.'
parser = argparse.ArgumentParser(description=d)
parser.add_argument('mdfile', type=argparse.FileType('r'), nargs='?',
default=sys.stdin,
help='File to convert. Defaults to stdin.')
parser.add_argument('-o', '--out', type=argparse.FileType('w'),
default=sys.stdout,
help='Output file name. Defaults to stdout.')
return parser.parse_args(args)
def main(args=None):
args = parse_args(args)
md = args.mdfile.read()
extensions = ['extra', 'smarty']
html = markdown.markdown(md, extensions=extensions, output_format='html5')
doc = jinja2.Template(TEMPLATE).render(content=html)
args.out.write(doc)
if __name__ == '__main__':
    sys.exit(main())
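# Example invocations (hypothetical filenames; the input defaults to stdin and
# the output to stdout, per the argparse definitions above):
#   $ python3 md_to_html.py README.md -o README.html
#   $ cat notes.md | python3 md_to_html.py > notes.html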
| 29.30303
| 120
| 0.612203
|
| true
| true
|
f7193fdcc986cc8bca9f6efe5170d075ac0c3ace
| 7,030
|
py
|
Python
|
dm_control/rl/specs_test.py
|
1nadequacy/dm_control
|
a55474768cf0a6d570fe4a376802630027ad5f01
|
[
"Apache-2.0"
] | null | null | null |
dm_control/rl/specs_test.py
|
1nadequacy/dm_control
|
a55474768cf0a6d570fe4a376802630027ad5f01
|
[
"Apache-2.0"
] | null | null | null |
dm_control/rl/specs_test.py
|
1nadequacy/dm_control
|
a55474768cf0a6d570fe4a376802630027ad5f01
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for specs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Internal dependencies.
from absl.testing import absltest
from dm_control.rl import specs as array_spec
import numpy as np
import six
class ArraySpecTest(absltest.TestCase):
def testShapeTypeError(self):
with self.assertRaises(TypeError):
array_spec.ArraySpec(32, np.int32)
def testDtypeTypeError(self):
with self.assertRaises(TypeError):
array_spec.ArraySpec((1, 2, 3), "32")
def testStringDtype(self):
array_spec.ArraySpec((1, 2, 3), "int32")
def testNumpyDtype(self):
array_spec.ArraySpec((1, 2, 3), np.int32)
def testDtype(self):
spec = array_spec.ArraySpec((1, 2, 3), np.int32)
self.assertEqual(np.int32, spec.dtype)
def testShape(self):
spec = array_spec.ArraySpec([1, 2, 3], np.int32)
self.assertEqual((1, 2, 3), spec.shape)
def testEqual(self):
spec_1 = array_spec.ArraySpec((1, 2, 3), np.int32)
spec_2 = array_spec.ArraySpec((1, 2, 3), np.int32)
self.assertEqual(spec_1, spec_2)
def testNotEqualDifferentShape(self):
spec_1 = array_spec.ArraySpec((1, 2, 3), np.int32)
spec_2 = array_spec.ArraySpec((1, 3, 3), np.int32)
self.assertNotEqual(spec_1, spec_2)
def testNotEqualDifferentDtype(self):
spec_1 = array_spec.ArraySpec((1, 2, 3), np.int64)
spec_2 = array_spec.ArraySpec((1, 2, 3), np.int32)
self.assertNotEqual(spec_1, spec_2)
def testNotEqualOtherClass(self):
spec_1 = array_spec.ArraySpec((1, 2, 3), np.int32)
spec_2 = None
self.assertNotEqual(spec_1, spec_2)
self.assertNotEqual(spec_2, spec_1)
spec_2 = ()
self.assertNotEqual(spec_1, spec_2)
self.assertNotEqual(spec_2, spec_1)
def testIsUnhashable(self):
spec = array_spec.ArraySpec(shape=(1, 2, 3), dtype=np.int32)
with self.assertRaisesRegexp(TypeError, "unhashable type"):
hash(spec)
def testValidateDtype(self):
spec = array_spec.ArraySpec((1, 2), np.int32)
spec.validate(np.zeros((1, 2), dtype=np.int32))
with self.assertRaises(ValueError):
spec.validate(np.zeros((1, 2), dtype=np.float32))
def testValidateShape(self):
spec = array_spec.ArraySpec((1, 2), np.int32)
spec.validate(np.zeros((1, 2), dtype=np.int32))
with self.assertRaises(ValueError):
spec.validate(np.zeros((1, 2, 3), dtype=np.int32))
def testGenerateValue(self):
spec = array_spec.ArraySpec((1, 2), np.int32)
test_value = spec.generate_value()
spec.validate(test_value)
class BoundedArraySpecTest(absltest.TestCase):
def testInvalidMinimum(self):
with six.assertRaisesRegex(self, ValueError, "not compatible"):
array_spec.BoundedArraySpec((3, 5), np.uint8, (0, 0, 0), (1, 1))
def testInvalidMaximum(self):
with six.assertRaisesRegex(self, ValueError, "not compatible"):
array_spec.BoundedArraySpec((3, 5), np.uint8, 0, (1, 1, 1))
def testMinMaxAttributes(self):
spec = array_spec.BoundedArraySpec((1, 2, 3), np.float32, 0, (5, 5, 5))
self.assertEqual(type(spec.minimum), np.ndarray)
self.assertEqual(type(spec.maximum), np.ndarray)
def testNotWriteable(self):
spec = array_spec.BoundedArraySpec((1, 2, 3), np.float32, 0, (5, 5, 5))
with six.assertRaisesRegex(self, ValueError, "read-only"):
spec.minimum[0] = -1
with six.assertRaisesRegex(self, ValueError, "read-only"):
spec.maximum[0] = 100
def testEqualBroadcastingBounds(self):
spec_1 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=0.0, maximum=1.0)
spec_2 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=[0.0, 0.0], maximum=[1.0, 1.0])
self.assertEqual(spec_1, spec_2)
def testNotEqualDifferentMinimum(self):
spec_1 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=[0.0, -0.6], maximum=[1.0, 1.0])
spec_2 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=[0.0, 0.0], maximum=[1.0, 1.0])
self.assertNotEqual(spec_1, spec_2)
def testNotEqualOtherClass(self):
spec_1 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=[0.0, -0.6], maximum=[1.0, 1.0])
spec_2 = array_spec.ArraySpec((1, 2), np.int32)
self.assertNotEqual(spec_1, spec_2)
self.assertNotEqual(spec_2, spec_1)
spec_2 = None
self.assertNotEqual(spec_1, spec_2)
self.assertNotEqual(spec_2, spec_1)
spec_2 = ()
self.assertNotEqual(spec_1, spec_2)
self.assertNotEqual(spec_2, spec_1)
def testNotEqualDifferentMaximum(self):
spec_1 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=0.0, maximum=2.0)
spec_2 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=[0.0, 0.0], maximum=[1.0, 1.0])
self.assertNotEqual(spec_1, spec_2)
def testIsUnhashable(self):
spec = array_spec.BoundedArraySpec(
shape=(1, 2), dtype=np.int32, minimum=0.0, maximum=2.0)
with self.assertRaisesRegexp(TypeError, "unhashable type"):
hash(spec)
def testRepr(self):
as_string = repr(array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=101.0, maximum=73.0))
self.assertIn("101", as_string)
self.assertIn("73", as_string)
def testValidateBounds(self):
spec = array_spec.BoundedArraySpec((2, 2), np.int32, minimum=5, maximum=10)
spec.validate(np.array([[5, 6], [8, 10]], dtype=np.int32))
with self.assertRaises(ValueError):
spec.validate(np.array([[5, 6], [8, 11]], dtype=np.int32))
with self.assertRaises(ValueError):
spec.validate(np.array([[4, 6], [8, 10]], dtype=np.int32))
def testGenerateValue(self):
spec = array_spec.BoundedArraySpec((2, 2), np.int32, minimum=5, maximum=10)
test_value = spec.generate_value()
spec.validate(test_value)
def testScalarBounds(self):
    spec = array_spec.BoundedArraySpec((), np.float64, minimum=0.0, maximum=1.0)  # np.float was removed in NumPy 1.24
self.assertIsInstance(spec.minimum, np.ndarray)
self.assertIsInstance(spec.maximum, np.ndarray)
# Sanity check that numpy compares correctly to a scalar for an empty shape.
self.assertEqual(0.0, spec.minimum)
self.assertEqual(1.0, spec.maximum)
# Check that the spec doesn't fail its own input validation.
_ = array_spec.BoundedArraySpec(
spec.shape, spec.dtype, spec.minimum, spec.maximum)
if __name__ == "__main__":
absltest.main()
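# Minimal usage sketch of the API under test (non-authoritative; names follow
# dm_control.rl.specs exactly as exercised by the tests above):
#   spec = array_spec.BoundedArraySpec((2,), np.float32, minimum=-1.0, maximum=1.0)
#   value = spec.generate_value()  # an array satisfying shape, dtype and bounds
#   spec.validate(value)           # raises ValueError on any mismatch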
| 34.975124
| 80
| 0.683642
|
| true
| true
|
f7193fe758b15f3c5dd4561b3624e25cf35dca18
| 4,536
|
py
|
Python
|
Model_test.py
|
JoeTao-097/Multi-REZ-Evalution-for-Breast-Ultrasound-Images
|
344d64ad2fe9d790c49e8005b3abee219d362278
|
[
"Apache-2.0"
] | null | null | null |
Model_test.py
|
JoeTao-097/Multi-REZ-Evalution-for-Breast-Ultrasound-Images
|
344d64ad2fe9d790c49e8005b3abee219d362278
|
[
"Apache-2.0"
] | null | null | null |
Model_test.py
|
JoeTao-097/Multi-REZ-Evalution-for-Breast-Ultrasound-Images
|
344d64ad2fe9d790c49e8005b3abee219d362278
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 2 17:32:52 2021
@author: jiangyt
"""
from Tools import *
from tensorflow import keras
from tensorflow.keras.layers import Dense, Activation, Flatten, Dropout, Input, BatchNormalization
from tensorflow.keras.layers import Conv2D, MaxPooling2D, add, AveragePooling2D, ZeroPadding2D, GlobalAveragePooling2D
from tensorflow.keras.models import Model, Sequential
"""
Weight Dict
"""
Weight = {'Resnet50_448':"./model_checkpoints/ResNet50_448_checkpoints/20218131038.h5",
'MobileNet_224':"./model_checkpoints/MobileNet_224_checkpoints/202189956.h5",
'Xception_448':"./model_checkpoints/Xception_448_checkpoints/2021810951.h5",
'EfficientNet_B0_320':"./model_checkpoints/EfficientNetB0_320_checkpoints/2021871045.h5",
'DenseNet121_448':"./model_checkpoints/DenseNet121_448_checkpoints/2021891655.h5"}
"""
Load model
"""
df = pd.read_excel('./AI-Physician Comparasion Dataset.xlsx')
# df = pd.read_csv('/home/joe/Project/Breast_new/20210805_b_m_Xception_train/df_test_small.csv')
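# The evaluation loop below assumes df provides at least two columns:
#   'path'       - file path of each breast-ultrasound image
#   'malignancy' - ground-truth label (0 = benign, 1 = malignant)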
"""
Eval each model
"""
for key in Weight.keys():
if key == 'Resnet50_448':
from tensorflow.keras.applications.resnet50 import preprocess_input
backbone_model= keras.applications.resnet50.ResNet50(include_top=False, weights=None, input_tensor=None,
input_shape=(448, 448, 3), pooling=None, classes=2)
elif key == 'MobileNet_224':
from tensorflow.keras.applications.mobilenet import preprocess_input
backbone_model= keras.applications.mobilenet.MobileNet(include_top=False, weights=None, input_tensor=None,
input_shape=(224, 224, 3), pooling=None, classes=2)
elif key == 'Xception_448':
from tensorflow.keras.applications.xception import preprocess_input
backbone_model= keras.applications.xception.Xception(include_top=False, weights=None, input_tensor=None,
input_shape=(448, 448, 3), pooling=None, classes=2)
elif key == 'EfficientNet_B0_320':
from tensorflow.keras.applications.efficientnet import preprocess_input
backbone_model= keras.applications.efficientnet.EfficientNetB0(include_top=False, weights=None, input_tensor=None,
input_shape=(320, 320, 3), pooling=None, classes=2)
elif key == 'DenseNet121_448':
from tensorflow.keras.applications.densenet import preprocess_input
backbone_model = keras.applications.densenet.DenseNet121(include_top=False, weights="imagenet",
input_tensor=None,
input_shape=(448, 448, 3), pooling=None, classes=2)
    else:
        print('Error: no model weight found for {}'.format(key))
        continue  # avoid reusing the previous iteration's backbone and preprocess_input
test_model = Sequential()
test_model.add(backbone_model)
test_model.add(GlobalAveragePooling2D())
test_model.add(Dense(2, activation='softmax', name='fc1'))
test_model.load_weights(Weight[key])
test_model.summary()
y_true = []
y_pred = []
for i in range(len(df)):
y_true.append(df['malignancy'][i])
x = Image.open(df['path'][i])
x = np.array(x)
x = zero_pad(x,int(key.split('_')[-1]))
x = preprocess_input(x)
x = x.reshape(1,x.shape[0],x.shape[1],x.shape[2])
y_pred.append(test_model.predict(x))
y_pred = np.array(y_pred)
y_pred = y_pred.reshape(y_pred.shape[0],2)
y_pred_1 = y_pred[:,1]
thresh_0=get_auc(0, np.array(y_true), np.array(y_pred_1), 'Malignancy', plot=False)
y_pred_comp_lvl=[1 if y>thresh_0 else 0 for y in y_pred_1]
cm_comp=confusion_matrix(y_true, y_pred_comp_lvl)
fig, axes = plt.subplots(nrows=2, ncols=2)
fig.tight_layout(pad=2, w_pad=2.)
fig.set_figheight(8)
fig.set_figwidth(7)
thresh_0=get_auc(axes[0, 0], np.array(y_true), np.array(y_pred_1), 'Performance of {}'.format(key))
thresh_AP=get_precision_recall(axes[0, 1], np.array(y_true), np.array(y_pred_1), 'Malignancy=0 vs 1')
plot_confusion_matrix(axes[1, 0], cm_comp, ["0", "1"], title='Malignancy', normalize=False)
plot_confusion_matrix(axes[1, 1], cm_comp, ["0", "1"], title='Malignancy (normalized)')
print('f1 score is: {:.3f}'.format(f1_score(y_true, y_pred_comp_lvl)))
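    # Note: with a non-interactive matplotlib backend, call plt.savefig(...) or
    # plt.show() here so the figures assembled above are actually rendered.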
| 47.747368
| 123
| 0.646825
|
from Tools import *
from tensorflow import keras
from tensorflow.keras.layers import Dense, Activation, Flatten, Dropout, Input, BatchNormalization
from tensorflow.keras.layers import Conv2D, MaxPooling2D, add, AveragePooling2D, ZeroPadding2D, GlobalAveragePooling2D
from tensorflow.keras.models import Model, Sequential
Weight = {'Resnet50_448':"./model_checkpoints/ResNet50_448_checkpoints/20218131038.h5",
'MobileNet_224':"./model_checkpoints/MobileNet_224_checkpoints/202189956.h5",
'Xception_448':"./model_checkpoints/Xception_448_checkpoints/2021810951.h5",
'EfficientNet_B0_320':"./model_checkpoints/EfficientNetB0_320_checkpoints/2021871045.h5",
'DenseNet121_448':"./model_checkpoints/DenseNet121_448_checkpoints/2021891655.h5"}
df = pd.read_excel('./AI-Physician Comparasion Dataset.xlsx')
for key in Weight.keys():
if key == 'Resnet50_448':
from tensorflow.keras.applications.resnet50 import preprocess_input
backbone_model= keras.applications.resnet50.ResNet50(include_top=False, weights=None, input_tensor=None,
input_shape=(448, 448, 3), pooling=None, classes=2)
elif key == 'MobileNet_224':
from tensorflow.keras.applications.mobilenet import preprocess_input
backbone_model= keras.applications.mobilenet.MobileNet(include_top=False, weights=None, input_tensor=None,
input_shape=(224, 224, 3), pooling=None, classes=2)
elif key == 'Xception_448':
from tensorflow.keras.applications.xception import preprocess_input
backbone_model= keras.applications.xception.Xception(include_top=False, weights=None, input_tensor=None,
input_shape=(448, 448, 3), pooling=None, classes=2)
elif key == 'EfficientNet_B0_320':
from tensorflow.keras.applications.efficientnet import preprocess_input
backbone_model= keras.applications.efficientnet.EfficientNetB0(include_top=False, weights=None, input_tensor=None,
input_shape=(320, 320, 3), pooling=None, classes=2)
elif key == 'DenseNet121_448':
from tensorflow.keras.applications.densenet import preprocess_input
backbone_model = keras.applications.densenet.DenseNet121(include_top=False, weights="imagenet",
input_tensor=None,
input_shape=(448, 448, 3), pooling=None, classes=2)
    else:
        print('Error: no model weights found for {}'.format(key))
        continue
test_model = Sequential()
test_model.add(backbone_model)
test_model.add(GlobalAveragePooling2D())
test_model.add(Dense(2, activation='softmax', name='fc1'))
test_model.load_weights(Weight[key])
test_model.summary()
y_true = []
y_pred = []
for i in range(len(df)):
y_true.append(df['malignancy'][i])
x = Image.open(df['path'][i])
x = np.array(x)
x = zero_pad(x,int(key.split('_')[-1]))
x = preprocess_input(x)
x = x.reshape(1,x.shape[0],x.shape[1],x.shape[2])
y_pred.append(test_model.predict(x))
y_pred = np.array(y_pred)
y_pred = y_pred.reshape(y_pred.shape[0],2)
y_pred_1 = y_pred[:,1]
thresh_0=get_auc(0, np.array(y_true), np.array(y_pred_1), 'Malignancy', plot=False)
y_pred_comp_lvl=[1 if y>thresh_0 else 0 for y in y_pred_1]
cm_comp=confusion_matrix(y_true, y_pred_comp_lvl)
fig, axes = plt.subplots(nrows=2, ncols=2)
fig.tight_layout(pad=2, w_pad=2.)
fig.set_figheight(8)
fig.set_figwidth(7)
thresh_0=get_auc(axes[0, 0], np.array(y_true), np.array(y_pred_1), 'Performance of {}'.format(key))
thresh_AP=get_precision_recall(axes[0, 1], np.array(y_true), np.array(y_pred_1), 'Malignancy=0 vs 1')
plot_confusion_matrix(axes[1, 0], cm_comp, ["0", "1"], title='Malignancy', normalize=False)
plot_confusion_matrix(axes[1, 1], cm_comp, ["0", "1"], title='Malignancy (normalized)')
print('f1 score is: {:.3f}'.format(f1_score(y_true, y_pred_comp_lvl)))
| true
| true
|
f7194033eec50c8f02954cf2105d80b049769652
| 9,394
|
py
|
Python
|
prepare_data.py
|
Euro2xx/gansformer
|
83403cdb49e049e3b4d9f3472577f2ee73f7ba64
|
[
"MIT"
] | 1,172
|
2021-03-02T02:00:44.000Z
|
2022-03-31T02:46:45.000Z
|
prepare_data.py
|
Euro2xx/gansformer
|
83403cdb49e049e3b4d9f3472577f2ee73f7ba64
|
[
"MIT"
] | 37
|
2021-03-03T14:11:11.000Z
|
2022-03-12T15:40:15.000Z
|
prepare_data.py
|
Euro2xx/gansformer
|
83403cdb49e049e3b4d9f3472577f2ee73f7ba64
|
[
"MIT"
] | 138
|
2021-03-02T06:37:10.000Z
|
2022-03-30T14:59:09.000Z
|
# import warnings filter
from warnings import simplefilter
# ignore all future warnings
simplefilter(action = "ignore", category = FutureWarning)
import os
import sys
import tqdm
import time
import json
import glob
import gdown
import urllib
import zipfile
import hashlib
import argparse
import numpy as np
from training import misc
from dnnlib import EasyDict
import dataset_tool
catalog = {
"ffhq": EasyDict({
"name": "FFHQ", # Dataset name for logging
"filename": "ffhq-r08.tfrecords1of1", # Local file name
"url": "http://downloads.cs.stanford.edu/nlp/data/dorarad/ffhq-r08.tfrecords1of1", # download URL
"md5": "74de4f07dc7bfb07c0ad4471fdac5e67", # MD5 checksum to potentially skip download
"ratio": 1, # height/width ratio
"size": 13, # download size in GB
"shards": 1, # Number of tfrecord shards
"img_num": 70000 # Number of images
}),
"bedrooms": EasyDict({
"name": "LSUN-Bedrooms", # Dataset name for logging
"filename": "bedroom_train_lmdb.zip",
"url": "http://dl.yf.io/lsun/scenes/bedroom_train_lmdb.zip",
"md5": "f2c5d904a82a6295dbdccb322b4b0a99",
"dir": "bedroom_train_lmdb",
"ratio": 188/256,
"size": 43,
"shards": 64,
"img_num": 3033042,
"process": dataset_tool.create_from_lmdb # Function to convert download to tfrecords
}),
"cityscapes": EasyDict({
"name": "Cityscapes", # Dataset name for logging
"filename": "cityscapes.zip",
"url": "https://drive.google.com/uc?id=1t9Qhxm0iHFd3k-xTYEbKosSx_DkyoLLJ",
"md5": "953d231046275120dc1f73a5aebc9087",
"ratio": 0.5,
"size": 2,
"shards": 16,
"img_num": 25000
}),
"clevr": EasyDict({
"name": "CLEVR", # Dataset name for logging
"filename": "clevr.zip",
"url": "https://drive.google.com/uc?id=1lY4JE30yk26v0MWHNpXBOMzltufUcTXj",
"md5": "3040bb20a29cd2f0e1e9231aebddf2a1",
"size": 6,
"ratio": 0.75,
"shards": 5,
"img_num": 100000
##########################################################################################
# Currently, we download preprocessed TFrecords of CLEVR images with image ratio 0.75.
# To process instead the dataset from scratch (with the original image ratio of 320/480), add the following:
# "filename": "CLEVR_v1.0.zip",
# "size": 18,
# "dir": "CLEVR_v1.0/images", # Image directory to process while turning into tfrecords
# "url": "https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip",
# "md5": "b11922020e72d0cd9154779b2d3d07d2",
# "process": dataset_tool.create_from_imgs # Function to convert download to tfrecords
})
}
formats_catalog = {
"png": lambda tfdir, imgdir, **kwargs: dataset_tool.create_from_imgs(tfdir, imgdir, format = "png", **kwargs),
"jpg": lambda tfdir, imgdir, **kwargs: dataset_tool.create_from_imgs(tfdir, imgdir, format = "jpg", **kwargs),
"npy": dataset_tool.create_from_npy,
"hdf5": dataset_tool.create_from_hdf5,
"tfds": dataset_tool.create_from_tfds,
"lmdb": dataset_tool.create_from_lmdb
}
def mkdir(d):
if not os.path.exists(d):
try:
os.makedirs(d)
except:
pass
def verify_md5(filename, md5):
print("Verify MD5 for {}...".format(filename))
with open(filename, "rb") as f:
new_md5 = hashlib.md5(f.read()).hexdigest()
result = md5 == new_md5
if result:
print(misc.bold("MD5 matches!"))
else:
print("MD5 doesn't match. Will redownload the file.")
return result
def is_unzipped(zip, dir):
with zipfile.ZipFile(zip) as zf:
archive = zf.namelist()
all_exist = all(os.path.exists("{}/{}".format(dir, file)) for file in archive)
return all_exist
def unzip(zip, dir):
with zipfile.ZipFile(zip) as zf:
for member in tqdm.tqdm(zf.infolist(), desc = "Extracting "):
try:
zf.extract(member, dir)
except zipfile.error as e:
pass
def get_path(url, dir = None, path = None):
if path is None:
path = url.split("/")[-1]
if dir is not None:
path = "{}/{}".format(dir, path)
return path
def download_file(url, path, block_sz = 8192):
if "drive.google.com" in url:
gdown.download(url, path)
else:
u = urllib.request.urlopen(url)
with open(path, "wb") as f:
fsize = int(u.info().get_all("Content-Length")[0])
print("Downloading: %s Bytes: %s" % (path, fsize))
curr = 0
while True:
buffer = u.read(block_sz)
if not buffer:
break
curr += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (curr, curr * 100. / fsize)
status += chr(8) * (len(status) + 1)
print(status, end = "", flush = True)
def prepare(tasks, data_dir, shards_num = 1, max_images = None,
ratio = 1.0, images_dir = None, format = None): # Options for custom dataset
mkdir(data_dir)
for task in tasks:
# If task not in catalog, create custom task configuration
c = catalog.get(task, EasyDict({
"local": True,
"name": task,
"dir": images_dir,
"ratio": ratio,
"process": formats_catalog.get(format)
}))
dirname = "{}/{}".format(data_dir, task)
mkdir(dirname)
# try:
print(misc.bold("Preparing the {} dataset...".format(c.name)))
if "local" not in c:
fname = "{}/{}".format(dirname, c.filename)
download = not ((os.path.exists(fname) and verify_md5(fname, c.md5)))
path = get_path(c.url, dirname, path = c.filename)
if download:
print(misc.bold("Downloading the data ({} GB)...".format(c.size)))
download_file(c.url, path)
# print(misc.bold("Completed downloading {}".format(c.name)))
if path.endswith(".zip"):
if not is_unzipped(path, dirname):
print(misc.bold("Unzipping {}...".format(path)))
unzip(path, dirname)
# print(misc.bold("Completed unzipping {}".format(path)))
if "process" in c:
imgdir = images_dir if "local" in c else ("{}/{}".format(dirname, c.dir))
shards_num = c.shards if max_images is None else shards_num
c.process(dirname, imgdir, ratio = c.ratio,
shards_num = shards_num, max_imgs = max_images)
print(misc.bcolored("Completed preparations for {}!".format(c.name), "blue"))
# except:
# print(misc.bcolored("Had an error in preparing the {} dataset. Will move on.".format(c.name), "red"))
# print(sys.exc_info())
def run_cmdline(argv):
parser = argparse.ArgumentParser(prog = argv[0], description = "Download and prepare data for the GANformer.")
parser.add_argument("--data-dir", help = "Directory of created dataset", default = "datasets", type = str)
parser.add_argument("--shards-num", help = "Number of shards to split each dataset to (optional)", default = 1, type = int)
parser.add_argument("--max-images", help = "Maximum number of images to have in the dataset (optional). Use to reduce the produced tfrecords file size", default = None, type = int)
# Default tasks
parser.add_argument("--clevr", help = "Prepare the CLEVR dataset (6.41GB download, 31GB tfrecords, 100k images)", dest = "tasks", action = "append_const", const = "clevr")
parser.add_argument("--bedrooms", help = "Prepare the LSUN-bedrooms dataset (42.8GB download, up to 480GB tfrecords, 3M images)", dest = "tasks", action = "append_const", const = "bedrooms")
parser.add_argument("--ffhq", help = "Prepare the FFHQ dataset (13GB download, 13GB tfrecords, 70k images)", dest = "tasks", action = "append_const", const = "ffhq")
parser.add_argument("--cityscapes", help = "Prepare the cityscapes dataset (1.8GB download, 8GB tfrecords, 25k images)", dest = "tasks", action = "append_const", const = "cityscapes")
# Create a new task with custom images
parser.add_argument("--task", help = "New dataset name", type = str, dest = "tasks", action = "append")
parser.add_argument("--images-dir", help = "Provide source image directory to convert into tfrecords (will be searched recursively)", default = None, type = str)
parser.add_argument("--format", help = "Images format", default = None, choices = ["png", "jpg", "npy", "hdf5", "tfds", "lmdb"], type = str)
parser.add_argument("--ratio", help = "Images height/width", default = 1.0, type = float)
args = parser.parse_args()
if not args.tasks:
misc.error("No tasks specified. Please see '-h' for help.")
    if args.max_images is not None and args.max_images < 50000:
misc.log("Warning: max-images is set to {}. We recommend setting it at least to 50,000 to allow statistically correct computation of the FID-50k metric.".format(args.max_images), "red")
prepare(**vars(args))
if __name__ == "__main__":
run_cmdline(sys.argv)
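For reference, typical invocations using only the flags defined in run_cmdline above, plus an equivalent programmatic call (a sketch; it assumes this file's training/dnnlib/dataset_tool dependencies are importable):
# python prepare_data.py --clevr --data-dir datasets
# python prepare_data.py --task faces --images-dir ./faces --format png --max-images 50000
prepare(["clevr"], data_dir="datasets", shards_num=1, max_images=50000)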
| 43.091743
| 200
| 0.597828
|
from warnings import simplefilter
simplefilter(action = "ignore", category = FutureWarning)
import os
import sys
import tqdm
import time
import json
import glob
import gdown
import urllib
import zipfile
import hashlib
import argparse
import numpy as np
from training import misc
from dnnlib import EasyDict
import dataset_tool
catalog = {
"ffhq": EasyDict({
"name": "FFHQ",
"filename": "ffhq-r08.tfrecords1of1",
"url": "http://downloads.cs.stanford.edu/nlp/data/dorarad/ffhq-r08.tfrecords1of1",
"md5": "74de4f07dc7bfb07c0ad4471fdac5e67",
"ratio": 1,
"size": 13,
"shards": 1,
"img_num": 70000
}),
"bedrooms": EasyDict({
"name": "LSUN-Bedrooms",
"filename": "bedroom_train_lmdb.zip",
"url": "http://dl.yf.io/lsun/scenes/bedroom_train_lmdb.zip",
"md5": "f2c5d904a82a6295dbdccb322b4b0a99",
"dir": "bedroom_train_lmdb",
"ratio": 188/256,
"size": 43,
"shards": 64,
"img_num": 3033042,
"process": dataset_tool.create_from_lmdb
}),
"cityscapes": EasyDict({
"name": "Cityscapes",
"filename": "cityscapes.zip",
"url": "https://drive.google.com/uc?id=1t9Qhxm0iHFd3k-xTYEbKosSx_DkyoLLJ",
"md5": "953d231046275120dc1f73a5aebc9087",
"ratio": 0.5,
"size": 2,
"shards": 16,
"img_num": 25000
}),
"clevr": EasyDict({
"name": "CLEVR",
"filename": "clevr.zip",
"url": "https://drive.google.com/uc?id=1lY4JE30yk26v0MWHNpXBOMzltufUcTXj",
"md5": "3040bb20a29cd2f0e1e9231aebddf2a1",
"size": 6,
"ratio": 0.75,
"shards": 5,
"img_num": 100000
".format(c.name), "blue"))
# except:
# print(misc.bcolored("Had an error in preparing the {} dataset. Will move on.".format(c.name), "red"))
# print(sys.exc_info())
def run_cmdline(argv):
parser = argparse.ArgumentParser(prog = argv[0], description = "Download and prepare data for the GANformer.")
parser.add_argument("--data-dir", help = "Directory of created dataset", default = "datasets", type = str)
parser.add_argument("--shards-num", help = "Number of shards to split each dataset to (optional)", default = 1, type = int)
parser.add_argument("--max-images", help = "Maximum number of images to have in the dataset (optional). Use to reduce the produced tfrecords file size", default = None, type = int)
# Default tasks
parser.add_argument("--clevr", help = "Prepare the CLEVR dataset (6.41GB download, 31GB tfrecords, 100k images)", dest = "tasks", action = "append_const", const = "clevr")
parser.add_argument("--bedrooms", help = "Prepare the LSUN-bedrooms dataset (42.8GB download, up to 480GB tfrecords, 3M images)", dest = "tasks", action = "append_const", const = "bedrooms")
parser.add_argument("--ffhq", help = "Prepare the FFHQ dataset (13GB download, 13GB tfrecords, 70k images)", dest = "tasks", action = "append_const", const = "ffhq")
parser.add_argument("--cityscapes", help = "Prepare the cityscapes dataset (1.8GB download, 8GB tfrecords, 25k images)", dest = "tasks", action = "append_const", const = "cityscapes")
# Create a new task with custom images
parser.add_argument("--task", help = "New dataset name", type = str, dest = "tasks", action = "append")
parser.add_argument("--images-dir", help = "Provide source image directory to convert into tfrecords (will be searched recursively)", default = None, type = str)
parser.add_argument("--format", help = "Images format", default = None, choices = ["png", "jpg", "npy", "hdf5", "tfds", "lmdb"], type = str)
parser.add_argument("--ratio", help = "Images height/width", default = 1.0, type = float)
args = parser.parse_args()
if not args.tasks:
misc.error("No tasks specified. Please see '-h' for help.")
    if args.max_images is not None and args.max_images < 50000:
misc.log("Warning: max-images is set to {}. We recommend setting it at least to 50,000 to allow statistically correct computation of the FID-50k metric.".format(args.max_images), "red")
prepare(**vars(args))
if __name__ == "__main__":
run_cmdline(sys.argv)
| true
| true
|
f71940e708f2c32d5f339366201b373baa9e265f
| 5,092
|
py
|
Python
|
venv/Lib/site-packages/pandas/tests/series/indexing/test_get.py
|
arnoyu-hub/COMP0016miemie
|
59af664dcf190eab4f93cefb8471908717415fea
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pandas/tests/series/indexing/test_get.py
|
arnoyu-hub/COMP0016miemie
|
59af664dcf190eab4f93cefb8471908717415fea
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pandas/tests/series/indexing/test_get.py
|
arnoyu-hub/COMP0016miemie
|
59af664dcf190eab4f93cefb8471908717415fea
|
[
"MIT"
] | null | null | null |
import numpy as np
import pytest
import pandas as pd
from pandas import Series
import pandas._testing as tm
def test_get():
# GH 6383
s = Series(
np.array(
[
43,
48,
60,
48,
50,
51,
50,
45,
57,
48,
56,
45,
51,
39,
55,
43,
54,
52,
51,
54,
]
)
)
result = s.get(25, 0)
expected = 0
assert result == expected
s = Series(
np.array(
[
43,
48,
60,
48,
50,
51,
50,
45,
57,
48,
56,
45,
51,
39,
55,
43,
54,
52,
51,
54,
]
),
index=pd.Float64Index(
[
25.0,
36.0,
49.0,
64.0,
81.0,
100.0,
121.0,
144.0,
169.0,
196.0,
1225.0,
1296.0,
1369.0,
1444.0,
1521.0,
1600.0,
1681.0,
1764.0,
1849.0,
1936.0,
]
),
)
result = s.get(25, 0)
expected = 43
assert result == expected
# GH 7407
# with a boolean accessor
df = pd.DataFrame({"i": [0] * 3, "b": [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default="Missing")
assert result == "Missing"
vc = df.b.value_counts()
result = vc.get(False, default="Missing")
assert result == 3
result = vc.get(True, default="Missing")
assert result == "Missing"
def test_get_nan():
# GH 8569
s = pd.Float64Index(range(10)).to_series()
assert s.get(np.nan) is None
assert s.get(np.nan, default="Missing") == "Missing"
def test_get_nan_multiple():
# GH 8569
# ensure that fixing "test_get_nan" above hasn't broken get
# with multiple elements
s = pd.Float64Index(range(10)).to_series()
idx = [2, 30]
assert s.get(idx) is None
idx = [2, np.nan]
assert s.get(idx) is None
# GH 17295 - all missing keys
idx = [20, 30]
assert s.get(idx) is None
idx = [np.nan, np.nan]
assert s.get(idx) is None
def test_get_with_default():
# GH#7725
d0 = ["a", "b", "c", "d"]
d1 = np.arange(4, dtype="int64")
others = ["e", 10]
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
assert s.get(i) == d
assert s.get(i, d) == d
assert s.get(i, "z") == d
for other in others:
assert s.get(other, "z") == "z"
assert s.get(other, other) == other
@pytest.mark.parametrize(
"arr",
[np.random.randn(10), tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")],
)
def test_get2(arr):
# TODO: better name, possibly split
# GH#21260
ser = Series(arr, index=[2 * i for i in range(len(arr))])
assert ser.get(4) == ser.iloc[2]
result = ser.get([4, 6])
expected = ser.iloc[[2, 3]]
tm.assert_series_equal(result, expected)
result = ser.get(slice(2))
expected = ser.iloc[[0, 1]]
tm.assert_series_equal(result, expected)
assert ser.get(-1) is None
assert ser.get(ser.index.max() + 1) is None
ser = Series(arr[:6], index=list("abcdef"))
assert ser.get("c") == ser.iloc[2]
result = ser.get(slice("b", "d"))
expected = ser.iloc[[1, 2, 3]]
tm.assert_series_equal(result, expected)
result = ser.get("Z")
assert result is None
assert ser.get(4) == ser.iloc[4]
assert ser.get(-1) == ser.iloc[-1]
assert ser.get(len(ser)) is None
# GH#21257
ser = Series(arr)
ser2 = ser[::2]
assert ser2.get(1) is None
def test_getitem_get(string_series, object_series):
for obj in [string_series, object_series]:
idx = obj.index[5]
assert obj[idx] == obj.get(idx)
assert obj[idx] == obj[5]
assert string_series.get(-1) == string_series.get(string_series.index[-1])
assert string_series[5] == string_series.get(string_series.index[5])
def test_get_none():
# GH#5652
s1 = Series(dtype=object)
s2 = Series(dtype=object, index=list("abc"))
for s in [s1, s2]:
result = s.get(None)
assert result is None
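The Series.get semantics these tests pin down can be seen in a few lines; a minimal sketch (values are arbitrary):
import pandas as pd

s = pd.Series([10, 20, 30], index=["a", "b", "c"])
print(s.get("b"))           # 20 -- label present
print(s.get("z"))           # None -- label missing, default default
print(s.get("z", "n/a"))    # 'n/a' -- caller-supplied default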
| 23.683721
| 88
| 0.434014
|
import numpy as np
import pytest
import pandas as pd
from pandas import Series
import pandas._testing as tm
def test_get():
s = Series(
np.array(
[
43,
48,
60,
48,
50,
51,
50,
45,
57,
48,
56,
45,
51,
39,
55,
43,
54,
52,
51,
54,
]
)
)
result = s.get(25, 0)
expected = 0
assert result == expected
s = Series(
np.array(
[
43,
48,
60,
48,
50,
51,
50,
45,
57,
48,
56,
45,
51,
39,
55,
43,
54,
52,
51,
54,
]
),
index=pd.Float64Index(
[
25.0,
36.0,
49.0,
64.0,
81.0,
100.0,
121.0,
144.0,
169.0,
196.0,
1225.0,
1296.0,
1369.0,
1444.0,
1521.0,
1600.0,
1681.0,
1764.0,
1849.0,
1936.0,
]
),
)
result = s.get(25, 0)
expected = 43
assert result == expected
df = pd.DataFrame({"i": [0] * 3, "b": [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default="Missing")
assert result == "Missing"
vc = df.b.value_counts()
result = vc.get(False, default="Missing")
assert result == 3
result = vc.get(True, default="Missing")
assert result == "Missing"
def test_get_nan():
s = pd.Float64Index(range(10)).to_series()
assert s.get(np.nan) is None
assert s.get(np.nan, default="Missing") == "Missing"
def test_get_nan_multiple():
# with multiple elements
s = pd.Float64Index(range(10)).to_series()
idx = [2, 30]
assert s.get(idx) is None
idx = [2, np.nan]
assert s.get(idx) is None
# GH 17295 - all missing keys
idx = [20, 30]
assert s.get(idx) is None
idx = [np.nan, np.nan]
assert s.get(idx) is None
def test_get_with_default():
# GH#7725
d0 = ["a", "b", "c", "d"]
d1 = np.arange(4, dtype="int64")
others = ["e", 10]
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
assert s.get(i) == d
assert s.get(i, d) == d
assert s.get(i, "z") == d
for other in others:
assert s.get(other, "z") == "z"
assert s.get(other, other) == other
@pytest.mark.parametrize(
"arr",
[np.random.randn(10), tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")],
)
def test_get2(arr):
# TODO: better name, possibly split
# GH#21260
ser = Series(arr, index=[2 * i for i in range(len(arr))])
assert ser.get(4) == ser.iloc[2]
result = ser.get([4, 6])
expected = ser.iloc[[2, 3]]
tm.assert_series_equal(result, expected)
result = ser.get(slice(2))
expected = ser.iloc[[0, 1]]
tm.assert_series_equal(result, expected)
assert ser.get(-1) is None
assert ser.get(ser.index.max() + 1) is None
ser = Series(arr[:6], index=list("abcdef"))
assert ser.get("c") == ser.iloc[2]
result = ser.get(slice("b", "d"))
expected = ser.iloc[[1, 2, 3]]
tm.assert_series_equal(result, expected)
result = ser.get("Z")
assert result is None
assert ser.get(4) == ser.iloc[4]
assert ser.get(-1) == ser.iloc[-1]
assert ser.get(len(ser)) is None
# GH#21257
ser = Series(arr)
ser2 = ser[::2]
assert ser2.get(1) is None
def test_getitem_get(string_series, object_series):
for obj in [string_series, object_series]:
idx = obj.index[5]
assert obj[idx] == obj.get(idx)
assert obj[idx] == obj[5]
assert string_series.get(-1) == string_series.get(string_series.index[-1])
assert string_series[5] == string_series.get(string_series.index[5])
def test_get_none():
# GH#5652
s1 = Series(dtype=object)
s2 = Series(dtype=object, index=list("abc"))
for s in [s1, s2]:
result = s.get(None)
assert result is None
| true
| true
|
f71940f58176a96989ec11a785988a65b24d32a1
| 1,157
|
py
|
Python
|
venv/Scripts/get_first_company.py
|
ZUSM0/zynoPYsis
|
0cbe84d60a9611eac4daed82176477939bf79183
|
[
"MIT"
] | null | null | null |
venv/Scripts/get_first_company.py
|
ZUSM0/zynoPYsis
|
0cbe84d60a9611eac4daed82176477939bf79183
|
[
"MIT"
] | null | null | null |
venv/Scripts/get_first_company.py
|
ZUSM0/zynoPYsis
|
0cbe84d60a9611eac4daed82176477939bf79183
|
[
"MIT"
] | null | null | null |
#!c:\users\user\pycharmprojects\sinopse de filmes\venv\scripts\python.exe
# -*- coding: utf-8 -*-
"""
get_first_company.py
Usage: get_first_company "company name"
Search for the given name and print the best matching result.
"""
import sys
# Import the IMDbPY package.
try:
import imdb
except ImportError:
print('You need to install the IMDbPY package!')
sys.exit(1)
if len(sys.argv) != 2:
print('Only one argument is required:')
print(' %s "company name"' % sys.argv[0])
sys.exit(2)
name = sys.argv[1]
i = imdb.IMDb()
try:
# Do the search, and get the results (a list of company objects).
results = i.search_company(name)
except imdb.IMDbError as e:
print("Probably you're not connected to Internet. Complete error report:")
print(e)
sys.exit(3)
if not results:
print('No matches for "%s", sorry.' % name)
sys.exit(0)
# Print only the first result.
print(' Best match for "%s"' % name)
# This is a company instance.
company = results[0]
# So far the company object only contains basic information like the
# name; retrieve main information:
i.update(company)
print(company.summary())
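After i.update(company) the result object behaves like a dictionary; a hedged sketch of reading individual fields (key names are typical IMDbPY fields and may vary by object type):
# Assumes 'company' from the script above, already updated via i.update(company).
print(company.get('name'))
for key in company.keys():
    print(key, '->', company[key])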
| 21.425926
| 79
| 0.684529
|
import sys
try:
import imdb
except ImportError:
print('You need to install the IMDbPY package!')
sys.exit(1)
if len(sys.argv) != 2:
print('Only one argument is required:')
print(' %s "company name"' % sys.argv[0])
sys.exit(2)
name = sys.argv[1]
i = imdb.IMDb()
try:
results = i.search_company(name)
except imdb.IMDbError as e:
print("Probably you're not connected to Internet. Complete error report:")
print(e)
sys.exit(3)
if not results:
print('No matches for "%s", sorry.' % name)
sys.exit(0)
# Print only the first result.
print(' Best match for "%s"' % name)
# This is a company instance.
company = results[0]
# So far the company object only contains basic information like the
# name; retrieve main information:
i.update(company)
print(company.summary())
| true
| true
|
f7194132aff142671554e00669a4cc1f9a014680
| 5,299
|
py
|
Python
|
MathExample.py
|
AdityaSavara/CiteSoft_Py
|
f3a68666966565d4eb130e457cb11d285b56b4c5
|
[
"BSD-3-Clause"
] | null | null | null |
MathExample.py
|
AdityaSavara/CiteSoft_Py
|
f3a68666966565d4eb130e457cb11d285b56b4c5
|
[
"BSD-3-Clause"
] | null | null | null |
MathExample.py
|
AdityaSavara/CiteSoft_Py
|
f3a68666966565d4eb130e457cb11d285b56b4c5
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import math
try:
import CiteSoft
except:
import os #The below lines are to allow CiteSoftLocal to be called regardless of user's working directory.
lenOfFileName = len(os.path.basename(__file__)) #This is the name of **this** file.
absPathWithoutFileName = os.path.abspath(__file__)[0:-1*lenOfFileName]
sys.path.append(absPathWithoutFileName)
import CiteSoftLocal as CiteSoft
#Here CiteSoft is used with an example module called "MathExample"
#Note that the unique_id should be something truly unique (no other software would use it).
#Typically, unique_id is a DOI or a URL.
#The author field is typically a list object with names as strings, but can also just be a single string.
#Note that there is a function called sqrt which uses the python math module, and uses a *different* citation.
software_name = "CiteSoft Math Example"
version = "1.0.0"
MathExample_unique_id = "https://github.com/AdityaSavara/CiteSoft_py/blob/master/MathExample.py"
kwargs = {"version": version, "author": ["Aditya Savara", "CPH"], "url": "https://github.com/AdityaSavara/CiteSoft_py/blob/master/MathExample.py"}
#The below line will cause this module's citation to be exported any time the module is imported.
#The 'write_immediately = True' causes the checkpoint to be written at the time of export rather than stored.
CiteSoft.import_cite(unique_id=MathExample_unique_id, software_name="MathLib Example", write_immediately=True, **kwargs)
@CiteSoft.function_call_cite(unique_id=MathExample_unique_id, software_name="CiteSoft Math Example", **kwargs)
def add(num1, num2):
return num1 + num2
@CiteSoft.function_call_cite(unique_id=MathExample_unique_id, software_name="CiteSoft Math Example", **kwargs)
def subtract(num1, num2):
return num1 - num2
@CiteSoft.function_call_cite(unique_id=MathExample_unique_id, software_name="CiteSoft Math Example", **kwargs)
def multiply(num1, num2):
return num1 * num2
@CiteSoft.function_call_cite(unique_id=MathExample_unique_id, software_name="CiteSoft Math Example", **kwargs)
def divide(num1, num2):
return num1 / num2
@CiteSoft.after_call_compile_consolidated_log() #This will cause the consolidated log to be compiled after the mean function is called. #note that we put it after the function_call_cite so that it is a wrapper around that wrapper and occurs second.
@CiteSoft.function_call_cite(unique_id=MathExample_unique_id, software_name="MathLib Example", **kwargs)
def mean(list_of_num):
result = 0
for num in list_of_num:
result = add(result, num)
result = divide(result, len(list_of_num))
return result
math_unique_id = "https://docs.python.org/3/library/math.html"
math_software_name = "The Python Library Reference: Mathematical functions"
math_version = str(sys.version).split("|")[0] #This is the python version.
math_kwargs = {"version": math_version, "author": "Van Rossum, Guido", "cite": "Van Rossum, G. (2020). The Python Library Reference, release 3.8.2. Python Software Foundation.", "url": "https://docs.python.org/3/library/math.html"}
@CiteSoft.function_call_cite(unique_id=math_unique_id, software_name=math_software_name, **math_kwargs)
def sqrt(num):
return math.sqrt(num)
@CiteSoft.function_call_cite(MathExample_unique_id, software_name, **kwargs)
def sqr(num):
return multiply(num, num)
@CiteSoft.function_call_cite(MathExample_unique_id, software_name, **kwargs)
def sample_variance(list_of_num):
meanVal = mean(list_of_num)
result = 0
for num in list_of_num:
result = add(result, sqr(subtract(num, meanVal)))
result = divide(result, (len(list_of_num) - 1))
return result
@CiteSoft.function_call_cite(MathExample_unique_id, software_name, **kwargs)
def std_dev(list_of_num):
return sqrt(sample_variance(list_of_num))
@CiteSoft.after_call_compile_consolidated_log() #This will cause the consolidated log to be compiled after the cite_me function is called. #note that we put it after the function_call_cite so that it is a wrapper around that wrapper and occurs second.
@CiteSoft.function_call_cite(MathExample_unique_id, software_name, **kwargs)
def cite_me(): #This is just an example of how a package-creating dev-user could provide a function that other dev-users relying on their package can call at the very end of their program, so that no calls to CiteSoft need to occur during their runtime.
pass
#note that the above lines of code simply add to the file CiteSoftwareCheckPoints
#if one wants to create a consolidated log that removes duplicates, one can call a CiteSoft function
#This is considered appropriate to do at the end of a complicated program, but is not necessary.
#it would have been possible to also use decorators on any of the above functions, like @CiteSoft.after_call_compile_checkpoints_log or @CiteSoft.after_call_compile_consolidated_log. Note that chained/stacked decorators are performed in "first in last out" order, since they are wrappers on wrappers. So if a function has both @CiteSoft.function_call_cite and @after_call_compile_consolidated_log, the @CiteSoft.function_call_cite should be second.
def export_citation_checkpoints(filepath=""):
    if filepath != "":
CiteSoft.compile_checkpoints_log(filepath)
else:
CiteSoft.compile_checkpoints_log()
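The "first in, last out" decorator ordering described in the comments above is plain Python behaviour, independent of CiteSoft; a self-contained sketch with generic names:
def outer(fn):
    def wrapper(*args, **kwargs):
        print("outer before")          # runs first on the way in
        result = fn(*args, **kwargs)
        print("outer after")           # runs last on the way out
        return result
    return wrapper

def inner(fn):
    def wrapper(*args, **kwargs):
        print("inner before")
        result = fn(*args, **kwargs)
        print("inner after")
        return result
    return wrapper

@outer   # applied second, so it wraps the already-decorated function
@inner   # applied first, directly wraps f
def f():
    print("f body")

f()  # prints: outer before, inner before, f body, inner after, outer after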
| 58.230769
| 449
| 0.780336
|
import sys
import math
try:
import CiteSoft
except:
import os
lenOfFileName = len(os.path.basename(__file__)) #This is the name of **this** file.
absPathWithoutFileName = os.path.abspath(__file__)[0:-1*lenOfFileName]
sys.path.append(absPathWithoutFileName)
import CiteSoftLocal as CiteSoft
#Here CiteSoft is used with an example module called "MathExample"
#Note that the unique_id should be something truly unique (no other software would use it).
#Typically, unique_id is a DOI or a URL.
#The author field is typically a list object with names as strings, but can also just be a single string.
#Note that there is a function called sqrt which uses the python math module, and uses a *different* citation.
software_name = "CiteSoft Math Example"
version = "1.0.0"
MathExample_unique_id = "https://github.com/AdityaSavara/CiteSoft_py/blob/master/MathExample.py"
kwargs = {"version": version, "author": ["Aditya Savara", "CPH"], "url": "https://github.com/AdityaSavara/CiteSoft_py/blob/master/MathExample.py"}
#The below line will cause this module's citation to be exported any time the module is imported.
CiteSoft.import_cite(unique_id=MathExample_unique_id, software_name="MathLib Example", write_immediately=True, **kwargs)
@CiteSoft.function_call_cite(unique_id=MathExample_unique_id, software_name="CiteSoft Math Example", **kwargs)
def add(num1, num2):
return num1 + num2
@CiteSoft.function_call_cite(unique_id=MathExample_unique_id, software_name="CiteSoft Math Example", **kwargs)
def subtract(num1, num2):
return num1 - num2
@CiteSoft.function_call_cite(unique_id=MathExample_unique_id, software_name="CiteSoft Math Example", **kwargs)
def multiply(num1, num2):
return num1 * num2
@CiteSoft.function_call_cite(unique_id=MathExample_unique_id, software_name="CiteSoft Math Example", **kwargs)
def divide(num1, num2):
return num1 / num2
@CiteSoft.after_call_compile_consolidated_log()
@CiteSoft.function_call_cite(unique_id=MathExample_unique_id, software_name="MathLib Example", **kwargs)
def mean(list_of_num):
result = 0
for num in list_of_num:
result = add(result, num)
result = divide(result, len(list_of_num))
return result
math_unique_id = "https://docs.python.org/3/library/math.html"
math_software_name = "The Python Library Reference: Mathematical functions"
math_version = str(sys.version).split("|")[0]
math_kwargs = {"version": math_version, "author": "Van Rossum, Guido", "cite": "Van Rossum, G. (2020). The Python Library Reference, release 3.8.2. Python Software Foundation.", "url": "https://docs.python.org/3/library/math.html"}
@CiteSoft.function_call_cite(unique_id=math_unique_id, software_name=math_software_name, **math_kwargs)
def sqrt(num):
return math.sqrt(num)
@CiteSoft.function_call_cite(MathExample_unique_id, software_name, **kwargs)
def sqr(num):
return multiply(num, num)
@CiteSoft.function_call_cite(MathExample_unique_id, software_name, **kwargs)
def sample_variance(list_of_num):
meanVal = mean(list_of_num)
result = 0
for num in list_of_num:
result = add(result, sqr(subtract(num, meanVal)))
result = divide(result, (len(list_of_num) - 1))
return result
@CiteSoft.function_call_cite(MathExample_unique_id, software_name, **kwargs)
def std_dev(list_of_num):
return sqrt(sample_variance(list_of_num))
@CiteSoft.after_call_compile_consolidated_log()
@CiteSoft.function_call_cite(MathExample_unique_id, software_name, **kwargs)
def cite_me():
    pass
def export_citation_checkpoints(filepath=""):
    if filepath != "":
CiteSoft.compile_checkpoints_log(filepath)
else:
CiteSoft.compile_checkpoints_log()
| true
| true
|
f719414c0fab3fa82e996e4adda1253ef5777ac1
| 141
|
py
|
Python
|
performance_test/torch_deploy/__init__.py
|
Lionelsy/SQL-Injection-Detection-via-Deep-Learning
|
5f1958822af98a99172df524eef6e921e6fa6724
|
[
"MIT"
] | 1
|
2022-01-18T17:08:52.000Z
|
2022-01-18T17:08:52.000Z
|
performance_test/torch_deploy/__init__.py
|
Lionelsy/SQL-Injection-Detection-via-Deep-Learning
|
5f1958822af98a99172df524eef6e921e6fa6724
|
[
"MIT"
] | null | null | null |
performance_test/torch_deploy/__init__.py
|
Lionelsy/SQL-Injection-Detection-via-Deep-Learning
|
5f1958822af98a99172df524eef6e921e6fa6724
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2020/12/14
# @Author : Shuyu ZHANG
# @FileName: __init__.py
# @Software: PyCharm
# @Description: Here
| 17.625
| 24
| 0.609929
| true
| true
|
|
f71941bb492d02c50d6770e3abc0974c97079478
| 1,354
|
py
|
Python
|
rclpy/services/minimal_client/setup.py
|
flynneva/examples
|
16bffa238dfa3ff305f14b1ec75ed41dce634ffb
|
[
"Apache-2.0"
] | null | null | null |
rclpy/services/minimal_client/setup.py
|
flynneva/examples
|
16bffa238dfa3ff305f14b1ec75ed41dce634ffb
|
[
"Apache-2.0"
] | null | null | null |
rclpy/services/minimal_client/setup.py
|
flynneva/examples
|
16bffa238dfa3ff305f14b1ec75ed41dce634ffb
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup
package_name = 'examples_rclpy_minimal_client'
setup(
name=package_name,
version='0.9.1',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
],
install_requires=['setuptools'],
zip_safe=True,
author='Mikael Arguedas',
author_email='mikael@osrfoundation.org',
maintainer='Mikael Arguedas',
maintainer_email='mikael@osrfoundation.org',
keywords=['ROS'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development',
],
description='Examples of minimal service clients using rclpy.',
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'client = examples_rclpy_minimal_client.client:main',
'client_async = examples_rclpy_minimal_client.client_async:main',
'client_async_member_function ='
' examples_rclpy_minimal_client.client_async_member_function:main',
'client_async_callback = examples_rclpy_minimal_client.client_async_callback:main',
],
},
)
| 33.85
| 95
| 0.661004
|
from setuptools import setup
package_name = 'examples_rclpy_minimal_client'
setup(
name=package_name,
version='0.9.1',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
],
install_requires=['setuptools'],
zip_safe=True,
author='Mikael Arguedas',
author_email='mikael@osrfoundation.org',
maintainer='Mikael Arguedas',
maintainer_email='mikael@osrfoundation.org',
keywords=['ROS'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development',
],
description='Examples of minimal service clients using rclpy.',
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'client = examples_rclpy_minimal_client.client:main',
'client_async = examples_rclpy_minimal_client.client_async:main',
'client_async_member_function ='
' examples_rclpy_minimal_client.client_async_member_function:main',
'client_async_callback = examples_rclpy_minimal_client.client_async_callback:main',
],
},
)
| true
| true
|
f71942098bb86e0175943366277c2cc371a401db
| 1,735
|
py
|
Python
|
api/migrations/0001_initial.py
|
psingla1210/django-rest-api
|
db9fa70e3eeb747399b275e79688dfa4974a00ee
|
[
"MIT"
] | null | null | null |
api/migrations/0001_initial.py
|
psingla1210/django-rest-api
|
db9fa70e3eeb747399b275e79688dfa4974a00ee
|
[
"MIT"
] | 4
|
2021-03-19T11:31:00.000Z
|
2022-02-10T14:07:26.000Z
|
api/migrations/0001_initial.py
|
psingla1210/django-rest-api
|
db9fa70e3eeb747399b275e79688dfa4974a00ee
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2 on 2020-06-21 14:10
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='TwistedUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.CharField(max_length=256, unique=True, verbose_name='email address')),
('name', models.CharField(max_length=256)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 51.029412
| 266
| 0.641499
|
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='TwistedUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.CharField(max_length=256, unique=True, verbose_name='email address')),
('name', models.CharField(max_length=256)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| true
| true
|
f71942734c2781f9110c7944a358f060b7b5ed9c
| 1,138
|
py
|
Python
|
frappe/utils/connections.py
|
erpnext-tm/frappe
|
7b470f28e1cf00b0659c01e06a2d0a4693b28d98
|
[
"MIT"
] | null | null | null |
frappe/utils/connections.py
|
erpnext-tm/frappe
|
7b470f28e1cf00b0659c01e06a2d0a4693b28d98
|
[
"MIT"
] | null | null | null |
frappe/utils/connections.py
|
erpnext-tm/frappe
|
7b470f28e1cf00b0659c01e06a2d0a4693b28d98
|
[
"MIT"
] | null | null | null |
import socket
from six.moves.urllib.parse import urlparse
from frappe import get_conf
REDIS_KEYS = ("redis_cache", "redis_queue", "redis_socketio")
def is_open(ip, port, timeout=10):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(timeout)
try:
s.connect((ip, int(port)))
s.shutdown(socket.SHUT_RDWR)
return True
except socket.error:
return False
finally:
s.close()
def check_database():
config = get_conf()
db_type = config.get("db_type", "mariadb")
db_host = config.get("db_host", "localhost")
db_port = config.get("db_port", 3306 if db_type == "mariadb" else 5432)
return {db_type: is_open(db_host, db_port)}
def check_redis(redis_services=None):
config = get_conf()
services = redis_services or REDIS_KEYS
status = {}
for conn in services:
redis_url = urlparse(config.get(conn)).netloc
redis_host, redis_port = redis_url.split(":")
status[conn] = is_open(redis_host, redis_port)
return status
def check_connection(redis_services=None):
service_status = {}
service_status.update(check_database())
service_status.update(check_redis(redis_services))
return service_status
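A quick sketch of using is_open on its own, outside Frappe (localhost:6379 is just an example target, the default Redis port, not a Frappe requirement):
if is_open("localhost", 6379, timeout=2):
    print("port reachable")
else:
    print("port closed or filtered")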
| 24.212766
| 72
| 0.746924
|
import socket
from six.moves.urllib.parse import urlparse
from frappe import get_conf
REDIS_KEYS = ("redis_cache", "redis_queue", "redis_socketio")
def is_open(ip, port, timeout=10):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(timeout)
try:
s.connect((ip, int(port)))
s.shutdown(socket.SHUT_RDWR)
return True
except socket.error:
return False
finally:
s.close()
def check_database():
config = get_conf()
db_type = config.get("db_type", "mariadb")
db_host = config.get("db_host", "localhost")
db_port = config.get("db_port", 3306 if db_type == "mariadb" else 5432)
return {db_type: is_open(db_host, db_port)}
def check_redis(redis_services=None):
config = get_conf()
services = redis_services or REDIS_KEYS
status = {}
for conn in services:
redis_url = urlparse(config.get(conn)).netloc
redis_host, redis_port = redis_url.split(":")
status[conn] = is_open(redis_host, redis_port)
return status
def check_connection(redis_services=None):
service_status = {}
service_status.update(check_database())
service_status.update(check_redis(redis_services))
return service_status
| true
| true
|
f71943a375acd8972ca01d50dd31b6166bd90ebe
| 606
|
py
|
Python
|
demo.py
|
peter-jim/offchain-algorithm
|
c148fd1e3dffca9a42a4206c516533aae51d1ae1
|
[
"Apache-2.0"
] | null | null | null |
demo.py
|
peter-jim/offchain-algorithm
|
c148fd1e3dffca9a42a4206c516533aae51d1ae1
|
[
"Apache-2.0"
] | null | null | null |
demo.py
|
peter-jim/offchain-algorithm
|
c148fd1e3dffca9a42a4206c516533aae51d1ae1
|
[
"Apache-2.0"
] | 1
|
2021-04-17T06:34:32.000Z
|
2021-04-17T06:34:32.000Z
|
import requests
def demo():
'''
    Fetch coin information from the Feixiaohao (feixiaohao.com) API
    :return btc,eth,eos ... price
'''
print("start")
    # API tutorial: https://github.com/xiaohao2019/API-docs/blob/master/PublicApi_CN.md
url = "https://fxhapi.feixiaohao.com/public/v1/ticker/"
    # Query parameters: start=[integer] (starting rank of the result set), limit=[integer] (maximum number of results)
start = "start=" + str(0)+"&"
limit = "limit=" + str(10)
print(url+"?"+start+limit)
try:
response = requests.get(url=url+"?"+start+limit)
for item in response.json():
print(item)
print("获取完毕")
except:
print('error')
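The same request can be written with requests' own query-string handling instead of manual concatenation; a minimal equivalent sketch:
# Let requests build the query string, and add a timeout.
resp = requests.get("https://fxhapi.feixiaohao.com/public/v1/ticker/",
                    params={"start": 0, "limit": 10}, timeout=10)
for item in resp.json():
    print(item)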
| 23.307692
| 79
| 0.580858
|
import requests
def demo():
print("start")
url = "https://fxhapi.feixiaohao.com/public/v1/ticker/"
start = "start=" + str(0)+"&"
limit = "limit=" + str(10)
print(url+"?"+start+limit)
try:
response = requests.get(url=url+"?"+start+limit)
for item in response.json():
print(item)
print("获取完毕")
except:
print('error')
| true
| true
|
f7194440cda8d5fbed1979d6f1691cf0e20dcc13
| 1,544
|
py
|
Python
|
benchmark/Python/Savina/PingPong.py
|
Feliix42/lingua-franca
|
af312ca8d37d9246dcb1d77fdc254a0dbd61b2bc
|
[
"BSD-2-Clause"
] | 1
|
2020-11-13T02:05:57.000Z
|
2020-11-13T02:05:57.000Z
|
benchmark/Python/Savina/PingPong.py
|
Feliix42/lingua-franca
|
af312ca8d37d9246dcb1d77fdc254a0dbd61b2bc
|
[
"BSD-2-Clause"
] | null | null | null |
benchmark/Python/Savina/PingPong.py
|
Feliix42/lingua-franca
|
af312ca8d37d9246dcb1d77fdc254a0dbd61b2bc
|
[
"BSD-2-Clause"
] | 1
|
2020-10-20T12:30:38.000Z
|
2020-10-20T12:30:38.000Z
|
from LinguaFrancaBase.constants import * #Useful constants
from LinguaFrancaBase.functions import * #Useful helper functions
from LinguaFrancaBase.classes import * #Useful classes
import sys
import copy
sys.setrecursionlimit(100000)
EXPECTED = 10000
class _Ping:
count = 1000000
pingsLeft = count
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def reaction_function_1(self):
self.pingsLeft -= 1
if self.pingsLeft > 0:
pingpong_pong_lf.reaction_function_0(self.pingsLeft)
else:
exit()
return 0
class _Pong:
expected = 1000000
count = 0
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
    def reaction_function_0(self, receive):
self.count += 1
if(self.count == self.expected):
exit()
pingpong_ping_lf.reaction_function_1()
return 0
    def reaction_function_1(self):
if self.count != self.expected:
sys.stderr.write("Pong expected to receive {:d} inputs, but it received {:d}.\n".format(self.expected, self.count))
exit(1)
print("Success.")
return 0
# Instantiate classes
pingpong_ping_lf = _Ping(bank_index = 0, count=EXPECTED)
pingpong_pong_lf = _Pong(bank_index = 0, expected=EXPECTED)
# The main function
def main():
pingpong_ping_lf.reaction_function_1()
# As is customary in Python programs, the main() function
# should only be executed if the main module is active.
if __name__=="__main__":
main()
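Because Ping and Pong invoke each other directly rather than through an event loop, every round trip adds roughly two stack frames, which is why sys.setrecursionlimit(100000) is raised above; a sketch of the arithmetic:
# Depth grows with the number of round trips: Ping -> Pong -> Ping -> ...
frames_needed = 2 * EXPECTED          # about 20000 frames for EXPECTED = 10000
print(frames_needed < 100000)         # True, so the raised limit suffices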
| 29.132075
| 127
| 0.668394
|
from LinguaFrancaBase.constants import *
from LinguaFrancaBase.functions import *
from LinguaFrancaBase.classes import *
import sys
import copy
sys.setrecursionlimit(100000)
EXPECTED = 10000
class _Ping:
count = 1000000
pingsLeft = count
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def reaction_function_1(self):
self.pingsLeft -= 1
if self.pingsLeft > 0:
pingpong_pong_lf.reaction_function_0(self.pingsLeft)
else:
exit()
return 0
class _Pong:
expected = 1000000
count = 0
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
    def reaction_function_0(self, receive):
self.count += 1
if(self.count == self.expected):
exit()
pingpong_ping_lf.reaction_function_1()
return 0
    def reaction_function_1(self):
if self.count != self.expected:
sys.stderr.write("Pong expected to receive {:d} inputs, but it received {:d}.\n".format(self.expected, self.count))
exit(1)
print("Success.")
return 0
pingpong_ping_lf = _Ping(bank_index = 0, count=EXPECTED)
pingpong_pong_lf = _Pong(bank_index = 0, expected=EXPECTED)
def main():
pingpong_ping_lf.reaction_function_1()
if __name__=="__main__":
main()
| true
| true
|
f71944995b26a94873f53f48809fb0b8c10684e6
| 11,354
|
py
|
Python
|
ministack/build.py
|
lyndon160/ref
|
122e8315c00784c58285c6ad54bf8fffd39623fa
|
[
"Apache-2.0"
] | 6
|
2018-10-22T09:43:46.000Z
|
2021-11-15T11:08:54.000Z
|
ministack/build.py
|
lyndon160/REF
|
122e8315c00784c58285c6ad54bf8fffd39623fa
|
[
"Apache-2.0"
] | 5
|
2016-01-29T16:51:39.000Z
|
2020-08-01T16:16:29.000Z
|
ministack/build.py
|
lyndon160/REF
|
122e8315c00784c58285c6ad54bf8fffd39623fa
|
[
"Apache-2.0"
] | 4
|
2016-01-05T20:41:18.000Z
|
2019-05-12T10:15:56.000Z
|
#!/usr/bin/python
#
# build.py
#
# chain nova and neutron service requests to build predefined compute topologies
#
# topology is defined in python language 'spec' files
#
# by default the program will build the requested topology from a clean slate,
# but given the correct option (-c) it will also attempt to complete a build when some elements already exist, e.g. networks or compute instances
# Or, if option -d is given it will attempt to delete any VMs
# If option -d is given twice it will also attempt to remove networks named in the spec file
#
# before commencing work the program will make some basic checks on the spec file,
# e.g. checking that the named keypairs, flavors and images exist
#
##
import time
import sys
import traceback
from socket import gethostbyname
from os import environ as env
import os
import argparse
from pprint import pprint
import novaclient.client
from neutron import Neutron
spec_error = False
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument('--auth','-a', action='store_true')
group.add_argument('--dryrun','-n', action='store_true')
group.add_argument('--build','-b', action='store_true')
group.add_argument('--delete', '-d', action='count')
group.add_argument('--suspend', '-s', action='store_true')
group.add_argument('--resume', '-r', action='store_true')
group.add_argument('--complete','-c', action='store_true')
parser.add_argument('specfile')
args=parser.parse_args()
specfile=args.specfile
# build is the default action
build = not (args.resume or args.suspend or args.complete or args.dryrun or args.delete)
if ( not os.access(specfile,os.R_OK)):
print "spec file not readable"
sys.exit(1)
name,extension = os.path.splitext(specfile)
if ( extension and extension != ".py"):
print "spec file not a python script"
sys.exit(1)
try:
_imp = __import__(name)
spec = _imp.spec
print "reading template: %s" % spec['name']
except:
print "couldn't read the spec file '%s'" % specfile
sys.exit(1)
if spec['credentials'] and spec['controller']:
# print "using OpenStack auth credentials from spec file"
credentials = spec['credentials']
auth_url = "http://" + spec['controller'] + ":35357/v2.0"
elif all(k in os.environ for k in ("OS_USERNAME", "OS_PASSWORD", "OS_AUTH_URL", "OS_TENANT_NAME")):
print "using OpenStack auth credentials from environment"
credentials = { 'user' : os.environ['OS_USERNAME'],
'password' : os.environ['OS_PASSWORD'],
'project' : os.environ['OS_TENANT_NAME'] }
auth_url = os.environ['OS_AUTH_URL']
else:
print "Can't find OpenStack auth credentials in environment or spec file, giving up..."
sys.exit(1)
if args.auth:
print "credentials string:\nexport OS_USERNAME=%s OS_PASSWORD=%s OS_TENANT_NAME=%s OS_AUTH_URL=%s" % (credentials['user'],credentials['password'],credentials['project'],auth_url)
sys.exit(0)
config = {}
config['external_network_name'] = spec.get('external network name')
#config['external_network_name'] = get(spec['external network name']
config['dns'] = spec['dns']
neutron = Neutron(auth_url, credentials, config)
nova = novaclient.client.Client("2",
username = credentials['user'],
api_key = credentials['password'],
project_id = credentials['project'],
auth_url = auth_url)
servers = nova.servers.list()
server_list = {}
for server in servers:
server_list[server.name] = (server.id,server.status)
def server_suspend(name):
for s in servers:
if s.name == name:
(id,status) = server_list[name]
if (status == 'ACTIVE'):
response = nova.servers.suspend(s)
print "suspend %s" % name, response
else:
print "Can't suspend server %s in state %s " % (name,status)
def server_resume(name):
for s in servers:
if s.name == name:
response = nova.servers.resume(s)
print "resume %s" % name, response
def server_delete(name):
for s in servers:
if s.name == name:
response = nova.servers.delete(s)
print "delete %s" % name, response
# the net_list somewhat duplicates functionality in the neutron library (net_by_name)
# and should probably be removed
# however, the possibility of non-unique net names should be considered
# before completely removing visibility of net IDs in this code...
net_list = {}
for net in neutron.networks:
net_list[net['name']] = net['id']
def name_to_address(name):
global spec_error
if "*" == name:
return "*"
try:
address = gethostbyname(name)
except:
print "Unexpected error looking up hostname '%s':" % name, sys.exc_info()[0]
spec_error = True
return name
if address:
return address
    print >>sys.stderr, "host lookup failed for '%s'" % name
spec_error = True
return name
def check_keypair(name):
try:
return not ( nova.keypairs.get(name).deleted )
except novaclient.exceptions.NotFound:
return False
except:
print "Unexpected error:", sys.exc_info()[0]
raise
if (not args.delete):
if check_keypair(spec['keypair']):
pass
else:
print "checking keypair failed"
sys.exit(1)
net_builder = {}
host_builder = {}
router_needed = {}
if (args.resume or args.suspend):
pass
elif ( not spec['Networks']):
print "warning: no Networks in spec file"
# sys.exit(1)
else:
for net in spec['Networks']:
net_name = net['name']
if (not args.delete):
if (net_name in net_list):
if (args.dryrun or build):
spec_error = True
print "Build warning - network %s is already defined" % net_name
else:
print "network %s exists" % net_name
else:
net_builder[net_name] = net
elif (args.delete > 1):
if (net_name in net_list):
net_builder[net_name] = net
else:
print "Can't delete non-existent network %s" % net_name
if ( not spec['Hosts']):
print "Warning - no hosts section in spec file"
else:
print "processing servers"
for host in spec['Hosts']:
if (build or args.dryrun or args.complete):
print "building host ", host['name'] , host['image'], host['flavor'], host.get('net'), host.get('env')
if (host['name'] in server_list):
if (args.complete):
print "host %s exists" % host['name']
else:
spec_error = True
print "Build Error - host %s is already defined" % host['name']
else:
print "checking host image ", host['image']
image = nova.images.find(name=host['image'])
print "checking host flavor ", host['flavor']
flavor = nova.flavors.find(name=host['flavor'])
nets = []
try:
for net_entry in host.get('net'):
# a host network entry defines the network to be used, the assigned IP, and an optional floating IP
# the network name is the actual name used in openstack, and must either be defined in the spec file or already exist
# there must be a (local) IP (OpenStack insists...) but it can be wildcarded, in which case one will be selected from the pool
# the optional third field is for a floating IP - this can be either a domain name or an IP
# in either case it will be assigned from the external network range which is defined in the spec file
# the first (local) IP may also be a hostname, which is most useful for cases where the attached network is directly routable
ip = None
fip_id = None
if isinstance(net_entry,tuple):
name = net_entry[0]
if len(net_entry) > 1:
ip = name_to_address(net_entry[1])
if len(net_entry) > 2:
fip = name_to_address(net_entry[2])
fip_id = neutron.get_floatingip(config['external_network_name'],fip,args.dryrun)
elif isinstance(net_entry,basestring):
name = net_entry
else:
# if net_entry is neither a string or a tuple then I am confused....
print "Why me....?"
sys.exit(1)
if (name in net_builder or name in net_list):
nets.append((name,ip,fip_id))
else:
print "Build warning - host network %s not defined" % name
spec_error = not (args.complete)
host_builder[host['name']] = (image,flavor,nets)
except:
print "this is an unexpected exception!"
print(traceback.format_exc())
sys.exit(1)
else:
if (host['name'] in server_list):
print "processing host %s" % host['name']
host_builder[host['name']] = None
else:
print "not processing host %s (server does not exist)" % host['name']
if (spec_error):
print "not building cluster due to spec errors"
sys.exit(1)
if (args.dryrun):
print "dryrun only - not processing cluster"
sys.exit(0)
def process_networks():
for net in net_builder.values():
net_name = net['name']
if (args.delete):
neutron.net_delete(net_list[net_name])
else:
net_id = neutron.net_build(net)
if (net_id):
net_list[net_name] = net_id
else:
print "error: failed to build network %s" % net_name
sys.exit(1)
def process_servers():
print "processing servers"
if (args.delete):
for k in host_builder.keys():
server_delete(k)
elif (args.suspend):
for k in host_builder.keys():
server_suspend(k)
elif (args.resume):
for k in host_builder.keys():
server_resume(k)
else:
for k,(i,f,ns) in host_builder.items():
print "host %s : (%s,%s)" % (k,i,f)
nics=[]
for (name,ip,fip_id) in ns:
id=net_list[name]
port_id = neutron.port_build(id,ip)
nics.append({'port-id': port_id})
if (fip_id): # floating IP requested
neutron.floatingip_bind(port_id,fip_id)
instance = nova.servers.create(name=k, image=i, flavor=f, key_name=spec['keypair'], nics=nics, config_drive=True)
if (args.delete > 1):
process_servers()
process_networks()
elif (build or args.complete):
process_networks()
process_servers()
else:
process_servers()
| 36.27476
| 182
| 0.586577
|
import time
import sys
import traceback
from socket import gethostbyname
from os import environ as env
import os
import argparse
from pprint import pprint
import novaclient.client
from neutron import Neutron
spec_error = False
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument('--auth','-a', action='store_true')
group.add_argument('--dryrun','-n', action='store_true')
group.add_argument('--build','-b', action='store_true')
group.add_argument('--delete', '-d', action='count')
group.add_argument('--suspend', '-s', action='store_true')
group.add_argument('--resume', '-r', action='store_true')
group.add_argument('--complete','-c', action='store_true')
parser.add_argument('specfile')
args = parser.parse_args()
specfile = args.specfile
build = not (args.resume or args.suspend or args.complete or args.dryrun or args.delete)
if ( not os.access(specfile,os.R_OK)):
print "spec file not readable"
sys.exit(1)
name,extension = os.path.splitext(specfile)
if ( extension and extension != ".py"):
print "spec file not a python script"
sys.exit(1)
try:
_imp = __import__(name)
spec = _imp.spec
print "reading template: %s" % spec['name']
except:
print "couldn't read the spec file '%s'" % specfile
sys.exit(1)
if spec.get('credentials') and spec.get('controller'):
    # print "using OpenStack auth credentials from spec file"
    credentials = spec['credentials']
    auth_url = "http://" + spec['controller'] + ":35357/v2.0"
elif (os.environ.get('OS_USERNAME') and os.environ.get('OS_PASSWORD') and os.environ.get('OS_AUTH_URL') and os.environ.get('OS_TENANT_NAME')):
print "using OpenStack auth credentials from environment"
credentials = { 'user' : os.environ['OS_USERNAME'],
'password' : os.environ['OS_PASSWORD'],
'project' : os.environ['OS_TENANT_NAME'] }
auth_url = os.environ['OS_AUTH_URL']
else:
print "Can't find OpenStack auth credentials in environment or spec file, giving up..."
sys.exit(1)
if args.auth:
print "credentials string:\nexport OS_USERNAME=%s OS_PASSWORD=%s OS_TENANT_NAME=%s OS_AUTH_URL=%s" % (credentials['user'],credentials['password'],credentials['project'],auth_url)
sys.exit(0)
config = {}
config['external_network_name'] = spec.get('external network name')
config['dns'] = spec['dns']
neutron = Neutron(auth_url, credentials, config)
nova = novaclient.client.Client("2",
username = credentials['user'],
api_key = credentials['password'],
project_id = credentials['project'],
auth_url = auth_url)
servers = nova.servers.list()
server_list = {}
for server in servers:
server_list[server.name] = (server.id,server.status)
def server_suspend(name):
for s in servers:
if s.name == name:
(id,status) = server_list[name]
if (status == 'ACTIVE'):
response = nova.servers.suspend(s)
print "suspend %s" % name, response
else:
print "Can't suspend server %s in state %s " % (name,status)
def server_resume(name):
for s in servers:
if s.name == name:
response = nova.servers.resume(s)
print "resume %s" % name, response
def server_delete(name):
for s in servers:
if s.name == name:
response = nova.servers.delete(s)
print "delete %s" % name, response
# the net_list somewhat duplicates functionality in the neutron library (net_by_name)
# and should probably be removed
# however, the possibility of non-unique net names should be considered
# before completely removing visibility of net IDs in this code...
net_list = {}
for net in neutron.networks:
net_list[net['name']] = net['id']
def name_to_address(name):
global spec_error
if "*" == name:
return "*"
try:
address = gethostbyname(name)
except:
print "Unexpected error looking up hostname '%s':" % name, sys.exc_info()[0]
spec_error = True
return name
if address:
return address
    print >> sys.stderr, "host lookup failed for '%s'" % name
spec_error = True
return name
def check_keypair(name):
try:
return not ( nova.keypairs.get(name).deleted )
except novaclient.exceptions.NotFound:
return False
except:
print "Unexpected error:", sys.exc_info()[0]
raise
if (not args.delete):
if check_keypair(spec['keypair']):
pass
else:
print "checking keypair failed"
sys.exit(1)
net_builder = {}
host_builder = {}
router_needed = {}
if (args.resume or args.suspend):
pass
elif ( not spec['Networks']):
print "warning: no Networks in spec file"
# sys.exit(1)
else:
for net in spec['Networks']:
net_name = net['name']
if (not args.delete):
if (net_name in net_list):
if (args.dryrun or build):
spec_error = True
print "Build warning - network %s is already defined" % net_name
else:
print "network %s exists" % net_name
else:
net_builder[net_name] = net
elif (args.delete > 1):
if (net_name in net_list):
net_builder[net_name] = net
else:
print "Can't delete non-existent network %s" % net_name
if ( not spec['Hosts']):
print "Warning - no hosts section in spec file"
else:
print "processing servers"
for host in spec['Hosts']:
if (build or args.dryrun or args.complete):
print "building host ", host['name'] , host['image'], host['flavor'], host.get('net'), host.get('env')
if (host['name'] in server_list):
if (args.complete):
print "host %s exists" % host['name']
else:
spec_error = True
print "Build Error - host %s is already defined" % host['name']
else:
print "checking host image ", host['image']
image = nova.images.find(name=host['image'])
print "checking host flavor ", host['flavor']
flavor = nova.flavors.find(name=host['flavor'])
nets = []
try:
for net_entry in host.get('net'):
ip = None
fip_id = None
if isinstance(net_entry,tuple):
name = net_entry[0]
if len(net_entry) > 1:
ip = name_to_address(net_entry[1])
if len(net_entry) > 2:
fip = name_to_address(net_entry[2])
fip_id = neutron.get_floatingip(config['external_network_name'],fip,args.dryrun)
elif isinstance(net_entry,basestring):
name = net_entry
else:
print "Why me....?"
sys.exit(1)
if (name in net_builder or name in net_list):
nets.append((name,ip,fip_id))
else:
print "Build warning - host network %s not defined" % name
spec_error = not (args.complete)
host_builder[host['name']] = (image,flavor,nets)
except:
print "this is an unexpected exception!"
print(traceback.format_exc())
sys.exit(1)
else:
if (host['name'] in server_list):
print "processing host %s" % host['name']
host_builder[host['name']] = None
else:
print "not processing host %s (server does not exist)" % host['name']
if (spec_error):
print "not building cluster due to spec errors"
sys.exit(1)
if (args.dryrun):
print "dryrun only - not processing cluster"
sys.exit(0)
def process_networks():
for net in net_builder.values():
net_name = net['name']
if (args.delete):
neutron.net_delete(net_list[net_name])
else:
net_id = neutron.net_build(net)
if (net_id):
net_list[net_name] = net_id
else:
print "error: failed to build network %s" % net_name
sys.exit(1)
def process_servers():
print "processing servers"
if (args.delete):
for k in host_builder.keys():
server_delete(k)
elif (args.suspend):
for k in host_builder.keys():
server_suspend(k)
elif (args.resume):
for k in host_builder.keys():
server_resume(k)
else:
for k,(i,f,ns) in host_builder.items():
print "host %s : (%s,%s)" % (k,i,f)
            nics = []
            for (name, ip, fip_id) in ns:
                net_id = net_list[name]
                port_id = neutron.port_build(net_id, ip)
                nics.append({'port-id': port_id})
if (fip_id):
neutron.floatingip_bind(port_id,fip_id)
instance = nova.servers.create(name=k, image=i, flavor=f, key_name=spec['keypair'], nics=nics, config_drive=True)
if (args.delete > 1):
process_servers()
process_networks()
elif (build or args.complete):
process_networks()
process_servers()
else:
process_servers()
| false
| true
|
f719453dae93648775b69c46831ebbe5c6081c50
| 3,822
|
py
|
Python
|
pygasus/storage/sql/query_builder.py
|
talismud/pygasus
|
fb01c8bd51003b5a008b572182a96bad86ef769f
|
[
"BSD-3-Clause"
] | 2
|
2021-11-18T09:35:10.000Z
|
2021-11-18T14:46:32.000Z
|
pygasus/storage/sql/query_builder.py
|
talismud/pygasus
|
fb01c8bd51003b5a008b572182a96bad86ef769f
|
[
"BSD-3-Clause"
] | null | null | null |
pygasus/storage/sql/query_builder.py
|
talismud/pygasus
|
fb01c8bd51003b5a008b572182a96bad86ef769f
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2021, LE GOFF Vincent
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ytranslate nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""SQLAlchemy query builder."""
from pygasus.storage.query_builder import AbstractQueryBuilder
class SQLQueryBuilder(AbstractQueryBuilder):
"""Query builder for SQLAlchemy."""
def _get_table(self, field):
"""Return the table for this model."""
model = field.__model__
model_name = getattr(
model.__config__, "model_name", model.__name__.lower()
)
return self.storage_engine.tables[model_name]
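    # A hedged example (hypothetical model): a model class named User with no
    # explicit __config__.model_name falls back to the lowercased class name,
    # so the lookup above resolves to self.storage_engine.tables["user"].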
def eq(self, field, other):
"""Compare field to other."""
table = self._get_table(field)
return getattr(table.c, field.name) == other
def ne(self, field, other):
"""Compare field to other."""
table = self._get_table(field)
return getattr(table.c, field.name) != other
def lt(self, field, other):
"""Compare field to other."""
table = self._get_table(field)
return getattr(table.c, field.name) < other
def le(self, field, other):
"""Compare field to other."""
table = self._get_table(field)
return getattr(table.c, field.name) <= other
def gt(self, field, other):
"""Compare field to other."""
table = self._get_table(field)
return getattr(table.c, field.name) > other
def ge(self, field, other):
"""Compare field to other."""
table = self._get_table(field)
return getattr(table.c, field.name) >= other
def is_in(self, field, collection):
"""Filter fields with a value in a collection."""
table = self._get_table(field)
return getattr(table.c, field.name).in_(collection)
def is_not_in(self, field, collection):
"""Filter fields with a value not in a collection."""
table = self._get_table(field)
return getattr(table.c, field.name).not_in(collection)
def has(self, field, value):
"""Return models with the field having this value (flag)."""
table = self._get_table(field)
return getattr(table.c, field.name).op("&")(value.value) == value.value
def has_not(self, field, value):
"""Return models without the field having this value (flag)."""
table = self._get_table(field)
return getattr(table.c, field.name).op("&")(value.value) != value.value
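# A hedged note on the bitwise flag test used by has()/has_not() above: for an
# integer flag column the generated SQL is roughly "col & flag = flag", i.e.
# the flag is set iff masking the column with the flag yields the flag itself
# (the column and flag names in that rendering are hypothetical).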
| 39.8125
| 79
| 0.68786
|
from pygasus.storage.query_builder import AbstractQueryBuilder
class SQLQueryBuilder(AbstractQueryBuilder):
def _get_table(self, field):
model = field.__model__
model_name = getattr(
model.__config__, "model_name", model.__name__.lower()
)
return self.storage_engine.tables[model_name]
def eq(self, field, other):
table = self._get_table(field)
return getattr(table.c, field.name) == other
def ne(self, field, other):
table = self._get_table(field)
return getattr(table.c, field.name) != other
def lt(self, field, other):
table = self._get_table(field)
return getattr(table.c, field.name) < other
def le(self, field, other):
table = self._get_table(field)
return getattr(table.c, field.name) <= other
def gt(self, field, other):
table = self._get_table(field)
return getattr(table.c, field.name) > other
def ge(self, field, other):
table = self._get_table(field)
return getattr(table.c, field.name) >= other
def is_in(self, field, collection):
table = self._get_table(field)
return getattr(table.c, field.name).in_(collection)
def is_not_in(self, field, collection):
table = self._get_table(field)
return getattr(table.c, field.name).not_in(collection)
def has(self, field, value):
table = self._get_table(field)
return getattr(table.c, field.name).op("&")(value.value) == value.value
def has_not(self, field, value):
table = self._get_table(field)
return getattr(table.c, field.name).op("&")(value.value) != value.value
| true
| true
|
f719456656da103360e5cac1aae1790b2a55d482
| 20,904
|
py
|
Python
|
octavia/amphorae/drivers/haproxy/rest_api_driver.py
|
acdc-cloud/openstack-octavia
|
f68460ddd31f9b09d59fff876f103324078473a6
|
[
"Apache-2.0"
] | null | null | null |
octavia/amphorae/drivers/haproxy/rest_api_driver.py
|
acdc-cloud/openstack-octavia
|
f68460ddd31f9b09d59fff876f103324078473a6
|
[
"Apache-2.0"
] | null | null | null |
octavia/amphorae/drivers/haproxy/rest_api_driver.py
|
acdc-cloud/openstack-octavia
|
f68460ddd31f9b09d59fff876f103324078473a6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import hashlib
import time
import warnings
from oslo_log import log as logging
import requests
import simplejson
import six
from stevedore import driver as stevedore_driver
from octavia.amphorae.driver_exceptions import exceptions as driver_except
from octavia.amphorae.drivers import driver_base
from octavia.amphorae.drivers.haproxy import exceptions as exc
from octavia.amphorae.drivers.keepalived import vrrp_rest_driver
from octavia.common.config import cfg
from octavia.common import constants as consts
from octavia.common.jinja.haproxy import jinja_cfg
from octavia.common.jinja.lvs import jinja_cfg as jinja_udp_cfg
from octavia.common.tls_utils import cert_parser
from octavia.common import utils
LOG = logging.getLogger(__name__)
API_VERSION = consts.API_VERSION
OCTAVIA_API_CLIENT = (
"Octavia HaProxy Rest Client/{version} "
"(https://wiki.openstack.org/wiki/Octavia)").format(version=API_VERSION)
CONF = cfg.CONF
class HaproxyAmphoraLoadBalancerDriver(
driver_base.AmphoraLoadBalancerDriver,
vrrp_rest_driver.KeepalivedAmphoraDriverMixin):
def __init__(self):
super(HaproxyAmphoraLoadBalancerDriver, self).__init__()
self.client = AmphoraAPIClient()
self.cert_manager = stevedore_driver.DriverManager(
namespace='octavia.cert_manager',
name=CONF.certificates.cert_manager,
invoke_on_load=True,
).driver
self.jinja = jinja_cfg.JinjaTemplater(
base_amp_path=CONF.haproxy_amphora.base_path,
base_crt_dir=CONF.haproxy_amphora.base_cert_dir,
haproxy_template=CONF.haproxy_amphora.haproxy_template,
connection_logging=CONF.haproxy_amphora.connection_logging)
self.udp_jinja = jinja_udp_cfg.LvsJinjaTemplater()
def update_amphora_listeners(self, listeners, amphora_index,
amphorae, timeout_dict=None):
"""Update the amphora with a new configuration.
:param listeners: List of listeners to update.
        :type listeners: list
        :param amphora_index: The index of the amphora to update
        :type amphora_index: int
        :param amphorae: The list of amphorae in the load balancer
        :type amphorae: list
:param timeout_dict: Dictionary of timeout values for calls to the
amphora. May contain: req_conn_timeout,
req_read_timeout, conn_max_retries,
conn_retry_interval
:returns: None
Updates the configuration of the listeners on a single amphora.
"""
# if the amphora does not yet have listeners, no need to update them.
if not listeners:
LOG.debug('No listeners found to update.')
return
amp = amphorae[amphora_index]
if amp is None or amp.status == consts.DELETED:
return
# TODO(johnsom) remove when we don't have a process per listener
for listener in listeners:
LOG.debug("%s updating listener %s on amphora %s",
self.__class__.__name__, listener.id, amp.id)
if listener.protocol == 'UDP':
# Generate Keepalived LVS configuration from listener object
config = self.udp_jinja.build_config(listener=listener)
self.client.upload_udp_config(amp, listener.id, config,
timeout_dict=timeout_dict)
self.client.reload_listener(amp, listener.id,
timeout_dict=timeout_dict)
else:
certs = self._process_tls_certificates(listener)
# Generate HaProxy configuration from listener object
config = self.jinja.build_config(
host_amphora=amp,
listener=listener,
tls_cert=certs['tls_cert'])
self.client.upload_config(amp, listener.id, config,
timeout_dict=timeout_dict)
self.client.reload_listener(amp, listener.id,
timeout_dict=timeout_dict)
def _udp_update(self, listener, vip):
LOG.debug("Amphora %s keepalivedlvs, updating "
"listener %s, vip %s",
self.__class__.__name__, listener.protocol_port,
vip.ip_address)
for amp in listener.load_balancer.amphorae:
if amp.status != consts.DELETED:
# Generate Keepalived LVS configuration from listener object
config = self.udp_jinja.build_config(listener=listener)
self.client.upload_udp_config(amp, listener.id, config)
self.client.reload_listener(amp, listener.id)
def update(self, listener, vip):
if listener.protocol == 'UDP':
self._udp_update(listener, vip)
else:
LOG.debug("Amphora %s haproxy, updating listener %s, "
"vip %s", self.__class__.__name__,
listener.protocol_port,
vip.ip_address)
# Process listener certificate info
certs = self._process_tls_certificates(listener)
for amp in listener.load_balancer.amphorae:
if amp.status != consts.DELETED:
# Generate HaProxy configuration from listener object
config = self.jinja.build_config(
host_amphora=amp,
listener=listener,
tls_cert=certs['tls_cert'])
self.client.upload_config(amp, listener.id, config)
self.client.reload_listener(amp, listener.id)
def upload_cert_amp(self, amp, pem):
LOG.debug("Amphora %s updating cert in REST driver "
"with amphora id %s,",
self.__class__.__name__, amp.id)
self.client.update_cert_for_rotation(amp, pem)
def _apply(self, func, listener=None, amphora=None, *args):
if amphora is None:
for amp in listener.load_balancer.amphorae:
if amp.status != consts.DELETED:
func(amp, listener.id, *args)
else:
if amphora.status != consts.DELETED:
func(amphora, listener.id, *args)
def stop(self, listener, vip):
self._apply(self.client.stop_listener, listener)
def start(self, listener, vip, amphora=None):
self._apply(self.client.start_listener, listener, amphora)
def delete(self, listener, vip):
self._apply(self.client.delete_listener, listener)
def get_info(self, amphora):
return self.client.get_info(amphora)
def get_diagnostics(self, amphora):
pass
def finalize_amphora(self, amphora):
pass
def post_vip_plug(self, amphora, load_balancer, amphorae_network_config):
if amphora.status != consts.DELETED:
subnet = amphorae_network_config.get(amphora.id).vip_subnet
# NOTE(blogan): using the vrrp port here because that
# is what the allowed address pairs network driver sets
# this particular port to. This does expose a bit of
# tight coupling between the network driver and amphora
# driver. We will need to revisit this to try and remove
# this tight coupling.
# NOTE (johnsom): I am loading the vrrp_ip into the
# net_info structure here so that I don't break
# compatibility with old amphora agent versions.
port = amphorae_network_config.get(amphora.id).vrrp_port
LOG.debug("Post-VIP-Plugging with vrrp_ip %s vrrp_port %s",
amphora.vrrp_ip, port.id)
host_routes = [{'nexthop': hr.nexthop,
'destination': hr.destination}
for hr in subnet.host_routes]
net_info = {'subnet_cidr': subnet.cidr,
'gateway': subnet.gateway_ip,
'mac_address': port.mac_address,
'vrrp_ip': amphora.vrrp_ip,
'mtu': port.network.mtu,
'host_routes': host_routes}
try:
self.client.plug_vip(amphora,
load_balancer.vip.ip_address,
net_info)
except exc.Conflict:
LOG.warning('VIP with MAC %(mac)s already exists on amphora, '
'skipping post_vip_plug',
{'mac': port.mac_address})
def post_network_plug(self, amphora, port):
fixed_ips = []
for fixed_ip in port.fixed_ips:
host_routes = [{'nexthop': hr.nexthop,
'destination': hr.destination}
for hr in fixed_ip.subnet.host_routes]
ip = {'ip_address': fixed_ip.ip_address,
'subnet_cidr': fixed_ip.subnet.cidr,
'host_routes': host_routes}
fixed_ips.append(ip)
port_info = {'mac_address': port.mac_address,
'fixed_ips': fixed_ips,
'mtu': port.network.mtu}
try:
self.client.plug_network(amphora, port_info)
except exc.Conflict:
LOG.warning('Network with MAC %(mac)s already exists on amphora, '
'skipping post_network_plug',
{'mac': port.mac_address})
def _process_tls_certificates(self, listener):
"""Processes TLS data from the listener.
        Converts and uploads PEM data to the amphora API and
        returns the TLS cert and SNI certs.
"""
tls_cert = None
sni_certs = []
certs = []
data = cert_parser.load_certificates_data(
self.cert_manager, listener)
if data['tls_cert'] is not None:
tls_cert = data['tls_cert']
certs.append(tls_cert)
if data['sni_certs']:
sni_certs = data['sni_certs']
certs.extend(sni_certs)
for cert in certs:
pem = cert_parser.build_pem(cert)
md5 = hashlib.md5(pem).hexdigest() # nosec
name = '{id}.pem'.format(id=cert.id)
self._apply(self._upload_cert, listener, None, pem, md5, name)
return {'tls_cert': tls_cert, 'sni_certs': sni_certs}
def _upload_cert(self, amp, listener_id, pem, md5, name):
try:
if self.client.get_cert_md5sum(
amp, listener_id, name, ignore=(404,)) == md5:
return
except exc.NotFound:
pass
self.client.upload_cert_pem(
amp, listener_id, name, pem)
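    # A hedged note on the upload pattern above: the PEM upload is skipped when
    # the amphora already reports a matching md5 for that cert, making cert
    # pushes idempotent; NotFound (cert not present yet) falls through to upload.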
# Adapter that verifies the TLS certificate against the amphora ID rather than the hostname
class CustomHostNameCheckingAdapter(requests.adapters.HTTPAdapter):
def cert_verify(self, conn, url, verify, cert):
conn.assert_hostname = self.uuid
return super(CustomHostNameCheckingAdapter,
self).cert_verify(conn, url, verify, cert)
class AmphoraAPIClient(object):
def __init__(self):
super(AmphoraAPIClient, self).__init__()
self.secure = False
self.get = functools.partial(self.request, 'get')
self.post = functools.partial(self.request, 'post')
self.put = functools.partial(self.request, 'put')
self.delete = functools.partial(self.request, 'delete')
self.head = functools.partial(self.request, 'head')
self.start_listener = functools.partial(self._action,
consts.AMP_ACTION_START)
self.stop_listener = functools.partial(self._action,
consts.AMP_ACTION_STOP)
self.reload_listener = functools.partial(self._action,
consts.AMP_ACTION_RELOAD)
self.start_vrrp = functools.partial(self._vrrp_action,
consts.AMP_ACTION_START)
self.stop_vrrp = functools.partial(self._vrrp_action,
consts.AMP_ACTION_STOP)
self.reload_vrrp = functools.partial(self._vrrp_action,
consts.AMP_ACTION_RELOAD)
self.session = requests.Session()
self.session.cert = CONF.haproxy_amphora.client_cert
self.ssl_adapter = CustomHostNameCheckingAdapter()
self.session.mount('https://', self.ssl_adapter)
def _base_url(self, ip):
if utils.is_ipv6_lla(ip):
ip = '[{ip}%{interface}]'.format(
ip=ip,
interface=CONF.haproxy_amphora.lb_network_interface)
elif utils.is_ipv6(ip):
ip = '[{ip}]'.format(ip=ip)
return "https://{ip}:{port}/{version}/".format(
ip=ip,
port=CONF.haproxy_amphora.bind_port,
version=API_VERSION)
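    # A hedged sketch of the URLs this yields (addresses, port, interface name
    # and API version below are hypothetical and configuration-dependent):
    #   _base_url('192.0.2.10')  -> https://192.0.2.10:9443/0.5/
    #   _base_url('2001:db8::1') -> https://[2001:db8::1]:9443/0.5/
    #   _base_url('fe80::1')     -> https://[fe80::1%o-hm0]:9443/0.5/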
def request(self, method, amp, path='/', timeout_dict=None, **kwargs):
cfg_ha_amp = CONF.haproxy_amphora
if timeout_dict is None:
timeout_dict = {}
req_conn_timeout = timeout_dict.get(
consts.REQ_CONN_TIMEOUT, cfg_ha_amp.rest_request_conn_timeout)
req_read_timeout = timeout_dict.get(
consts.REQ_READ_TIMEOUT, cfg_ha_amp.rest_request_read_timeout)
conn_max_retries = timeout_dict.get(
consts.CONN_MAX_RETRIES, cfg_ha_amp.connection_max_retries)
conn_retry_interval = timeout_dict.get(
consts.CONN_RETRY_INTERVAL, cfg_ha_amp.connection_retry_interval)
LOG.debug("request url %s", path)
_request = getattr(self.session, method.lower())
_url = self._base_url(amp.lb_network_ip) + path
LOG.debug("request url %s", _url)
reqargs = {
'verify': CONF.haproxy_amphora.server_ca,
'url': _url,
'timeout': (req_conn_timeout, req_read_timeout), }
reqargs.update(kwargs)
headers = reqargs.setdefault('headers', {})
headers['User-Agent'] = OCTAVIA_API_CLIENT
self.ssl_adapter.uuid = amp.id
exception = None
# Keep retrying
for a in six.moves.xrange(conn_max_retries):
try:
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="A true SSLContext object is not available"
)
r = _request(**reqargs)
LOG.debug('Connected to amphora. Response: %(resp)s',
{'resp': r})
content_type = r.headers.get('content-type', '')
# Check the 404 to see if it is just that the network in the
# amphora is not yet up, in which case retry.
# Otherwise return the response quickly.
if r.status_code == 404:
LOG.debug('Got a 404 (content-type: %(content_type)s) -- '
'connection data: %(content)s',
{'content_type': content_type,
'content': r.content})
if content_type.find("application/json") == -1:
LOG.debug("Amphora agent not ready.")
raise requests.ConnectionError
try:
json_data = r.json().get('details', '')
if 'No suitable network interface found' in json_data:
LOG.debug("Amphora network interface not found.")
raise requests.ConnectionError
except simplejson.JSONDecodeError: # if r.json() fails
pass # TODO(rm_work) Should we do something?
return r
except (requests.ConnectionError, requests.Timeout) as e:
exception = e
LOG.warning("Could not connect to instance. Retrying.")
time.sleep(conn_retry_interval)
LOG.error("Connection retries (currently set to %(max_retries)s) "
"exhausted. The amphora is unavailable. Reason: "
"%(exception)s",
{'max_retries': conn_max_retries,
'exception': exception})
raise driver_except.TimeOutException()
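    # A hedged usage sketch (the amphora object 'amp' here is hypothetical):
    #   client = AmphoraAPIClient()
    #   r = client.get(amp, 'info', timeout_dict={consts.REQ_CONN_TIMEOUT: 5})
    # Retries run at a fixed interval; a 404 without a JSON body is treated as
    # "agent not up yet" and retried rather than returned to the caller.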
def upload_config(self, amp, listener_id, config, timeout_dict=None):
r = self.put(
amp,
'listeners/{amphora_id}/{listener_id}/haproxy'.format(
amphora_id=amp.id, listener_id=listener_id), timeout_dict,
data=config)
return exc.check_exception(r)
def get_listener_status(self, amp, listener_id):
r = self.get(
amp,
'listeners/{listener_id}'.format(listener_id=listener_id))
if exc.check_exception(r):
return r.json()
return None
def _action(self, action, amp, listener_id, timeout_dict=None):
r = self.put(amp, 'listeners/{listener_id}/{action}'.format(
listener_id=listener_id, action=action), timeout_dict=timeout_dict)
return exc.check_exception(r)
def upload_cert_pem(self, amp, listener_id, pem_filename, pem_file):
r = self.put(
amp,
'listeners/{listener_id}/certificates/{filename}'.format(
listener_id=listener_id, filename=pem_filename),
data=pem_file)
return exc.check_exception(r)
def update_cert_for_rotation(self, amp, pem_file):
r = self.put(amp, 'certificate', data=pem_file)
return exc.check_exception(r)
def get_cert_md5sum(self, amp, listener_id, pem_filename, ignore=tuple()):
r = self.get(amp,
'listeners/{listener_id}/certificates/{filename}'.format(
listener_id=listener_id, filename=pem_filename))
if exc.check_exception(r, ignore):
return r.json().get("md5sum")
return None
def delete_listener(self, amp, listener_id):
r = self.delete(
amp, 'listeners/{listener_id}'.format(listener_id=listener_id))
return exc.check_exception(r, (404,))
def get_info(self, amp):
r = self.get(amp, "info")
if exc.check_exception(r):
return r.json()
return None
def get_details(self, amp):
r = self.get(amp, "details")
if exc.check_exception(r):
return r.json()
return None
def get_all_listeners(self, amp):
r = self.get(amp, "listeners")
if exc.check_exception(r):
return r.json()
return None
def delete_cert_pem(self, amp, listener_id, pem_filename):
r = self.delete(
amp,
'listeners/{listener_id}/certificates/{filename}'.format(
listener_id=listener_id, filename=pem_filename))
return exc.check_exception(r, (404,))
def plug_network(self, amp, port):
r = self.post(amp, 'plug/network',
json=port)
return exc.check_exception(r)
def plug_vip(self, amp, vip, net_info):
r = self.post(amp,
'plug/vip/{vip}'.format(vip=vip),
json=net_info)
return exc.check_exception(r)
def upload_vrrp_config(self, amp, config):
r = self.put(amp, 'vrrp/upload', data=config)
return exc.check_exception(r)
def _vrrp_action(self, action, amp):
r = self.put(amp, 'vrrp/{action}'.format(action=action))
return exc.check_exception(r)
def get_interface(self, amp, ip_addr, timeout_dict=None):
r = self.get(amp, 'interface/{ip_addr}'.format(ip_addr=ip_addr),
timeout_dict=timeout_dict)
if exc.check_exception(r):
return r.json()
return None
def upload_udp_config(self, amp, listener_id, config, timeout_dict=None):
r = self.put(
amp,
'listeners/{amphora_id}/{listener_id}/udp_listener'.format(
amphora_id=amp.id, listener_id=listener_id), timeout_dict,
data=config)
return exc.check_exception(r)
| 41.975904
| 79
| 0.591179
|
import functools
import hashlib
import time
import warnings
from oslo_log import log as logging
import requests
import simplejson
import six
from stevedore import driver as stevedore_driver
from octavia.amphorae.driver_exceptions import exceptions as driver_except
from octavia.amphorae.drivers import driver_base
from octavia.amphorae.drivers.haproxy import exceptions as exc
from octavia.amphorae.drivers.keepalived import vrrp_rest_driver
from octavia.common.config import cfg
from octavia.common import constants as consts
from octavia.common.jinja.haproxy import jinja_cfg
from octavia.common.jinja.lvs import jinja_cfg as jinja_udp_cfg
from octavia.common.tls_utils import cert_parser
from octavia.common import utils
LOG = logging.getLogger(__name__)
API_VERSION = consts.API_VERSION
OCTAVIA_API_CLIENT = (
"Octavia HaProxy Rest Client/{version} "
"(https://wiki.openstack.org/wiki/Octavia)").format(version=API_VERSION)
CONF = cfg.CONF
class HaproxyAmphoraLoadBalancerDriver(
driver_base.AmphoraLoadBalancerDriver,
vrrp_rest_driver.KeepalivedAmphoraDriverMixin):
def __init__(self):
super(HaproxyAmphoraLoadBalancerDriver, self).__init__()
self.client = AmphoraAPIClient()
self.cert_manager = stevedore_driver.DriverManager(
namespace='octavia.cert_manager',
name=CONF.certificates.cert_manager,
invoke_on_load=True,
).driver
self.jinja = jinja_cfg.JinjaTemplater(
base_amp_path=CONF.haproxy_amphora.base_path,
base_crt_dir=CONF.haproxy_amphora.base_cert_dir,
haproxy_template=CONF.haproxy_amphora.haproxy_template,
connection_logging=CONF.haproxy_amphora.connection_logging)
self.udp_jinja = jinja_udp_cfg.LvsJinjaTemplater()
def update_amphora_listeners(self, listeners, amphora_index,
amphorae, timeout_dict=None):
if not listeners:
LOG.debug('No listeners found to update.')
return
amp = amphorae[amphora_index]
if amp is None or amp.status == consts.DELETED:
return
for listener in listeners:
LOG.debug("%s updating listener %s on amphora %s",
self.__class__.__name__, listener.id, amp.id)
if listener.protocol == 'UDP':
# Generate Keepalived LVS configuration from listener object
config = self.udp_jinja.build_config(listener=listener)
self.client.upload_udp_config(amp, listener.id, config,
timeout_dict=timeout_dict)
self.client.reload_listener(amp, listener.id,
timeout_dict=timeout_dict)
else:
certs = self._process_tls_certificates(listener)
# Generate HaProxy configuration from listener object
config = self.jinja.build_config(
host_amphora=amp,
listener=listener,
tls_cert=certs['tls_cert'])
self.client.upload_config(amp, listener.id, config,
timeout_dict=timeout_dict)
self.client.reload_listener(amp, listener.id,
timeout_dict=timeout_dict)
def _udp_update(self, listener, vip):
LOG.debug("Amphora %s keepalivedlvs, updating "
"listener %s, vip %s",
self.__class__.__name__, listener.protocol_port,
vip.ip_address)
for amp in listener.load_balancer.amphorae:
if amp.status != consts.DELETED:
# Generate Keepalived LVS configuration from listener object
config = self.udp_jinja.build_config(listener=listener)
self.client.upload_udp_config(amp, listener.id, config)
self.client.reload_listener(amp, listener.id)
def update(self, listener, vip):
if listener.protocol == 'UDP':
self._udp_update(listener, vip)
else:
LOG.debug("Amphora %s haproxy, updating listener %s, "
"vip %s", self.__class__.__name__,
listener.protocol_port,
vip.ip_address)
# Process listener certificate info
certs = self._process_tls_certificates(listener)
for amp in listener.load_balancer.amphorae:
if amp.status != consts.DELETED:
# Generate HaProxy configuration from listener object
config = self.jinja.build_config(
host_amphora=amp,
listener=listener,
tls_cert=certs['tls_cert'])
self.client.upload_config(amp, listener.id, config)
self.client.reload_listener(amp, listener.id)
def upload_cert_amp(self, amp, pem):
LOG.debug("Amphora %s updating cert in REST driver "
"with amphora id %s,",
self.__class__.__name__, amp.id)
self.client.update_cert_for_rotation(amp, pem)
def _apply(self, func, listener=None, amphora=None, *args):
if amphora is None:
for amp in listener.load_balancer.amphorae:
if amp.status != consts.DELETED:
func(amp, listener.id, *args)
else:
if amphora.status != consts.DELETED:
func(amphora, listener.id, *args)
def stop(self, listener, vip):
self._apply(self.client.stop_listener, listener)
def start(self, listener, vip, amphora=None):
self._apply(self.client.start_listener, listener, amphora)
def delete(self, listener, vip):
self._apply(self.client.delete_listener, listener)
def get_info(self, amphora):
return self.client.get_info(amphora)
def get_diagnostics(self, amphora):
pass
def finalize_amphora(self, amphora):
pass
def post_vip_plug(self, amphora, load_balancer, amphorae_network_config):
if amphora.status != consts.DELETED:
subnet = amphorae_network_config.get(amphora.id).vip_subnet
# NOTE(blogan): using the vrrp port here because that
# is what the allowed address pairs network driver sets
# this particular port to. This does expose a bit of
# tight coupling between the network driver and amphora
# driver. We will need to revisit this to try and remove
# this tight coupling.
# NOTE (johnsom): I am loading the vrrp_ip into the
            # net_info structure here so that I don't break
            # compatibility with old amphora agent versions.
port = amphorae_network_config.get(amphora.id).vrrp_port
LOG.debug("Post-VIP-Plugging with vrrp_ip %s vrrp_port %s",
amphora.vrrp_ip, port.id)
host_routes = [{'nexthop': hr.nexthop,
'destination': hr.destination}
for hr in subnet.host_routes]
net_info = {'subnet_cidr': subnet.cidr,
'gateway': subnet.gateway_ip,
'mac_address': port.mac_address,
'vrrp_ip': amphora.vrrp_ip,
'mtu': port.network.mtu,
'host_routes': host_routes}
try:
self.client.plug_vip(amphora,
load_balancer.vip.ip_address,
net_info)
except exc.Conflict:
LOG.warning('VIP with MAC %(mac)s already exists on amphora, '
'skipping post_vip_plug',
{'mac': port.mac_address})
def post_network_plug(self, amphora, port):
fixed_ips = []
for fixed_ip in port.fixed_ips:
host_routes = [{'nexthop': hr.nexthop,
'destination': hr.destination}
for hr in fixed_ip.subnet.host_routes]
ip = {'ip_address': fixed_ip.ip_address,
'subnet_cidr': fixed_ip.subnet.cidr,
'host_routes': host_routes}
fixed_ips.append(ip)
port_info = {'mac_address': port.mac_address,
'fixed_ips': fixed_ips,
'mtu': port.network.mtu}
try:
self.client.plug_network(amphora, port_info)
except exc.Conflict:
LOG.warning('Network with MAC %(mac)s already exists on amphora, '
'skipping post_network_plug',
{'mac': port.mac_address})
def _process_tls_certificates(self, listener):
tls_cert = None
sni_certs = []
certs = []
data = cert_parser.load_certificates_data(
self.cert_manager, listener)
if data['tls_cert'] is not None:
tls_cert = data['tls_cert']
certs.append(tls_cert)
if data['sni_certs']:
sni_certs = data['sni_certs']
certs.extend(sni_certs)
for cert in certs:
pem = cert_parser.build_pem(cert)
md5 = hashlib.md5(pem).hexdigest()
name = '{id}.pem'.format(id=cert.id)
self._apply(self._upload_cert, listener, None, pem, md5, name)
return {'tls_cert': tls_cert, 'sni_certs': sni_certs}
def _upload_cert(self, amp, listener_id, pem, md5, name):
try:
if self.client.get_cert_md5sum(
amp, listener_id, name, ignore=(404,)) == md5:
return
except exc.NotFound:
pass
self.client.upload_cert_pem(
amp, listener_id, name, pem)
class CustomHostNameCheckingAdapter(requests.adapters.HTTPAdapter):
def cert_verify(self, conn, url, verify, cert):
conn.assert_hostname = self.uuid
return super(CustomHostNameCheckingAdapter,
self).cert_verify(conn, url, verify, cert)
class AmphoraAPIClient(object):
def __init__(self):
super(AmphoraAPIClient, self).__init__()
self.secure = False
self.get = functools.partial(self.request, 'get')
self.post = functools.partial(self.request, 'post')
self.put = functools.partial(self.request, 'put')
self.delete = functools.partial(self.request, 'delete')
self.head = functools.partial(self.request, 'head')
self.start_listener = functools.partial(self._action,
consts.AMP_ACTION_START)
self.stop_listener = functools.partial(self._action,
consts.AMP_ACTION_STOP)
self.reload_listener = functools.partial(self._action,
consts.AMP_ACTION_RELOAD)
self.start_vrrp = functools.partial(self._vrrp_action,
consts.AMP_ACTION_START)
self.stop_vrrp = functools.partial(self._vrrp_action,
consts.AMP_ACTION_STOP)
self.reload_vrrp = functools.partial(self._vrrp_action,
consts.AMP_ACTION_RELOAD)
self.session = requests.Session()
self.session.cert = CONF.haproxy_amphora.client_cert
self.ssl_adapter = CustomHostNameCheckingAdapter()
self.session.mount('https://', self.ssl_adapter)
def _base_url(self, ip):
if utils.is_ipv6_lla(ip):
ip = '[{ip}%{interface}]'.format(
ip=ip,
interface=CONF.haproxy_amphora.lb_network_interface)
elif utils.is_ipv6(ip):
ip = '[{ip}]'.format(ip=ip)
return "https://{ip}:{port}/{version}/".format(
ip=ip,
port=CONF.haproxy_amphora.bind_port,
version=API_VERSION)
def request(self, method, amp, path='/', timeout_dict=None, **kwargs):
cfg_ha_amp = CONF.haproxy_amphora
if timeout_dict is None:
timeout_dict = {}
req_conn_timeout = timeout_dict.get(
consts.REQ_CONN_TIMEOUT, cfg_ha_amp.rest_request_conn_timeout)
req_read_timeout = timeout_dict.get(
consts.REQ_READ_TIMEOUT, cfg_ha_amp.rest_request_read_timeout)
conn_max_retries = timeout_dict.get(
consts.CONN_MAX_RETRIES, cfg_ha_amp.connection_max_retries)
conn_retry_interval = timeout_dict.get(
consts.CONN_RETRY_INTERVAL, cfg_ha_amp.connection_retry_interval)
LOG.debug("request url %s", path)
_request = getattr(self.session, method.lower())
_url = self._base_url(amp.lb_network_ip) + path
LOG.debug("request url %s", _url)
reqargs = {
'verify': CONF.haproxy_amphora.server_ca,
'url': _url,
'timeout': (req_conn_timeout, req_read_timeout), }
reqargs.update(kwargs)
headers = reqargs.setdefault('headers', {})
headers['User-Agent'] = OCTAVIA_API_CLIENT
self.ssl_adapter.uuid = amp.id
exception = None
for a in six.moves.xrange(conn_max_retries):
try:
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="A true SSLContext object is not available"
)
r = _request(**reqargs)
LOG.debug('Connected to amphora. Response: %(resp)s',
{'resp': r})
content_type = r.headers.get('content-type', '')
if r.status_code == 404:
LOG.debug('Got a 404 (content-type: %(content_type)s) -- '
'connection data: %(content)s',
{'content_type': content_type,
'content': r.content})
if content_type.find("application/json") == -1:
LOG.debug("Amphora agent not ready.")
raise requests.ConnectionError
try:
json_data = r.json().get('details', '')
if 'No suitable network interface found' in json_data:
LOG.debug("Amphora network interface not found.")
raise requests.ConnectionError
except simplejson.JSONDecodeError:
pass
return r
except (requests.ConnectionError, requests.Timeout) as e:
exception = e
LOG.warning("Could not connect to instance. Retrying.")
time.sleep(conn_retry_interval)
LOG.error("Connection retries (currently set to %(max_retries)s) "
"exhausted. The amphora is unavailable. Reason: "
"%(exception)s",
{'max_retries': conn_max_retries,
'exception': exception})
raise driver_except.TimeOutException()
def upload_config(self, amp, listener_id, config, timeout_dict=None):
r = self.put(
amp,
'listeners/{amphora_id}/{listener_id}/haproxy'.format(
amphora_id=amp.id, listener_id=listener_id), timeout_dict,
data=config)
return exc.check_exception(r)
def get_listener_status(self, amp, listener_id):
r = self.get(
amp,
'listeners/{listener_id}'.format(listener_id=listener_id))
if exc.check_exception(r):
return r.json()
return None
def _action(self, action, amp, listener_id, timeout_dict=None):
r = self.put(amp, 'listeners/{listener_id}/{action}'.format(
listener_id=listener_id, action=action), timeout_dict=timeout_dict)
return exc.check_exception(r)
def upload_cert_pem(self, amp, listener_id, pem_filename, pem_file):
r = self.put(
amp,
'listeners/{listener_id}/certificates/{filename}'.format(
listener_id=listener_id, filename=pem_filename),
data=pem_file)
return exc.check_exception(r)
def update_cert_for_rotation(self, amp, pem_file):
r = self.put(amp, 'certificate', data=pem_file)
return exc.check_exception(r)
def get_cert_md5sum(self, amp, listener_id, pem_filename, ignore=tuple()):
r = self.get(amp,
'listeners/{listener_id}/certificates/{filename}'.format(
listener_id=listener_id, filename=pem_filename))
if exc.check_exception(r, ignore):
return r.json().get("md5sum")
return None
def delete_listener(self, amp, listener_id):
r = self.delete(
amp, 'listeners/{listener_id}'.format(listener_id=listener_id))
return exc.check_exception(r, (404,))
def get_info(self, amp):
r = self.get(amp, "info")
if exc.check_exception(r):
return r.json()
return None
def get_details(self, amp):
r = self.get(amp, "details")
if exc.check_exception(r):
return r.json()
return None
def get_all_listeners(self, amp):
r = self.get(amp, "listeners")
if exc.check_exception(r):
return r.json()
return None
def delete_cert_pem(self, amp, listener_id, pem_filename):
r = self.delete(
amp,
'listeners/{listener_id}/certificates/{filename}'.format(
listener_id=listener_id, filename=pem_filename))
return exc.check_exception(r, (404,))
def plug_network(self, amp, port):
r = self.post(amp, 'plug/network',
json=port)
return exc.check_exception(r)
def plug_vip(self, amp, vip, net_info):
r = self.post(amp,
'plug/vip/{vip}'.format(vip=vip),
json=net_info)
return exc.check_exception(r)
def upload_vrrp_config(self, amp, config):
r = self.put(amp, 'vrrp/upload', data=config)
return exc.check_exception(r)
def _vrrp_action(self, action, amp):
r = self.put(amp, 'vrrp/{action}'.format(action=action))
return exc.check_exception(r)
def get_interface(self, amp, ip_addr, timeout_dict=None):
r = self.get(amp, 'interface/{ip_addr}'.format(ip_addr=ip_addr),
timeout_dict=timeout_dict)
if exc.check_exception(r):
return r.json()
return None
def upload_udp_config(self, amp, listener_id, config, timeout_dict=None):
r = self.put(
amp,
'listeners/{amphora_id}/{listener_id}/udp_listener'.format(
amphora_id=amp.id, listener_id=listener_id), timeout_dict,
data=config)
return exc.check_exception(r)
| true
| true
|
f71945ac8b27591fea8b5cba11d2e49aa0f11109
| 8,813
|
py
|
Python
|
__init__.py
|
mastnym/cbpi-SimpleCascadeHysteresis
|
06e0a5d46868aaf7fef304eaed3308dc9b5ed269
|
[
"MIT"
] | null | null | null |
__init__.py
|
mastnym/cbpi-SimpleCascadeHysteresis
|
06e0a5d46868aaf7fef304eaed3308dc9b5ed269
|
[
"MIT"
] | null | null | null |
__init__.py
|
mastnym/cbpi-SimpleCascadeHysteresis
|
06e0a5d46868aaf7fef304eaed3308dc9b5ed269
|
[
"MIT"
] | null | null | null |
import time
from modules import cbpi
from modules.core.controller import KettleController
from modules.core.props import Property
@cbpi.controller
class SimpleCascadeHysteresis(KettleController):
"""
    This hysteresis controls the mash tun (MT) temp. It applies hysteresis to the HLT temp so that
    it cannot rise much above the desired mash tun temp (the target). Offsets around the target can
    be configured, and the HLT temp is held within them, so the HLT temp overshoots far less.
    In other words, the target temp is set for the mash tun but regulated with hysteresis in the HLT.
    There is also a "safety check": the temp of the coil/tube in HERMS/RIMS breweries, which is often
    much higher than the desired target temp. In this plugin, that temp also switches the heater off,
    with an adjustable offset.
"""
    pos_off_desc = "Positive value indicating how far above the target temp the actor may stay switched on. If target is 55 and offset is 1, the heater will switch off when reaching 56."
    neg_off_desc = "Positive value indicating how far below the target temp the actor may stay switched off. If target is 55 and offset is 1, the heater will switch back on when reaching 54."
    coil_sensor_desc = "Safety measurement for preventing overheating in the HERMS coil or RIMS tube. Leave blank if you don't have a sensor after the coil/tube."
    coil_off_desc = "Positive value indicating when the heater will switch off: if the temp at the end of the coil/tube is above the target by this value or more, the heater switches off. This helps prevent raising the HLT temp too much."
a_hyst_sensor = Property.Sensor(label="HLT sensor")
b_hysteresis_positive_offset = Property.Number("Positive offset for hysteresis", True, 1, description=pos_off_desc)
c_hysteresis_negative_offset = Property.Number("Negative offset for hysteresis", True, 0, description=neg_off_desc)
d_on_min = Property.Number("Hysteresis Minimum Time On (s)", True, 60)
e_off_min = Property.Number("Hysteresis Minimum Time Off (s)", True, 60)
f_coil_tube_sensor = Property.Sensor(label="Sensor after the HERMS coil or RIMS tube", description=coil_sensor_desc)
g_coil_positive_offset = Property.Number("Positive offset for coil/tube", True, 1.5, description=coil_off_desc)
def stop(self):
self.heater_off()
super(KettleController, self).stop()
def run(self):
on_min = abs(float(self.d_on_min))
off_min = abs(float(self.e_off_min))
hyst_pos_offset = abs(float(self.b_hysteresis_positive_offset))
hyst_neg_offset = abs(float(self.c_hysteresis_negative_offset))
coil_pos_offset = abs(float(self.g_coil_positive_offset))
hyst_sensor = int(self.a_hyst_sensor)
if not self.f_coil_tube_sensor:
coil_sensor = None
coil_pos_offset = None
else:
coil_sensor = int(self.f_coil_tube_sensor)
h = HysteresisWithTimeChecksAndSafetySwitch(True,
hyst_pos_offset,
hyst_neg_offset,
on_min,
off_min,
safety_switch_offset=coil_pos_offset)
heater_on = False
while self.is_running():
waketime = time.time() + 3
target = self.get_target_temp()
current = self.get_temp()
# target reached in MT we can switch off no matter what
if current >= target:
self.heater_off()
cbpi.app.logger.info("[%s] Target temp reached" % (waketime))
self.sleep(waketime - time.time())
continue
# get control switch temp only if we have control switch
control = None
if coil_sensor is not None:
control = float(cbpi.cache.get("sensors")[coil_sensor].instance.last_value)
hyst_temp = float(cbpi.cache.get("sensors")[hyst_sensor].instance.last_value)
# Update the hysteresis controller
try:
heater_on = h.run(hyst_temp, target, control)
except TimeIntervalNotPassed as e:
self.notify("Hysteresis warning", e.message, type="warning", timeout=1500)
if heater_on:
self.heater_on(100)
cbpi.app.logger.info("[%s] Actor stays ON" % (waketime))
else:
self.heater_off()
cbpi.app.logger.info("[%s] Actor stays OFF" % (waketime))
# Sleep until update required again
if waketime <= time.time() + 0.25:
self.notify("Hysteresis Error", "Update interval is too short", type="warning")
cbpi.app.logger.info("Hysteresis - Update interval is too short")
else:
self.sleep(waketime - time.time())
class Hysteresis(object):
ROUND = 2
def __init__(self, rising, off_offset, on_offset):
self.rising = rising
self.off_offset = abs(off_offset)
self.on_offset = abs(on_offset)
self.action = False
def switch_off(self):
self.action = False
def switch_on(self):
self.action = True
def round(self, *args):
        return [round(arg, self.ROUND) for arg in args]
def run(self, current, target):
current, target = self.round(current, target)
# Switching off rising eg heating
if self.rising and current >= (target + self.off_offset):
self.switch_off()
# Switching off dropping eg cooling
elif not self.rising and current <= (target - self.off_offset):
self.switch_off()
# switching on rising eg heater
elif self.rising and current <= target - self.on_offset:
self.switch_on()
# Switching on dropping eg cooling
elif not self.rising and current >= target + self.on_offset:
self.switch_on()
return self.action
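    # A hedged trace for a rising (heating) controller with off_offset=1,
    # on_offset=1 and target 55 (hypothetical readings): 56.2 -> switch off,
    # 54.5 (between the thresholds) -> state unchanged, 53.9 -> switch on.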
class HysteresisSafetySwitch(object):
"""
Safety switch is another value which controls the hysteresis and has precedence of current value
"""
def __init__(self, *args, **kwargs):
self.ss_offset = kwargs.pop("safety_switch_offset", None)
if self.ss_offset is not None:
self.ss_offset = abs(self.ss_offset)
super(HysteresisSafetySwitch, self).__init__(*args, **kwargs)
def run(self, current, target, control):
# not using this switch, run regular hysteresis
if self.ss_offset is None or control is None:
super(HysteresisSafetySwitch, self).run(current, target)
return self.action
current, target, control = self.round(current, target, control)
if self.rising and control >= target + self.ss_offset:
self.switch_off()
elif not self.rising and control <= target - self.ss_offset:
self.switch_off()
else:
super(HysteresisSafetySwitch, self).run(current, target)
return self.action
class HysteresisWithSafetySwitch(HysteresisSafetySwitch, Hysteresis):
pass
class TimeIntervalNotPassed(Exception):
pass
class HysteresisWithTimeChecks(Hysteresis):
def __init__(self, rising, off_offset, on_offset, minimum_time_on, minimum_time_off):
super(HysteresisWithTimeChecks, self).__init__(rising, off_offset, on_offset)
self.min_on = minimum_time_on
self.min_off = minimum_time_off
self.last_switch = None
def switch_off(self):
        # Already off - nothing to switch
if not self.action:
return
# last time should not be None when switching off
elapsed = time.time() - self.last_switch
if elapsed >= self.min_on:
self.last_switch = time.time()
super(HysteresisWithTimeChecks, self).switch_off()
else:
raise TimeIntervalNotPassed(
"Should be switching off now, but can't because of safety interval set (time since last switch: {}s)".format(
round(elapsed, 0)))
def switch_on(self):
        # Already on - nothing to switch
if self.action:
return
if self.last_switch is None or time.time() - self.last_switch >= self.min_off:
self.last_switch = time.time()
super(HysteresisWithTimeChecks, self).switch_on()
else:
raise TimeIntervalNotPassed(
"Should be switching on now, but can't because of safety interval set (time since last switch: {}s).".format(
round(time.time() - self.last_switch, 0)))
class HysteresisWithTimeChecksAndSafetySwitch(HysteresisSafetySwitch, HysteresisWithTimeChecks):
pass
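# A hedged note on the combined class above: its MRO puts
# HysteresisSafetySwitch.run() first (the coil/tube safety check), which defers
# to the plain threshold logic in Hysteresis.run(), whose switch_on()/switch_off()
# calls in turn dispatch through the time guards in HysteresisWithTimeChecks.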
| 44.510101
| 214
| 0.641439
|
import time
from modules import cbpi
from modules.core.controller import KettleController
from modules.core.props import Property
@cbpi.controller
class SimpleCascadeHysteresis(KettleController):
    pos_off_desc = "Positive value indicating how far above the target temp the actor may stay switched on. If target is 55 and offset is 1, the heater will switch off when reaching 56."
    neg_off_desc = "Positive value indicating how far below the target temp the actor may stay switched off. If target is 55 and offset is 1, the heater will switch back on when reaching 54."
    coil_sensor_desc = "Safety measurement for preventing overheating in the HERMS coil or RIMS tube. Leave blank if you don't have a sensor after the coil/tube."
    coil_off_desc = "Positive value indicating when the heater will switch off: if the temp at the end of the coil/tube is above the target by this value or more, the heater switches off. This helps prevent raising the HLT temp too much."
a_hyst_sensor = Property.Sensor(label="HLT sensor")
b_hysteresis_positive_offset = Property.Number("Positive offset for hysteresis", True, 1, description=pos_off_desc)
c_hysteresis_negative_offset = Property.Number("Negative offset for hysteresis", True, 0, description=neg_off_desc)
d_on_min = Property.Number("Hysteresis Minimum Time On (s)", True, 60)
e_off_min = Property.Number("Hysteresis Minimum Time Off (s)", True, 60)
f_coil_tube_sensor = Property.Sensor(label="Sensor after the HERMS coil or RIMS tube", description=coil_sensor_desc)
g_coil_positive_offset = Property.Number("Positive offset for coil/tube", True, 1.5, description=coil_off_desc)
def stop(self):
self.heater_off()
super(KettleController, self).stop()
def run(self):
on_min = abs(float(self.d_on_min))
off_min = abs(float(self.e_off_min))
hyst_pos_offset = abs(float(self.b_hysteresis_positive_offset))
hyst_neg_offset = abs(float(self.c_hysteresis_negative_offset))
coil_pos_offset = abs(float(self.g_coil_positive_offset))
hyst_sensor = int(self.a_hyst_sensor)
if not self.f_coil_tube_sensor:
coil_sensor = None
coil_pos_offset = None
else:
coil_sensor = int(self.f_coil_tube_sensor)
h = HysteresisWithTimeChecksAndSafetySwitch(True,
hyst_pos_offset,
hyst_neg_offset,
on_min,
off_min,
safety_switch_offset=coil_pos_offset)
heater_on = False
while self.is_running():
waketime = time.time() + 3
target = self.get_target_temp()
current = self.get_temp()
# target reached in MT we can switch off no matter what
if current >= target:
self.heater_off()
cbpi.app.logger.info("[%s] Target temp reached" % (waketime))
self.sleep(waketime - time.time())
continue
# get control switch temp only if we have control switch
control = None
if coil_sensor is not None:
control = float(cbpi.cache.get("sensors")[coil_sensor].instance.last_value)
hyst_temp = float(cbpi.cache.get("sensors")[hyst_sensor].instance.last_value)
# Update the hysteresis controller
try:
heater_on = h.run(hyst_temp, target, control)
except TimeIntervalNotPassed as e:
self.notify("Hysteresis warning", e.message, type="warning", timeout=1500)
if heater_on:
self.heater_on(100)
cbpi.app.logger.info("[%s] Actor stays ON" % (waketime))
else:
self.heater_off()
cbpi.app.logger.info("[%s] Actor stays OFF" % (waketime))
# Sleep until update required again
if waketime <= time.time() + 0.25:
self.notify("Hysteresis Error", "Update interval is too short", type="warning")
cbpi.app.logger.info("Hysteresis - Update interval is too short")
else:
self.sleep(waketime - time.time())
class Hysteresis(object):
ROUND = 2
def __init__(self, rising, off_offset, on_offset):
self.rising = rising
self.off_offset = abs(off_offset)
self.on_offset = abs(on_offset)
self.action = False
def switch_off(self):
self.action = False
def switch_on(self):
self.action = True
def round(self, *args):
        return [round(arg, self.ROUND) for arg in args]
def run(self, current, target):
current, target = self.round(current, target)
# Switching off rising eg heating
if self.rising and current >= (target + self.off_offset):
self.switch_off()
# Switching off dropping eg cooling
elif not self.rising and current <= (target - self.off_offset):
self.switch_off()
# switching on rising eg heater
elif self.rising and current <= target - self.on_offset:
self.switch_on()
# Switching on dropping eg cooling
elif not self.rising and current >= target + self.on_offset:
self.switch_on()
return self.action
class HysteresisSafetySwitch(object):
def __init__(self, *args, **kwargs):
self.ss_offset = kwargs.pop("safety_switch_offset", None)
if self.ss_offset is not None:
self.ss_offset = abs(self.ss_offset)
super(HysteresisSafetySwitch, self).__init__(*args, **kwargs)
def run(self, current, target, control):
# not using this switch, run regular hysteresis
if self.ss_offset is None or control is None:
super(HysteresisSafetySwitch, self).run(current, target)
return self.action
current, target, control = self.round(current, target, control)
if self.rising and control >= target + self.ss_offset:
self.switch_off()
elif not self.rising and control <= target - self.ss_offset:
self.switch_off()
else:
super(HysteresisSafetySwitch, self).run(current, target)
return self.action
class HysteresisWithSafetySwitch(HysteresisSafetySwitch, Hysteresis):
pass
class TimeIntervalNotPassed(Exception):
pass
class HysteresisWithTimeChecks(Hysteresis):
def __init__(self, rising, off_offset, on_offset, minimum_time_on, minimum_time_off):
super(HysteresisWithTimeChecks, self).__init__(rising, off_offset, on_offset)
self.min_on = minimum_time_on
self.min_off = minimum_time_off
self.last_switch = None
def switch_off(self):
        # Already off - nothing to switch
if not self.action:
return
# last time should not be None when switching off
elapsed = time.time() - self.last_switch
if elapsed >= self.min_on:
self.last_switch = time.time()
super(HysteresisWithTimeChecks, self).switch_off()
else:
            raise TimeIntervalNotPassed(
                "Should be switching off now, but can't: the minimum on-time has not elapsed (time since last switch: {}s)".format(
                    round(elapsed, 0)))
def switch_on(self):
        # Already on; nothing to do
        if self.action:
            return
if self.last_switch is None or time.time() - self.last_switch >= self.min_off:
self.last_switch = time.time()
super(HysteresisWithTimeChecks, self).switch_on()
else:
            raise TimeIntervalNotPassed(
                "Should be switching on now, but can't: the minimum off-time has not elapsed (time since last switch: {}s).".format(
                    round(time.time() - self.last_switch, 0)))
class HysteresisWithTimeChecksAndSafetySwitch(HysteresisSafetySwitch, HysteresisWithTimeChecks):
pass
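# Illustrative self-test (a hedged sketch, not part of the original plugin):
# drives the plain two-offset band directly with hypothetical values.
if __name__ == "__main__":
    demo = Hysteresis(True, off_offset=0.5, on_offset=1.0)
    # 64.0 <= 66.0 - 1.0, so the rising controller switches on
    assert demo.run(64.0, 66.0) is True
    # 66.6 >= 66.0 + 0.5, so it switches off again
    assert demo.run(66.6, 66.0) is False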
| true
| true
|
f719467cb68b35eb1744903f1645e3dc36285aae
| 182
|
py
|
Python
|
Others/joi/joi2021yo1b/a/main.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | 2
|
2020-06-12T09:54:23.000Z
|
2021-05-04T01:34:07.000Z
|
Others/joi/joi2021yo1b/a/main.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | 961
|
2020-06-23T07:26:22.000Z
|
2022-03-31T21:34:52.000Z
|
Others/joi/joi2021yo1b/a/main.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
def main():
a, b, c = map(int, input().split())
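    # Print 1 when c lies in the half-open interval [a, b), otherwise 0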
if a <= c < b:
print(1)
else:
print(0)
if __name__ == "__main__":
main()
| 12.133333
| 39
| 0.434066
|
def main():
a, b, c = map(int, input().split())
if a <= c < b:
print(1)
else:
print(0)
if __name__ == "__main__":
main()
| true
| true
|
f719472b0ea2f7adf53faaf80d9dfeb1915076da
| 286
|
py
|
Python
|
awswrangler/__metadata__.py
|
Thiago-Dantas/aws-data-wrangler
|
b13fcd8d169feb3219b4b4fff025dc6089cfe03b
|
[
"Apache-2.0"
] | 1
|
2021-04-27T12:56:28.000Z
|
2021-04-27T12:56:28.000Z
|
awswrangler/__metadata__.py
|
Thiago-Dantas/aws-data-wrangler
|
b13fcd8d169feb3219b4b4fff025dc6089cfe03b
|
[
"Apache-2.0"
] | 63
|
2021-05-31T08:35:17.000Z
|
2022-03-28T08:12:04.000Z
|
awswrangler/__metadata__.py
|
kukushking/aws-data-wrangler
|
c91188472f96b222c943b35b3b082c0ba5e54745
|
[
"Apache-2.0"
] | null | null | null |
"""Metadata Module.
Source repository: https://github.com/awslabs/aws-data-wrangler
Documentation: https://aws-data-wrangler.readthedocs.io/
"""
__title__: str = "awswrangler"
__description__: str = "Pandas on AWS."
__version__: str = "2.8.0"
__license__: str = "Apache License 2.0"
| 23.833333
| 63
| 0.734266
|
__title__: str = "awswrangler"
__description__: str = "Pandas on AWS."
__version__: str = "2.8.0"
__license__: str = "Apache License 2.0"
| true
| true
|
f71949204a3a8f20fdd5c84ef0a61e2047469716
| 7,418
|
py
|
Python
|
4course/theory_of_pl/course/test.py
|
soul-catcher/sibsutis
|
5d7d88ffabbe445052927eb6c6097697df672997
|
[
"WTFPL"
] | 10
|
2021-08-28T08:44:57.000Z
|
2022-03-06T16:29:51.000Z
|
4course/theory_of_pl/course/test.py
|
soul-catcher/sibsutis
|
5d7d88ffabbe445052927eb6c6097697df672997
|
[
"WTFPL"
] | null | null | null |
4course/theory_of_pl/course/test.py
|
soul-catcher/sibsutis
|
5d7d88ffabbe445052927eb6c6097697df672997
|
[
"WTFPL"
] | 6
|
2021-09-06T07:26:18.000Z
|
2021-12-16T16:11:10.000Z
|
import unittest
import utils
from grammar import Grammar
class SplitByCommasTest(unittest.TestCase):
def test_simple(self):
self.assertEqual(['A', 'B', 'C'], utils.split_by('A, B, C', ','))
def test_without_space(self):
self.assertEqual(['A', 'B', 'C'], utils.split_by('A,B,C', ','))
def test_with_chaotic_space(self):
self.assertEqual(['A', 'B', 'C'], utils.split_by(' A , B , C ', ','))
def test_without_commas(self):
self.assertEqual(['A B C'], utils.split_by('A B C', ','))
def test_zero_length(self):
self.assertEqual([], utils.split_by('', ','))
def test_only_spaces(self):
self.assertEqual([], utils.split_by(' ', ','))
def test_word(self):
self.assertEqual(['test'], utils.split_by('test', ','))
def test_several_words_without_commas(self):
self.assertEqual(['one two three'], utils.split_by(' one two three ', ','))
def test_several_words_with_commas(self):
self.assertEqual(['one', 'two', 'three'], utils.split_by('one, two,three', ','))
def test_chaotic_commas(self):
self.assertEqual(['A', 'B', 'C'], utils.split_by(',,A,,,B,C,', ','))
def test_only_comma(self):
self.assertEqual([], utils.split_by(',', ','))
class ParseRulesTest(unittest.TestCase):
def test_simple(self):
rules = 'A -> aAa'
expected = {'A': ['aAa']}
self.assertEqual(expected, utils.parse_rules(rules, '@'))
def test_lambda(self):
rules = 'A -> aAa | @'
expected = {'A': ['aAa', '']}
self.assertEqual(expected, utils.parse_rules(rules, '@'))
def test_lambda_left(self):
rules = 'A -> @ | aAa'
expected = {'A': ['', 'aAa']}
self.assertEqual(expected, utils.parse_rules(rules, '@'))
def test_without_spaces(self):
rules = 'A->aAa|@'
expected = {'A': ['aAa', '']}
self.assertEqual(expected, utils.parse_rules(rules, '@'))
def test_chaotic_spaces(self):
rules = ' AB C -> a A a | @ '
expected = {'AB C': ['a A a', '']}
self.assertEqual(expected, utils.parse_rules(rules, '@'))
def test_empty_rules(self):
rules = ''
expected = {}
self.assertEqual(expected, utils.parse_rules(rules, '@'))
def test_multiline_rules(self):
rules = '''
A -> aAa
B -> bBb
'''
expected = {'A': ['aAa'], 'B': ['bBb']}
self.assertEqual(expected, utils.parse_rules(rules, '@'))
def test_without_arrow(self):
with self.assertRaises(utils.WrongRulesException):
utils.parse_rules('A aAa', '@')
def test_wrong_place_arrow1(self):
with self.assertRaises(utils.WrongRulesException):
utils.parse_rules('-> A aAa', '@')
def test_wrong_place_arrow2(self):
with self.assertRaises(utils.WrongRulesException):
utils.parse_rules('A aAa -> ', '@')
class CanonGrammarTest(unittest.TestCase):
def test_find_non_child_free(self):
grammar = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'D', 'E', 'F', 'G', 'S'],
{
'S': ['aAB', 'E'],
'A': ['aA', 'bB'],
'B': ['ACb', 'b'],
'C': ['A', 'bA', 'cC', 'aE'],
'D': ['a', 'c', 'Fb'],
'E': ['cE', 'aE', 'Eb', 'ED', 'FG'],
'F': ['BC', 'EC', 'AC'],
'G': ['Ga', 'Gb']
},
'S'
)
self.assertSetEqual({'E', 'G'}, grammar.find_child_free_non_terms())
def test_remove_rules1(self):
grammar = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'D', 'E', 'F', 'G', 'S'],
{
'S': ['aAB', 'E'],
'A': ['aA', 'bB'],
'B': ['ACb', 'b'],
'C': ['A', 'bA', 'cC', 'aE'],
'D': ['a', 'c', 'Fb'],
'E': ['cE', 'aE', 'Eb', 'ED', 'FG'],
'F': ['BC', 'EC', 'AC'],
'G': ['Ga', 'Gb']
},
'S'
)
grammar_expected = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'D', 'F', 'S'],
{
'S': ['aAB'],
'A': ['aA', 'bB'],
'B': ['ACb', 'b'],
'C': ['A', 'bA', 'cC'],
'D': ['a', 'c', 'Fb'],
'F': ['BC', 'AC'],
},
'S'
)
self.assertEqual(grammar_expected, grammar.remove_rules({'E', 'G'}))
def test_remove_rules2(self):
grammar = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'D', 'F', 'S'],
{
'S': ['aAB'],
'A': ['aA', 'bB'],
'B': ['ACb', 'b'],
'C': ['A', 'bA', 'cC'],
'D': ['a', 'c', 'Fb'],
'F': ['BC', 'AC'],
},
'S'
)
grammar_expected = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'S'],
{
'S': ['aAB'],
'A': ['aA', 'bB'],
'B': ['ACb', 'b'],
'C': ['A', 'bA', 'cC'],
},
'S'
)
self.assertEqual(grammar_expected, grammar.remove_rules({'D', 'F'}))
def test_find_unreachable_rules(self):
grammar = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'D', 'F', 'S'],
{
'S': ['aAB'],
'A': ['aA', 'bB'],
'B': ['ACb', 'b'],
'C': ['A', 'bA', 'cC'],
'D': ['a', 'c', 'Fb'],
'F': ['BC', 'AC'],
},
'S'
)
self.assertSetEqual({'D', 'F'}, grammar.find_unreachable_rules())
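    # Lambda-rule elimination: for every nullable non-terminal (B directly via
    # B -> '', and A and C through it) each right-hand side gains a variant
    # with that symbol dropped, e.g. 'AaB' also yields 'Aa' and 'cC' yields 'c'.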
def test_remove_empty_rules(self):
grammar = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'S'],
{
'S': ['AaB', 'aB', 'cC'],
'A': ['AB', 'a', 'b', 'B'],
'B': ['Ba', ''],
'C': ['AB', 'c']
},
'S'
)
grammar_expected = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'S'],
{
'S': ['AaB', 'cC', 'aB', 'Aa', 'a', 'c'],
'A': ['AB', 'b', 'a', 'B'],
'B': ['a', 'Ba'],
'C': ['AB', 'c', 'A', 'B']
},
'S'
)
self.assertEqual(grammar_expected, grammar.remove_empty_rules())
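    # Chain (unit) rule elimination: a unit production such as A -> B is
    # replaced by B's own alternatives, so 'B' in A's rules becomes 'Ba'
    # ('a' is already present), and C inherits alternatives from A and B.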
def test_remove_chain_rules(self):
grammar = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'S'],
{
'S': ['AaB', 'cC', 'aB', 'Aa', 'a', 'c'],
'A': ['AB', 'b', 'a', 'B'],
'B': ['a', 'Ba'],
'C': ['AB', 'c', 'A', 'B']
},
'S'
)
grammar_expected = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'S'],
{
'S': ['AaB', 'cC', 'aB', 'Aa', 'a', 'c'],
'A': ['AB', 'b', 'a', 'Ba'],
'B': ['a', 'Ba'],
'C': ['AB', 'c', 'a', 'Ba', 'b']
},
'S'
)
self.assertEqual(grammar_expected, grammar.remove_chain_rules())
if __name__ == '__main__':
unittest.main()
| 30.780083
| 88
| 0.383392
|
import unittest
import utils
from grammar import Grammar
class SplitByCommasTest(unittest.TestCase):
def test_simple(self):
self.assertEqual(['A', 'B', 'C'], utils.split_by('A, B, C', ','))
def test_without_space(self):
self.assertEqual(['A', 'B', 'C'], utils.split_by('A,B,C', ','))
def test_with_chaotic_space(self):
self.assertEqual(['A', 'B', 'C'], utils.split_by(' A , B , C ', ','))
def test_without_commas(self):
self.assertEqual(['A B C'], utils.split_by('A B C', ','))
def test_zero_length(self):
self.assertEqual([], utils.split_by('', ','))
def test_only_spaces(self):
self.assertEqual([], utils.split_by(' ', ','))
def test_word(self):
self.assertEqual(['test'], utils.split_by('test', ','))
def test_several_words_without_commas(self):
self.assertEqual(['one two three'], utils.split_by(' one two three ', ','))
def test_several_words_with_commas(self):
self.assertEqual(['one', 'two', 'three'], utils.split_by('one, two,three', ','))
def test_chaotic_commas(self):
self.assertEqual(['A', 'B', 'C'], utils.split_by(',,A,,,B,C,', ','))
def test_only_comma(self):
self.assertEqual([], utils.split_by(',', ','))
class ParseRulesTest(unittest.TestCase):
def test_simple(self):
rules = 'A -> aAa'
expected = {'A': ['aAa']}
self.assertEqual(expected, utils.parse_rules(rules, '@'))
def test_lambda(self):
rules = 'A -> aAa | @'
expected = {'A': ['aAa', '']}
self.assertEqual(expected, utils.parse_rules(rules, '@'))
def test_lambda_left(self):
rules = 'A -> @ | aAa'
expected = {'A': ['', 'aAa']}
self.assertEqual(expected, utils.parse_rules(rules, '@'))
def test_without_spaces(self):
rules = 'A->aAa|@'
expected = {'A': ['aAa', '']}
self.assertEqual(expected, utils.parse_rules(rules, '@'))
def test_chaotic_spaces(self):
rules = ' AB C -> a A a | @ '
expected = {'AB C': ['a A a', '']}
self.assertEqual(expected, utils.parse_rules(rules, '@'))
def test_empty_rules(self):
rules = ''
expected = {}
self.assertEqual(expected, utils.parse_rules(rules, '@'))
def test_multiline_rules(self):
rules = '''
A -> aAa
B -> bBb
'''
expected = {'A': ['aAa'], 'B': ['bBb']}
self.assertEqual(expected, utils.parse_rules(rules, '@'))
def test_without_arrow(self):
with self.assertRaises(utils.WrongRulesException):
utils.parse_rules('A aAa', '@')
def test_wrong_place_arrow1(self):
with self.assertRaises(utils.WrongRulesException):
utils.parse_rules('-> A aAa', '@')
def test_wrong_place_arrow2(self):
with self.assertRaises(utils.WrongRulesException):
utils.parse_rules('A aAa -> ', '@')
class CanonGrammarTest(unittest.TestCase):
def test_find_non_child_free(self):
grammar = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'D', 'E', 'F', 'G', 'S'],
{
'S': ['aAB', 'E'],
'A': ['aA', 'bB'],
'B': ['ACb', 'b'],
'C': ['A', 'bA', 'cC', 'aE'],
'D': ['a', 'c', 'Fb'],
'E': ['cE', 'aE', 'Eb', 'ED', 'FG'],
'F': ['BC', 'EC', 'AC'],
'G': ['Ga', 'Gb']
},
'S'
)
self.assertSetEqual({'E', 'G'}, grammar.find_child_free_non_terms())
def test_remove_rules1(self):
grammar = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'D', 'E', 'F', 'G', 'S'],
{
'S': ['aAB', 'E'],
'A': ['aA', 'bB'],
'B': ['ACb', 'b'],
'C': ['A', 'bA', 'cC', 'aE'],
'D': ['a', 'c', 'Fb'],
'E': ['cE', 'aE', 'Eb', 'ED', 'FG'],
'F': ['BC', 'EC', 'AC'],
'G': ['Ga', 'Gb']
},
'S'
)
grammar_expected = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'D', 'F', 'S'],
{
'S': ['aAB'],
'A': ['aA', 'bB'],
'B': ['ACb', 'b'],
'C': ['A', 'bA', 'cC'],
'D': ['a', 'c', 'Fb'],
'F': ['BC', 'AC'],
},
'S'
)
self.assertEqual(grammar_expected, grammar.remove_rules({'E', 'G'}))
def test_remove_rules2(self):
grammar = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'D', 'F', 'S'],
{
'S': ['aAB'],
'A': ['aA', 'bB'],
'B': ['ACb', 'b'],
'C': ['A', 'bA', 'cC'],
'D': ['a', 'c', 'Fb'],
'F': ['BC', 'AC'],
},
'S'
)
grammar_expected = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'S'],
{
'S': ['aAB'],
'A': ['aA', 'bB'],
'B': ['ACb', 'b'],
'C': ['A', 'bA', 'cC'],
},
'S'
)
self.assertEqual(grammar_expected, grammar.remove_rules({'D', 'F'}))
def test_find_unreachable_rules(self):
grammar = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'D', 'F', 'S'],
{
'S': ['aAB'],
'A': ['aA', 'bB'],
'B': ['ACb', 'b'],
'C': ['A', 'bA', 'cC'],
'D': ['a', 'c', 'Fb'],
'F': ['BC', 'AC'],
},
'S'
)
self.assertSetEqual({'D', 'F'}, grammar.find_unreachable_rules())
def test_remove_empty_rules(self):
grammar = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'S'],
{
'S': ['AaB', 'aB', 'cC'],
'A': ['AB', 'a', 'b', 'B'],
'B': ['Ba', ''],
'C': ['AB', 'c']
},
'S'
)
grammar_expected = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'S'],
{
'S': ['AaB', 'cC', 'aB', 'Aa', 'a', 'c'],
'A': ['AB', 'b', 'a', 'B'],
'B': ['a', 'Ba'],
'C': ['AB', 'c', 'A', 'B']
},
'S'
)
self.assertEqual(grammar_expected, grammar.remove_empty_rules())
def test_remove_chain_rules(self):
grammar = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'S'],
{
'S': ['AaB', 'cC', 'aB', 'Aa', 'a', 'c'],
'A': ['AB', 'b', 'a', 'B'],
'B': ['a', 'Ba'],
'C': ['AB', 'c', 'A', 'B']
},
'S'
)
grammar_expected = Grammar(
['a', 'b', 'c'],
['A', 'B', 'C', 'S'],
{
'S': ['AaB', 'cC', 'aB', 'Aa', 'a', 'c'],
'A': ['AB', 'b', 'a', 'Ba'],
'B': ['a', 'Ba'],
'C': ['AB', 'c', 'a', 'Ba', 'b']
},
'S'
)
self.assertEqual(grammar_expected, grammar.remove_chain_rules())
if __name__ == '__main__':
unittest.main()
| true
| true
|
f7194a37fbd1fbc139636447bb1502e8a31ec9fb
| 1,257
|
py
|
Python
|
backend2/venv/lib/python3.9/site-packages/authlib/jose/rfc7517/_cryptography_key.py
|
anushkas-bot/cube.js
|
fc5f66e20a7073fcdb1f279440bcd582c5ccc9da
|
[
"Cube",
"Apache-2.0",
"MIT"
] | 3,172
|
2017-11-11T05:54:14.000Z
|
2022-03-31T23:59:59.000Z
|
backend2/venv/lib/python3.9/site-packages/authlib/jose/rfc7517/_cryptography_key.py
|
anushkas-bot/cube.js
|
fc5f66e20a7073fcdb1f279440bcd582c5ccc9da
|
[
"Cube",
"Apache-2.0",
"MIT"
] | 397
|
2017-11-11T02:49:06.000Z
|
2022-03-31T21:02:37.000Z
|
backend2/venv/lib/python3.9/site-packages/authlib/jose/rfc7517/_cryptography_key.py
|
anushkas-bot/cube.js
|
fc5f66e20a7073fcdb1f279440bcd582c5ccc9da
|
[
"Cube",
"Apache-2.0",
"MIT"
] | 387
|
2017-11-18T08:59:56.000Z
|
2022-03-15T18:37:37.000Z
|
from cryptography.x509 import load_pem_x509_certificate
from cryptography.hazmat.primitives.serialization import (
load_pem_private_key, load_pem_public_key, load_ssh_public_key,
)
from cryptography.hazmat.backends import default_backend
from authlib.common.encoding import to_bytes
def load_pem_key(raw, ssh_type=None, key_type=None, password=None):
raw = to_bytes(raw)
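    # Dispatch order: explicit ssh_type/key_type hints first, then PEM header
    # sniffing (PUBLIC / PRIVATE / CERTIFICATE), finally try private then public.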
if ssh_type and raw.startswith(ssh_type):
return load_ssh_public_key(raw, backend=default_backend())
if key_type == 'public':
return load_pem_public_key(raw, backend=default_backend())
if key_type == 'private' or password is not None:
return load_pem_private_key(raw, password=password, backend=default_backend())
if b'PUBLIC' in raw:
return load_pem_public_key(raw, backend=default_backend())
if b'PRIVATE' in raw:
return load_pem_private_key(raw, password=password, backend=default_backend())
if b'CERTIFICATE' in raw:
cert = load_pem_x509_certificate(raw, default_backend())
return cert.public_key()
try:
return load_pem_private_key(raw, password=password, backend=default_backend())
except ValueError:
return load_pem_public_key(raw, backend=default_backend())
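# Illustrative round-trip (a hedged sketch, not part of authlib): generate an
# RSA key with the same cryptography primitives the loader builds on, then feed
# its PKCS8 PEM back through load_pem_key; it lands in the b'PRIVATE' branch.
if __name__ == "__main__":
    from cryptography.hazmat.primitives.asymmetric import rsa
    from cryptography.hazmat.primitives import serialization
    key = rsa.generate_private_key(
        public_exponent=65537, key_size=2048, backend=default_backend())
    pem = key.private_bytes(
        serialization.Encoding.PEM,
        serialization.PrivateFormat.PKCS8,
        serialization.NoEncryption(),
    )
    print(type(load_pem_key(pem)).__name__)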
| 35.914286
| 86
| 0.747017
|
from cryptography.x509 import load_pem_x509_certificate
from cryptography.hazmat.primitives.serialization import (
load_pem_private_key, load_pem_public_key, load_ssh_public_key,
)
from cryptography.hazmat.backends import default_backend
from authlib.common.encoding import to_bytes
def load_pem_key(raw, ssh_type=None, key_type=None, password=None):
raw = to_bytes(raw)
if ssh_type and raw.startswith(ssh_type):
return load_ssh_public_key(raw, backend=default_backend())
if key_type == 'public':
return load_pem_public_key(raw, backend=default_backend())
if key_type == 'private' or password is not None:
return load_pem_private_key(raw, password=password, backend=default_backend())
if b'PUBLIC' in raw:
return load_pem_public_key(raw, backend=default_backend())
if b'PRIVATE' in raw:
return load_pem_private_key(raw, password=password, backend=default_backend())
if b'CERTIFICATE' in raw:
cert = load_pem_x509_certificate(raw, default_backend())
return cert.public_key()
try:
return load_pem_private_key(raw, password=password, backend=default_backend())
except ValueError:
return load_pem_public_key(raw, backend=default_backend())
| true
| true
|
f7194ab1a937921f6752fb9ea13a7ad2b345d88f
| 2,008
|
py
|
Python
|
tests/conftest.py
|
amosbastian/python-fpl
|
3db8e0029d7cf07111db61ddee0b37b17b051bcd
|
[
"MIT"
] | 217
|
2018-01-17T10:03:07.000Z
|
2022-03-12T06:13:02.000Z
|
tests/conftest.py
|
amosbastian/python-fpl
|
3db8e0029d7cf07111db61ddee0b37b17b051bcd
|
[
"MIT"
] | 84
|
2018-04-23T09:56:16.000Z
|
2022-02-11T16:19:58.000Z
|
tests/conftest.py
|
amosbastian/python-fpl
|
3db8e0029d7cf07111db61ddee0b37b17b051bcd
|
[
"MIT"
] | 88
|
2018-04-21T08:07:16.000Z
|
2022-02-25T03:43:54.000Z
|
import aiohttp
import pytest
import os
from fpl import FPL
from fpl.models import Fixture, H2HLeague, User, ClassicLeague, Team, Gameweek
from tests.test_classic_league import classic_league_data
from tests.test_fixture import fixture_data
from tests.test_h2h_league import h2h_league_data
from tests.test_team import team_data
from tests.test_user import user_data
from tests.test_gameweek import gameweek_data
try:
    from .temp_env_var import TEMP_ENV_VARS, ENV_VARS_TO_SUSPEND
except ImportError:
TEMP_ENV_VARS = {}
ENV_VARS_TO_SUSPEND = []
@pytest.fixture(scope="session", autouse=True)
def tests_setup_and_teardown():
# Will be executed before the first test
old_environ = dict(os.environ)
os.environ.update(TEMP_ENV_VARS)
for env_var in ENV_VARS_TO_SUSPEND:
os.environ.pop(env_var, default=None)
yield
# Will be executed after the last test
os.environ.clear()
os.environ.update(old_environ)
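# Note: the async fixtures below assume an async-aware pytest plugin
# (e.g. pytest-asyncio) is installed and configured for this suite.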
@pytest.fixture()
async def fpl():
session = aiohttp.ClientSession()
fpl = FPL(session)
yield fpl
await session.close()
@pytest.fixture()
async def classic_league():
session = aiohttp.ClientSession()
yield ClassicLeague(classic_league_data, session)
await session.close()
@pytest.fixture()
async def gameweek():
return Gameweek(gameweek_data)
@pytest.fixture()
async def player(fpl):
yield await fpl.get_player(345, include_summary=True)
@pytest.fixture()
async def settings(fpl):
yield await fpl.game_settings()
@pytest.fixture()
async def team():
session = aiohttp.ClientSession()
yield Team(team_data, session)
await session.close()
@pytest.fixture()
def fixture():
return Fixture(fixture_data)
@pytest.fixture()
async def h2h_league():
session = aiohttp.ClientSession()
yield H2HLeague(h2h_league_data, session)
await session.close()
@pytest.fixture()
async def user():
session = aiohttp.ClientSession()
yield User(user_data, session)
await session.close()
| 23.08046
| 78
| 0.744024
|
import aiohttp
import pytest
import os
from fpl import FPL
from fpl.models import Fixture, H2HLeague, User, ClassicLeague, Team, Gameweek
from tests.test_classic_league import classic_league_data
from tests.test_fixture import fixture_data
from tests.test_h2h_league import h2h_league_data
from tests.test_team import team_data
from tests.test_user import user_data
from tests.test_gameweek import gameweek_data
try:
    from .temp_env_var import TEMP_ENV_VARS, ENV_VARS_TO_SUSPEND
except ImportError:
TEMP_ENV_VARS = {}
ENV_VARS_TO_SUSPEND = []
@pytest.fixture(scope="session", autouse=True)
def tests_setup_and_teardown():
old_environ = dict(os.environ)
os.environ.update(TEMP_ENV_VARS)
for env_var in ENV_VARS_TO_SUSPEND:
os.environ.pop(env_var, default=None)
yield
os.environ.clear()
os.environ.update(old_environ)
@pytest.fixture()
async def fpl():
session = aiohttp.ClientSession()
fpl = FPL(session)
yield fpl
await session.close()
@pytest.fixture()
async def classic_league():
session = aiohttp.ClientSession()
yield ClassicLeague(classic_league_data, session)
await session.close()
@pytest.fixture()
async def gameweek():
return Gameweek(gameweek_data)
@pytest.fixture()
async def player(fpl):
yield await fpl.get_player(345, include_summary=True)
@pytest.fixture()
async def settings(fpl):
yield await fpl.game_settings()
@pytest.fixture()
async def team():
session = aiohttp.ClientSession()
yield Team(team_data, session)
await session.close()
@pytest.fixture()
def fixture():
return Fixture(fixture_data)
@pytest.fixture()
async def h2h_league():
session = aiohttp.ClientSession()
yield H2HLeague(h2h_league_data, session)
await session.close()
@pytest.fixture()
async def user():
session = aiohttp.ClientSession()
yield User(user_data, session)
await session.close()
| true
| true
|
f7194ae6ce49e55ac49b2ee6f18f31f1b76f58c1
| 35,332
|
py
|
Python
|
test/test_restful.py
|
ueno/keylime
|
480c9b107c155e7a20442afe3be929cf5d50fb86
|
[
"Apache-2.0"
] | null | null | null |
test/test_restful.py
|
ueno/keylime
|
480c9b107c155e7a20442afe3be929cf5d50fb86
|
[
"Apache-2.0"
] | null | null | null |
test/test_restful.py
|
ueno/keylime
|
480c9b107c155e7a20442afe3be929cf5d50fb86
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
NOTE:
This unittest is being used as a procedural test.
The tests must be run in-order and CANNOT be parallelized!
Tests all but two RESTful interfaces:
* agent's POST /v2/keys/vkey
- Done by CV after the CV's POST /v2/agents/{UUID} command is performed
* CV's PUT /v2/agents/{UUID}
- POST already bootstraps agent, so PUT is redundant in this test
The registrar's PUT vactivate interface is only tested if a vTPM is present!
USAGE:
Should be run in test directory under root privileges with either command:
* python -m unittest -v test_restful
* green -vv
(with `pip install green`)
To run without root privileges, be sure to export KEYLIME_TEST=True
For Python Coverage support (pip install coverage), set env COVERAGE_FILE and:
* coverage run --parallel-mode test_restful.py
'''
import sys
import signal
import unittest
import subprocess
import time
import os
import base64
import threading
import shutil
import errno
from pathlib import Path
import dbus
import simplejson as json
from keylime import config
from keylime import tornado_requests
from keylime.requests_client import RequestsClient
from keylime import tenant
from keylime import crypto
from keylime.cmd import user_data_encrypt
from keylime import secure_mount
from keylime.tpm import tpm_main
from keylime.tpm import tpm_abstract
# Coverage support
if "COVERAGE_FILE" in os.environ:
FORK_ARGS = ["coverage", "run", "--parallel-mode"]
if "COVERAGE_DIR" in os.environ:
FORK_ARGS += ["--rcfile=" + os.environ["COVERAGE_DIR"] + "/.coveragerc"]
else:
FORK_ARGS = ["python3"]
# Custom imports
PACKAGE_ROOT = Path(__file__).parents[1]
KEYLIME_DIR = (f"{PACKAGE_ROOT}/keylime")
sys.path.append(KEYLIME_DIR)
# Will be used to communicate with the TPM
tpm_instance = None
# cmp was removed in Python 3, so let's recreate it: (a > b) - (a < b) yields -1, 0 or 1.
def cmp(a, b):
return (a > b) - (a < b)
# Ensure this is run as root
if os.geteuid() != 0 and config.REQUIRE_ROOT:
sys.exit("Tests need to be run with root privileges, or set env KEYLIME_TEST=True!")
# Force sorting tests alphabetically
unittest.TestLoader.sortTestMethodsUsing = lambda _, x, y: cmp(x, y)
# Environment to pass to services
script_env = os.environ.copy()
# Globals to keep track of Keylime components
cv_process = None
reg_process = None
agent_process = None
tenant_templ = None
# Class-level components that are not static (so can't be added to test class)
public_key = None
keyblob = None
ek_tpm = None
aik_tpm = None
vtpm = False
# Set up mTLS
my_cert = config.get('tenant', 'my_cert')
my_priv_key = config.get('tenant', 'private_key')
cert = (my_cert, my_priv_key)
tls_enabled = True
# Like os.remove, but ignore file DNE exceptions
def fileRemove(path):
try:
os.remove(path)
except OSError as e:
# Ignore if file does not exist
if e.errno != errno.ENOENT:
raise
# Boring setup stuff
def setUpModule():
try:
env = os.environ.copy()
env['PATH'] = env['PATH'] + ":/usr/local/bin"
# Run init_tpm_server and tpm_serverd (start fresh)
its = subprocess.Popen(["init_tpm_server"], shell=False, env=env)
its.wait()
tsd = subprocess.Popen(["tpm_serverd"], shell=False, env=env)
tsd.wait()
except Exception as e:
print("WARNING: Restarting TPM emulator failed!")
    # Note: the following is required because abrmd fails to reconnect to MSSIM
    # once MSSIM is killed and restarted. If this proves to be an actual bug and
    # is fixed upstream, the dbus restart call below can be removed.
try:
sysbus = dbus.SystemBus()
systemd1 = sysbus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
manager = dbus.Interface(systemd1, 'org.freedesktop.systemd1.Manager')
# If the systemd service exists, let's restart it.
for service in sysbus.list_names():
if "com.intel.tss2.Tabrmd" in service:
print("Found dbus service:", str(service))
try:
print("Restarting tpm2-abrmd.service.")
manager.RestartUnit('tpm2-abrmd.service', 'fail')
except dbus.exceptions.DBusException as e:
print(e)
except Exception as e:
        print("Non-systemd agent detected, no tpm2-abrmd restart required.")
try:
# Start with a clean slate for this test
fileRemove(config.WORK_DIR + "/tpmdata.yaml")
fileRemove(config.WORK_DIR + "/cv_data.sqlite")
fileRemove(config.WORK_DIR + "/reg_data.sqlite")
shutil.rmtree(config.WORK_DIR + "/cv_ca", True)
except Exception as e:
print("WARNING: Cleanup of TPM files failed!")
# CV must be run first to create CA and certs!
launch_cloudverifier()
launch_registrar()
# launch_cloudagent()
# Make the Tenant do a lot of set-up work for us
global tenant_templ
tenant_templ = tenant.Tenant()
tenant_templ.agent_uuid = config.get('cloud_agent', 'agent_uuid')
tenant_templ.cloudagent_ip = "localhost"
tenant_templ.cloudagent_port = config.get('cloud_agent', 'cloudagent_port')
tenant_templ.verifier_ip = config.get('cloud_verifier', 'cloudverifier_ip')
tenant_templ.verifier_port = config.get('cloud_verifier', 'cloudverifier_port')
tenant_templ.registrar_ip = config.get('registrar', 'registrar_ip')
tenant_templ.registrar_boot_port = config.get('registrar', 'registrar_port')
tenant_templ.registrar_tls_boot_port = config.get('registrar', 'registrar_tls_port')
tenant_templ.registrar_base_url = f'{tenant_templ.registrar_ip}:{tenant_templ.registrar_boot_port}'
tenant_templ.registrar_base_tls_url = f'{tenant_templ.registrar_ip}:{tenant_templ.registrar_tls_boot_port}'
tenant_templ.agent_base_url = f'{tenant_templ.cloudagent_ip}:{tenant_templ.cloudagent_port}'
# Set up TLS
my_tls_cert, my_tls_priv_key = tenant_templ.get_tls_context()
tenant_templ.cert = (my_tls_cert, my_tls_priv_key)
# Destroy everything on teardown
def tearDownModule():
# Tear down in reverse order of dependencies
kill_cloudagent()
kill_cloudverifier()
kill_registrar()
def launch_cloudverifier():
"""Start up the cloud verifier"""
global cv_process, script_env, FORK_ARGS
if cv_process is None:
cv_process = subprocess.Popen("keylime_verifier",
shell=False,
preexec_fn=os.setsid,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=script_env)
def initthread():
sys.stdout.write('\033[96m' + "\nCloud Verifier Thread" + '\033[0m')
while True:
line = cv_process.stdout.readline()
if line == b'':
break
line = line.decode('utf-8')
line = line.rstrip(os.linesep)
sys.stdout.flush()
sys.stdout.write('\n\033[96m' + line + '\033[0m')
t = threading.Thread(target=initthread)
t.start()
time.sleep(30)
return True
def launch_registrar():
"""Start up the registrar"""
global reg_process, script_env, FORK_ARGS
if reg_process is None:
reg_process = subprocess.Popen("keylime_registrar",
shell=False,
preexec_fn=os.setsid,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=script_env)
def initthread():
sys.stdout.write('\033[95m' + "\nRegistrar Thread" + '\033[0m')
while True:
line = reg_process.stdout.readline()
if line == b"":
break
# line = line.rstrip(os.linesep)
line = line.decode('utf-8')
sys.stdout.flush()
sys.stdout.write('\n\033[95m' + line + '\033[0m')
t = threading.Thread(target=initthread)
t.start()
time.sleep(10)
return True
def launch_cloudagent():
"""Start up the cloud agent"""
global agent_process, script_env, FORK_ARGS
if agent_process is None:
agent_process = subprocess.Popen("keylime_agent",
shell=False,
preexec_fn=os.setsid,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=script_env)
def initthread():
sys.stdout.write('\033[94m' + "\nCloud Agent Thread" + '\033[0m')
while True:
line = agent_process.stdout.readline()
if line == b'':
break
# line = line.rstrip(os.linesep)
line = line.decode('utf-8')
sys.stdout.flush()
sys.stdout.write('\n\033[94m' + line + '\033[0m')
t = threading.Thread(target=initthread)
t.start()
time.sleep(10)
return True
def kill_cloudverifier():
"""Kill the cloud verifier"""
global cv_process
if cv_process is None:
return
os.killpg(os.getpgid(cv_process.pid), signal.SIGINT)
cv_process.wait()
cv_process = None
def kill_registrar():
"""Kill the registrar"""
global reg_process
if reg_process is None:
return
os.killpg(os.getpgid(reg_process.pid), signal.SIGINT)
reg_process.wait()
reg_process = None
def kill_cloudagent():
"""Kill the cloud agent"""
global agent_process
if agent_process is None:
return
os.killpg(os.getpgid(agent_process.pid), signal.SIGINT)
agent_process.wait()
agent_process = None
def services_running():
    return reg_process.poll() is None and cv_process.poll() is None
class TestRestful(unittest.TestCase):
# Static class members (won't change between tests)
payload = None
auth_tag = None
tpm_policy = {}
vtpm_policy = {}
metadata = {}
allowlist = {}
revocation_key = ""
mb_refstate = None
K = None
U = None
V = None
api_version = config.API_VERSION
cloudagent_ip = None
cloudagent_port = None
@classmethod
def setUpClass(cls):
"""Prepare the keys and payload to give to the CV"""
contents = "random garbage to test as payload"
# contents = contents.encode('utf-8')
ret = user_data_encrypt.encrypt(contents)
cls.K = ret['k']
cls.U = ret['u']
cls.V = ret['v']
cls.payload = ret['ciphertext']
# Set up to register an agent
cls.auth_tag = crypto.do_hmac(cls.K, tenant_templ.agent_uuid)
# Prepare policies for agent
cls.tpm_policy = config.get('tenant', 'tpm_policy')
cls.vtpm_policy = config.get('tenant', 'vtpm_policy')
cls.tpm_policy = tpm_abstract.TPM_Utilities.readPolicy(cls.tpm_policy)
cls.vtpm_policy = tpm_abstract.TPM_Utilities.readPolicy(cls.vtpm_policy)
# Allow targeting a specific API version (default latest)
cls.api_version = config.API_VERSION
def setUp(self):
"""Nothing to set up before each test"""
return
def test_000_services(self):
"""Ensure everyone is running before doing tests"""
self.assertTrue(services_running(), "Not all services started successfully!")
# Registrar Testset
def test_010_reg_agent_post(self):
"""Test registrar's POST /v2/agents/{UUID} Interface"""
global keyblob, vtpm, tpm_instance, ek_tpm, aik_tpm
tpm_instance = tpm_main.tpm()
# Change CWD for TPM-related operations
cwd = os.getcwd()
config.ch_dir(config.WORK_DIR, None)
_ = secure_mount.mount()
# Initialize the TPM with AIK
(ekcert, ek_tpm, aik_tpm) = tpm_instance.tpm_init(self_activate=False,
config_pw=config.get('cloud_agent', 'tpm_ownerpassword'))
vtpm = tpm_instance.is_vtpm()
# Handle virtualized and emulated TPMs
if ekcert is None:
if vtpm:
ekcert = 'virtual'
elif tpm_instance.is_emulator():
ekcert = 'emulator'
# Get back to our original CWD
config.ch_dir(cwd, None)
data = {
'ekcert': ekcert,
'aik_tpm': aik_tpm,
}
if ekcert is None or ekcert == 'emulator':
data['ek_tpm'] = ek_tpm
test_010_reg_agent_post = RequestsClient(tenant_templ.registrar_base_url, tls_enabled=False)
response = test_010_reg_agent_post.post(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
data=json.dumps(data),
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar agent Add return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("blob", json_response["results"], "Malformed response body!")
keyblob = json_response["results"]["blob"]
self.assertIsNotNone(keyblob, "Malformed response body!")
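    # NB: the skipIf argument below is evaluated when the class body is first
    # executed, i.e. before test_010 can update the module-level vtpm flag.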
@unittest.skipIf(vtpm, "Registrar's PUT /v2/agents/{UUID}/activate only for non-vTPMs!")
def test_011_reg_agent_activate_put(self):
"""Test registrar's PUT /v2/agents/{UUID}/activate Interface"""
global keyblob
self.assertIsNotNone(keyblob, "Required value not set. Previous step may have failed?")
key = tpm_instance.activate_identity(keyblob)
data = {
'auth_tag': crypto.do_hmac(key, tenant_templ.agent_uuid),
}
test_011_reg_agent_activate_put = RequestsClient(tenant_templ.registrar_base_url, tls_enabled=False)
response = test_011_reg_agent_activate_put.put(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}/activate',
data=json.dumps(data),
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar agent Activate return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def test_013_reg_agents_get(self):
"""Test registrar's GET /v2/agents Interface"""
test_013_reg_agents_get = RequestsClient(tenant_templ.registrar_base_tls_url, tls_enabled=True)
response = test_013_reg_agents_get.get(
f'/v{self.api_version}/agents/',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar agent List return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("uuids", json_response["results"], "Malformed response body!")
# We registered exactly one agent so far
self.assertEqual(1, len(json_response["results"]["uuids"]), "Incorrect system state!")
def test_014_reg_agent_get(self):
"""Test registrar's GET /v2/agents/{UUID} Interface"""
test_014_reg_agent_get = RequestsClient(tenant_templ.registrar_base_tls_url, tls_enabled=True)
response = test_014_reg_agent_get.get(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar agent return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("ek_tpm", json_response["results"], "Malformed response body!")
self.assertIn("aik_tpm", json_response["results"], "Malformed response body!")
self.assertIn("ekcert", json_response["results"], "Malformed response body!")
global aik_tpm
aik_tpm = json_response["results"]["aik_tpm"]
def test_015_reg_agent_delete(self):
"""Test registrar's DELETE /v2/agents/{UUID} Interface"""
test_015_reg_agent_delete = RequestsClient(tenant_templ.registrar_base_tls_url, tls_enabled=True)
response = test_015_reg_agent_delete.delete(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar Delete return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
# Agent Setup Testset
def test_020_agent_keys_pubkey_get(self):
"""Test agent's GET /v2/keys/pubkey Interface"""
# We want a real cloud agent to communicate with!
launch_cloudagent()
time.sleep(10)
test_020_agent_keys_pubkey_get = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_020_agent_keys_pubkey_get.get(
f'/v{self.api_version}/keys/pubkey',
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Agent pubkey return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("pubkey", json_response["results"], "Malformed response body!")
global public_key
public_key = json_response["results"]["pubkey"]
self.assertNotEqual(public_key, None, "Malformed response body!")
def test_021_reg_agent_get(self):
# We need to refresh the aik value we've stored in case it changed
self.test_014_reg_agent_get()
def test_022_agent_quotes_identity_get(self):
"""Test agent's GET /v2/quotes/identity Interface"""
self.assertIsNotNone(aik_tpm, "Required value not set. Previous step may have failed?")
nonce = tpm_abstract.TPM_Utilities.random_password(20)
numretries = config.getint('tenant', 'max_retries')
while numretries >= 0:
test_022_agent_quotes_identity_get = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_022_agent_quotes_identity_get.get(
f'/v{self.api_version}/quotes/identity?nonce={nonce}',
data=None,
cert="",
verify=False
)
if response.status_code == 200:
break
numretries -= 1
time.sleep(config.getint('tenant', 'retry_interval'))
self.assertEqual(response.status_code, 200, "Non-successful Agent identity return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("quote", json_response["results"], "Malformed response body!")
self.assertIn("pubkey", json_response["results"], "Malformed response body!")
# Check the quote identity
self.assertTrue(tpm_instance.check_quote(tenant_templ.agent_uuid,
nonce,
json_response["results"]["pubkey"],
json_response["results"]["quote"],
aik_tpm,
hash_alg=json_response["results"]["hash_alg"]),
"Invalid quote!")
@unittest.skip("Testing of agent's POST /v2/keys/vkey disabled! (spawned CV should do this already)")
def test_023_agent_keys_vkey_post(self):
"""Test agent's POST /v2/keys/vkey Interface"""
# CV should do this (during CV POST/PUT test)
# Running this test might hide problems with the CV sending the V key
global public_key
self.assertIsNotNone(self.V, "Required value not set. Previous step may have failed?")
self.assertIsNotNone(public_key, "Required value not set. Previous step may have failed?")
encrypted_V = crypto.rsa_encrypt(crypto.rsa_import_pubkey(public_key), str(self.V))
b64_encrypted_V = base64.b64encode(encrypted_V)
data = {'encrypted_key': b64_encrypted_V}
test_023_agent_keys_vkey_post = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_023_agent_keys_vkey_post.post(
f'/v{self.api_version}/keys/vkey',
data=json.dumps(data),
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Agent vkey post return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def test_024_agent_keys_ukey_post(self):
"""Test agents's POST /v2/keys/ukey Interface"""
global public_key
self.assertIsNotNone(public_key, "Required value not set. Previous step may have failed?")
self.assertIsNotNone(self.U, "Required value not set. Previous step may have failed?")
self.assertIsNotNone(self.auth_tag, "Required value not set. Previous step may have failed?")
self.assertIsNotNone(self.payload, "Required value not set. Previous step may have failed?")
encrypted_U = crypto.rsa_encrypt(crypto.rsa_import_pubkey(public_key), self.U)
b64_encrypted_u = base64.b64encode(encrypted_U)
data = {
'encrypted_key': b64_encrypted_u,
'auth_tag': self.auth_tag,
'payload': self.payload
}
test_024_agent_keys_ukey_post = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_024_agent_keys_ukey_post.post(
f'/v{self.api_version}/keys/ukey',
data=json.dumps(data),
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Agent ukey post return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def test_025_cv_allowlist_post(self):
"""Test CV's POST /v2/allowlist/{name} Interface"""
data = {
'name': 'test-allowlist',
'tpm_policy': json.dumps(self.tpm_policy),
'vtpm_policy': json.dumps(self.vtpm_policy),
'ima_policy': json.dumps(self.allowlist),
}
cv_client = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = cv_client.post(
'/allowlists/test-allowlist',
data=json.dumps(data),
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 201, "Non-successful CV allowlist Post return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def test_026_cv_allowlist_get(self):
"""Test CV's GET /v2/allowlists/{name} Interface"""
cv_client = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = cv_client.get(
'/allowlists/test-allowlist',
cert=tenant_templ.cert,
verify=False
)
        self.assertEqual(response.status_code, 200, "Non-successful CV allowlist Get return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
results = json_response['results']
self.assertEqual(results['name'], 'test-allowlist')
self.assertEqual(results['tpm_policy'], json.dumps(self.tpm_policy))
self.assertEqual(results['vtpm_policy'], json.dumps(self.vtpm_policy))
self.assertEqual(results['ima_policy'], json.dumps(self.allowlist))
def test_027_cv_allowlist_delete(self):
"""Test CV's DELETE /v2/allowlists/{name} Interface"""
cv_client = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = cv_client.delete(
'/allowlists/test-allowlist',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 204, "Non-successful CV allowlist Delete return code!")
# Cloud Verifier Testset
def test_030_cv_agent_post(self):
"""Test CV's POST /v2/agents/{UUID} Interface"""
self.assertIsNotNone(self.V, "Required value not set. Previous step may have failed?")
b64_v = base64.b64encode(self.V)
data = {
'v': b64_v,
'cloudagent_ip': tenant_templ.cloudagent_ip,
'cloudagent_port': tenant_templ.cloudagent_port,
'tpm_policy': json.dumps(self.tpm_policy),
'vtpm_policy': json.dumps(self.vtpm_policy),
'allowlist': json.dumps(self.allowlist),
'ima_sign_verification_keys': '',
'mb_refstate': None,
'metadata': json.dumps(self.metadata),
'revocation_key': self.revocation_key,
'accept_tpm_hash_algs': config.get('tenant', 'accept_tpm_hash_algs').split(','),
'accept_tpm_encryption_algs': config.get('tenant', 'accept_tpm_encryption_algs').split(','),
'accept_tpm_signing_algs': config.get('tenant', 'accept_tpm_signing_algs').split(','),
}
test_030_cv_agent_post = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_030_cv_agent_post.post(
f'/agents/{tenant_templ.agent_uuid}',
data=json.dumps(data),
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful CV agent Post return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
time.sleep(10)
@unittest.skip("Testing of CV's PUT /v2/agents/{UUID} disabled!")
def test_031_cv_agent_put(self):
"""Test CV's PUT /v2/agents/{UUID} Interface"""
# TODO: this should actually test PUT functionality (e.g., make agent fail and then PUT back up)
test_031_cv_agent_put = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_031_cv_agent_put.put(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
data=b'',
cert=tenant_templ.cert,
verify=False
)
        self.assertEqual(response.status_code, 200, "Non-successful CV agent Put return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def test_032_cv_agents_get(self):
"""Test CV's GET /v2/agents Interface"""
test_032_cv_agents_get = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_032_cv_agents_get.get(
f'/v{self.api_version}/agents/',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful CV agent List return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("uuids", json_response["results"], "Malformed response body!")
# Be sure our agent is registered
self.assertEqual(1, len(json_response["results"]["uuids"]))
def test_033_cv_agent_get(self):
"""Test CV's GET /v2/agents/{UUID} Interface"""
test_033_cv_agent_get = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_033_cv_agent_get.get(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful CV agent return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
# Check a few of the important properties are present
self.assertIn("operational_state", json_response["results"], "Malformed response body!")
self.assertIn("ip", json_response["results"], "Malformed response body!")
self.assertIn("port", json_response["results"], "Malformed response body!")
def test_034_cv_agent_post_invalid_exclude_list(self):
"""Test CV's POST /v2/agents/{UUID} Interface"""
self.assertIsNotNone(self.V, "Required value not set. Previous step may have failed?")
b64_v = base64.b64encode(self.V)
# Set unsupported regex in exclude list
allowlist = {'exclude': ['*']}
data = {
'v': b64_v,
'mb_refstate': None,
'cloudagent_ip': tenant_templ.cloudagent_ip,
'cloudagent_port': tenant_templ.cloudagent_port,
'tpm_policy': json.dumps(self.tpm_policy),
'vtpm_policy': json.dumps(self.vtpm_policy),
'allowlist': json.dumps(allowlist),
'ima_sign_verification_keys': '',
'metadata': json.dumps(self.metadata),
'revocation_key': self.revocation_key,
'accept_tpm_hash_algs': config.get('tenant', 'accept_tpm_hash_algs').split(','),
'accept_tpm_encryption_algs': config.get('tenant', 'accept_tpm_encryption_algs').split(','),
'accept_tpm_signing_algs': config.get('tenant', 'accept_tpm_signing_algs').split(','),
}
client = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = client.post(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
data=json.dumps(data),
verify=False
)
        self.assertEqual(response.status_code, 400, "Agent Post with an invalid exclude list should return 400!")
# Ensure response is well-formed
json_response = response.json()
self.assertIn("results", json_response, "Malformed response body!")
# Agent Poll Testset
def test_040_agent_quotes_integrity_get(self):
"""Test agent's GET /v2/quotes/integrity Interface"""
global public_key
self.assertIsNotNone(aik_tpm, "Required value not set. Previous step may have failed?")
nonce = tpm_abstract.TPM_Utilities.random_password(20)
mask = self.tpm_policy["mask"]
vmask = self.vtpm_policy["mask"]
partial = "1"
if public_key is None:
partial = "0"
test_040_agent_quotes_integrity_get = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_040_agent_quotes_integrity_get.get(
f'/v{self.api_version}/quotes/integrity?nonce={nonce}&mask={mask}&vmask={vmask}&partial={partial}',
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Agent Integrity Get return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("quote", json_response["results"], "Malformed response body!")
if public_key is None:
self.assertIn("pubkey", json_response["results"], "Malformed response body!")
public_key = json_response["results"]["pubkey"]
self.assertIn("hash_alg", json_response["results"], "Malformed response body!")
quote = json_response["results"]["quote"]
hash_alg = json_response["results"]["hash_alg"]
validQuote = tpm_instance.check_quote(tenant_templ.agent_uuid,
nonce,
public_key,
quote,
aik_tpm,
self.tpm_policy,
hash_alg=hash_alg)
self.assertTrue(validQuote)
async def test_041_agent_keys_verify_get(self):
"""Test agent's GET /v2/keys/verify Interface
        Declared async so the tornado request can be awaited while the agent processes the key."""
self.assertIsNotNone(self.K, "Required value not set. Previous step may have failed?")
challenge = tpm_abstract.TPM_Utilities.random_password(20)
encoded = base64.b64encode(self.K).decode('utf-8')
response = tornado_requests.request("GET",
"http://%s:%s/keys/verify?challenge=%s" % (self.cloudagent_ip, self.cloudagent_port, challenge))
response = await response
self.assertEqual(response.status, 200, "Non-successful Agent verify return code!")
json_response = json.loads(response.read().decode())
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("hmac", json_response["results"], "Malformed response body!")
# Be sure response is valid
mac = json_response['results']['hmac']
ex_mac = crypto.do_hmac(encoded, challenge)
# ex_mac = crypto.do_hmac(self.K, challenge)
self.assertEqual(mac, ex_mac, "Agent failed to validate challenge code!")
# CV Cleanup Testset
def test_050_cv_agent_delete(self):
"""Test CV's DELETE /v2/agents/{UUID} Interface"""
time.sleep(5)
test_050_cv_agent_delete = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_050_cv_agent_delete.delete(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 202, "Non-successful CV agent Delete return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def tearDown(self):
"""Nothing to bring down after each test"""
return
@classmethod
def tearDownClass(cls):
"""Nothing to bring down"""
return
if __name__ == '__main__':
unittest.main()
| 38.997792
| 140
| 0.635656
|
import sys
import signal
import unittest
import subprocess
import time
import os
import base64
import threading
import shutil
import errno
from pathlib import Path
import dbus
import simplejson as json
from keylime import config
from keylime import tornado_requests
from keylime.requests_client import RequestsClient
from keylime import tenant
from keylime import crypto
from keylime.cmd import user_data_encrypt
from keylime import secure_mount
from keylime.tpm import tpm_main
from keylime.tpm import tpm_abstract
if "COVERAGE_FILE" in os.environ:
FORK_ARGS = ["coverage", "run", "--parallel-mode"]
if "COVERAGE_DIR" in os.environ:
FORK_ARGS += ["--rcfile=" + os.environ["COVERAGE_DIR"] + "/.coveragerc"]
else:
FORK_ARGS = ["python3"]
PACKAGE_ROOT = Path(__file__).parents[1]
KEYLIME_DIR = (f"{PACKAGE_ROOT}/keylime")
sys.path.append(KEYLIME_DIR)
tpm_instance = None
def cmp(a, b):
return (a > b) - (a < b)
if os.geteuid() != 0 and config.REQUIRE_ROOT:
sys.exit("Tests need to be run with root privileges, or set env KEYLIME_TEST=True!")
unittest.TestLoader.sortTestMethodsUsing = lambda _, x, y: cmp(x, y)
script_env = os.environ.copy()
cv_process = None
reg_process = None
agent_process = None
tenant_templ = None
public_key = None
keyblob = None
ek_tpm = None
aik_tpm = None
vtpm = False
# Set up mTLS
my_cert = config.get('tenant', 'my_cert')
my_priv_key = config.get('tenant', 'private_key')
cert = (my_cert, my_priv_key)
tls_enabled = True
# Like os.remove, but ignore file DNE exceptions
def fileRemove(path):
try:
os.remove(path)
except OSError as e:
# Ignore if file does not exist
if e.errno != errno.ENOENT:
raise
# Boring setup stuff
def setUpModule():
try:
env = os.environ.copy()
env['PATH'] = env['PATH'] + ":/usr/local/bin"
# Run init_tpm_server and tpm_serverd (start fresh)
its = subprocess.Popen(["init_tpm_server"], shell=False, env=env)
its.wait()
tsd = subprocess.Popen(["tpm_serverd"], shell=False, env=env)
tsd.wait()
except Exception as e:
print("WARNING: Restarting TPM emulator failed!")
    # Note: the following is required because abrmd fails to reconnect to MSSIM
    # once MSSIM is killed and restarted. If this proves to be an actual bug and
    # is fixed upstream, the dbus restart call below can be removed.
try:
sysbus = dbus.SystemBus()
systemd1 = sysbus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
manager = dbus.Interface(systemd1, 'org.freedesktop.systemd1.Manager')
# If the systemd service exists, let's restart it.
for service in sysbus.list_names():
if "com.intel.tss2.Tabrmd" in service:
print("Found dbus service:", str(service))
try:
print("Restarting tpm2-abrmd.service.")
manager.RestartUnit('tpm2-abrmd.service', 'fail')
except dbus.exceptions.DBusException as e:
print(e)
except Exception as e:
        print("Non-systemd agent detected, no tpm2-abrmd restart required.")
try:
fileRemove(config.WORK_DIR + "/tpmdata.yaml")
fileRemove(config.WORK_DIR + "/cv_data.sqlite")
fileRemove(config.WORK_DIR + "/reg_data.sqlite")
shutil.rmtree(config.WORK_DIR + "/cv_ca", True)
except Exception as e:
print("WARNING: Cleanup of TPM files failed!")
launch_cloudverifier()
launch_registrar()
global tenant_templ
tenant_templ = tenant.Tenant()
tenant_templ.agent_uuid = config.get('cloud_agent', 'agent_uuid')
tenant_templ.cloudagent_ip = "localhost"
tenant_templ.cloudagent_port = config.get('cloud_agent', 'cloudagent_port')
tenant_templ.verifier_ip = config.get('cloud_verifier', 'cloudverifier_ip')
tenant_templ.verifier_port = config.get('cloud_verifier', 'cloudverifier_port')
tenant_templ.registrar_ip = config.get('registrar', 'registrar_ip')
tenant_templ.registrar_boot_port = config.get('registrar', 'registrar_port')
tenant_templ.registrar_tls_boot_port = config.get('registrar', 'registrar_tls_port')
tenant_templ.registrar_base_url = f'{tenant_templ.registrar_ip}:{tenant_templ.registrar_boot_port}'
tenant_templ.registrar_base_tls_url = f'{tenant_templ.registrar_ip}:{tenant_templ.registrar_tls_boot_port}'
tenant_templ.agent_base_url = f'{tenant_templ.cloudagent_ip}:{tenant_templ.cloudagent_port}'
my_tls_cert, my_tls_priv_key = tenant_templ.get_tls_context()
tenant_templ.cert = (my_tls_cert, my_tls_priv_key)
def tearDownModule():
kill_cloudagent()
kill_cloudverifier()
kill_registrar()
def launch_cloudverifier():
global cv_process, script_env, FORK_ARGS
if cv_process is None:
cv_process = subprocess.Popen("keylime_verifier",
shell=False,
preexec_fn=os.setsid,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=script_env)
def initthread():
sys.stdout.write('\033[96m' + "\nCloud Verifier Thread" + '\033[0m')
while True:
line = cv_process.stdout.readline()
if line == b'':
break
line = line.decode('utf-8')
line = line.rstrip(os.linesep)
sys.stdout.flush()
sys.stdout.write('\n\033[96m' + line + '\033[0m')
t = threading.Thread(target=initthread)
t.start()
time.sleep(30)
return True
def launch_registrar():
global reg_process, script_env, FORK_ARGS
if reg_process is None:
reg_process = subprocess.Popen("keylime_registrar",
shell=False,
preexec_fn=os.setsid,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=script_env)
def initthread():
sys.stdout.write('\033[95m' + "\nRegistrar Thread" + '\033[0m')
while True:
line = reg_process.stdout.readline()
if line == b"":
break
line = line.decode('utf-8')
sys.stdout.flush()
sys.stdout.write('\n\033[95m' + line + '\033[0m')
t = threading.Thread(target=initthread)
t.start()
time.sleep(10)
return True
def launch_cloudagent():
global agent_process, script_env, FORK_ARGS
if agent_process is None:
agent_process = subprocess.Popen("keylime_agent",
shell=False,
preexec_fn=os.setsid,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=script_env)
def initthread():
sys.stdout.write('\033[94m' + "\nCloud Agent Thread" + '\033[0m')
while True:
line = agent_process.stdout.readline()
if line == b'':
break
line = line.decode('utf-8')
sys.stdout.flush()
sys.stdout.write('\n\033[94m' + line + '\033[0m')
t = threading.Thread(target=initthread)
t.start()
time.sleep(10)
return True
def kill_cloudverifier():
global cv_process
if cv_process is None:
return
os.killpg(os.getpgid(cv_process.pid), signal.SIGINT)
cv_process.wait()
cv_process = None
def kill_registrar():
global reg_process
if reg_process is None:
return
os.killpg(os.getpgid(reg_process.pid), signal.SIGINT)
reg_process.wait()
reg_process = None
def kill_cloudagent():
global agent_process
if agent_process is None:
return
os.killpg(os.getpgid(agent_process.pid), signal.SIGINT)
agent_process.wait()
agent_process = None
def services_running():
if reg_process.poll() is None and cv_process.poll() is None:
return True
return False
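# A small reading aid: Popen.poll() returns None while a process is still
# running, so services_running() is True only when both the registrar and the
# verifier processes are alive.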
class TestRestful(unittest.TestCase):
payload = None
auth_tag = None
tpm_policy = {}
vtpm_policy = {}
metadata = {}
allowlist = {}
revocation_key = ""
mb_refstate = None
K = None
U = None
V = None
api_version = config.API_VERSION
cloudagent_ip = None
cloudagent_port = None
@classmethod
def setUpClass(cls):
contents = "random garbage to test as payload"
# contents = contents.encode('utf-8')
ret = user_data_encrypt.encrypt(contents)
cls.K = ret['k']
cls.U = ret['u']
cls.V = ret['v']
cls.payload = ret['ciphertext']
# Set up to register an agent
cls.auth_tag = crypto.do_hmac(cls.K, tenant_templ.agent_uuid)
# Prepare policies for agent
cls.tpm_policy = config.get('tenant', 'tpm_policy')
cls.vtpm_policy = config.get('tenant', 'vtpm_policy')
cls.tpm_policy = tpm_abstract.TPM_Utilities.readPolicy(cls.tpm_policy)
cls.vtpm_policy = tpm_abstract.TPM_Utilities.readPolicy(cls.vtpm_policy)
# Allow targeting a specific API version (default latest)
cls.api_version = config.API_VERSION
def setUp(self):
return
def test_000_services(self):
self.assertTrue(services_running(), "Not all services started successfully!")
# Registrar Testset
def test_010_reg_agent_post(self):
global keyblob, vtpm, tpm_instance, ek_tpm, aik_tpm
tpm_instance = tpm_main.tpm()
# Change CWD for TPM-related operations
cwd = os.getcwd()
config.ch_dir(config.WORK_DIR, None)
_ = secure_mount.mount()
# Initialize the TPM with AIK
(ekcert, ek_tpm, aik_tpm) = tpm_instance.tpm_init(self_activate=False,
config_pw=config.get('cloud_agent', 'tpm_ownerpassword'))
vtpm = tpm_instance.is_vtpm()
# Handle virtualized and emulated TPMs
if ekcert is None:
if vtpm:
ekcert = 'virtual'
elif tpm_instance.is_emulator():
ekcert = 'emulator'
# Get back to our original CWD
config.ch_dir(cwd, None)
data = {
'ekcert': ekcert,
'aik_tpm': aik_tpm,
}
if ekcert is None or ekcert == 'emulator':
data['ek_tpm'] = ek_tpm
test_010_reg_agent_post = RequestsClient(tenant_templ.registrar_base_url, tls_enabled=False)
response = test_010_reg_agent_post.post(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
data=json.dumps(data),
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar agent Add return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("blob", json_response["results"], "Malformed response body!")
keyblob = json_response["results"]["blob"]
self.assertIsNotNone(keyblob, "Malformed response body!")
@unittest.skipIf(vtpm, "Registrar's PUT /v2/agents/{UUID}/activate only for non-vTPMs!")
def test_011_reg_agent_activate_put(self):
global keyblob
self.assertIsNotNone(keyblob, "Required value not set. Previous step may have failed?")
key = tpm_instance.activate_identity(keyblob)
data = {
'auth_tag': crypto.do_hmac(key, tenant_templ.agent_uuid),
}
test_011_reg_agent_activate_put = RequestsClient(tenant_templ.registrar_base_url, tls_enabled=False)
response = test_011_reg_agent_activate_put.put(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}/activate',
data=json.dumps(data),
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar agent Activate return code!")
json_response = response.json()
self.assertIn("results", json_response, "Malformed response body!")
def test_013_reg_agents_get(self):
test_013_reg_agents_get = RequestsClient(tenant_templ.registrar_base_tls_url, tls_enabled=True)
response = test_013_reg_agents_get.get(
f'/v{self.api_version}/agents/',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar agent List return code!")
json_response = response.json()
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("uuids", json_response["results"], "Malformed response body!")
self.assertEqual(1, len(json_response["results"]["uuids"]), "Incorrect system state!")
def test_014_reg_agent_get(self):
test_014_reg_agent_get = RequestsClient(tenant_templ.registrar_base_tls_url, tls_enabled=True)
response = test_014_reg_agent_get.get(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar agent return code!")
json_response = response.json()
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("ek_tpm", json_response["results"], "Malformed response body!")
self.assertIn("aik_tpm", json_response["results"], "Malformed response body!")
self.assertIn("ekcert", json_response["results"], "Malformed response body!")
global aik_tpm
aik_tpm = json_response["results"]["aik_tpm"]
def test_015_reg_agent_delete(self):
test_015_reg_agent_delete = RequestsClient(tenant_templ.registrar_base_tls_url, tls_enabled=True)
response = test_015_reg_agent_delete.delete(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar Delete return code!")
json_response = response.json()
self.assertIn("results", json_response, "Malformed response body!")
def test_020_agent_keys_pubkey_get(self):
launch_cloudagent()
time.sleep(10)
test_020_agent_keys_pubkey_get = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_020_agent_keys_pubkey_get.get(
f'/v{self.api_version}/keys/pubkey',
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Agent pubkey return code!")
json_response = response.json()
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("pubkey", json_response["results"], "Malformed response body!")
global public_key
public_key = json_response["results"]["pubkey"]
self.assertNotEqual(public_key, None, "Malformed response body!")
def test_021_reg_agent_get(self):
self.test_014_reg_agent_get()
def test_022_agent_quotes_identity_get(self):
self.assertIsNotNone(aik_tpm, "Required value not set. Previous step may have failed?")
nonce = tpm_abstract.TPM_Utilities.random_password(20)
numretries = config.getint('tenant', 'max_retries')
while numretries >= 0:
test_022_agent_quotes_identity_get = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_022_agent_quotes_identity_get.get(
f'/v{self.api_version}/quotes/identity?nonce={nonce}',
data=None,
cert="",
verify=False
)
if response.status_code == 200:
break
numretries -= 1
time.sleep(config.getint('tenant', 'retry_interval'))
self.assertEqual(response.status_code, 200, "Non-successful Agent identity return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("quote", json_response["results"], "Malformed response body!")
self.assertIn("pubkey", json_response["results"], "Malformed response body!")
# Check the quote identity
self.assertTrue(tpm_instance.check_quote(tenant_templ.agent_uuid,
nonce,
json_response["results"]["pubkey"],
json_response["results"]["quote"],
aik_tpm,
hash_alg=json_response["results"]["hash_alg"]),
"Invalid quote!")
@unittest.skip("Testing of agent's POST /v2/keys/vkey disabled! (spawned CV should do this already)")
def test_023_agent_keys_vkey_post(self):
global public_key
self.assertIsNotNone(self.V, "Required value not set. Previous step may have failed?")
self.assertIsNotNone(public_key, "Required value not set. Previous step may have failed?")
encrypted_V = crypto.rsa_encrypt(crypto.rsa_import_pubkey(public_key), str(self.V))
b64_encrypted_V = base64.b64encode(encrypted_V)
data = {'encrypted_key': b64_encrypted_V}
test_023_agent_keys_vkey_post = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_023_agent_keys_vkey_post.post(
f'/v{self.api_version}/keys/vkey',
data=json.dumps(data),
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Agent vkey post return code!")
json_response = response.json()
self.assertIn("results", json_response, "Malformed response body!")
def test_024_agent_keys_ukey_post(self):
global public_key
self.assertIsNotNone(public_key, "Required value not set. Previous step may have failed?")
self.assertIsNotNone(self.U, "Required value not set. Previous step may have failed?")
self.assertIsNotNone(self.auth_tag, "Required value not set. Previous step may have failed?")
self.assertIsNotNone(self.payload, "Required value not set. Previous step may have failed?")
encrypted_U = crypto.rsa_encrypt(crypto.rsa_import_pubkey(public_key), self.U)
b64_encrypted_u = base64.b64encode(encrypted_U)
data = {
'encrypted_key': b64_encrypted_u,
'auth_tag': self.auth_tag,
'payload': self.payload
}
test_024_agent_keys_ukey_post = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_024_agent_keys_ukey_post.post(
f'/v{self.api_version}/keys/ukey',
data=json.dumps(data),
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Agent ukey post return code!")
json_response = response.json()
self.assertIn("results", json_response, "Malformed response body!")
def test_025_cv_allowlist_post(self):
data = {
'name': 'test-allowlist',
'tpm_policy': json.dumps(self.tpm_policy),
'vtpm_policy': json.dumps(self.vtpm_policy),
'ima_policy': json.dumps(self.allowlist),
}
cv_client = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = cv_client.post(
'/allowlists/test-allowlist',
data=json.dumps(data),
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 201, "Non-successful CV allowlist Post return code!")
json_response = response.json()
self.assertIn("results", json_response, "Malformed response body!")
def test_026_cv_allowlist_get(self):
cv_client = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = cv_client.get(
'/allowlists/test-allowlist',
cert=tenant_templ.cert,
verify=False
)
        self.assertEqual(response.status_code, 200, "Non-successful CV allowlist Get return code!")
json_response = response.json()
self.assertIn("results", json_response, "Malformed response body!")
results = json_response['results']
self.assertEqual(results['name'], 'test-allowlist')
self.assertEqual(results['tpm_policy'], json.dumps(self.tpm_policy))
self.assertEqual(results['vtpm_policy'], json.dumps(self.vtpm_policy))
self.assertEqual(results['ima_policy'], json.dumps(self.allowlist))
def test_027_cv_allowlist_delete(self):
cv_client = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = cv_client.delete(
'/allowlists/test-allowlist',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 204, "Non-successful CV allowlist Delete return code!")
def test_030_cv_agent_post(self):
self.assertIsNotNone(self.V, "Required value not set. Previous step may have failed?")
b64_v = base64.b64encode(self.V)
data = {
'v': b64_v,
'cloudagent_ip': tenant_templ.cloudagent_ip,
'cloudagent_port': tenant_templ.cloudagent_port,
'tpm_policy': json.dumps(self.tpm_policy),
'vtpm_policy': json.dumps(self.vtpm_policy),
'allowlist': json.dumps(self.allowlist),
'ima_sign_verification_keys': '',
'mb_refstate': None,
'metadata': json.dumps(self.metadata),
'revocation_key': self.revocation_key,
'accept_tpm_hash_algs': config.get('tenant', 'accept_tpm_hash_algs').split(','),
'accept_tpm_encryption_algs': config.get('tenant', 'accept_tpm_encryption_algs').split(','),
'accept_tpm_signing_algs': config.get('tenant', 'accept_tpm_signing_algs').split(','),
}
test_030_cv_agent_post = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_030_cv_agent_post.post(
f'/agents/{tenant_templ.agent_uuid}',
data=json.dumps(data),
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful CV agent Post return code!")
json_response = response.json()
self.assertIn("results", json_response, "Malformed response body!")
time.sleep(10)
@unittest.skip("Testing of CV's PUT /v2/agents/{UUID} disabled!")
def test_031_cv_agent_put(self):
# TODO: this should actually test PUT functionality (e.g., make agent fail and then PUT back up)
test_031_cv_agent_put = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_031_cv_agent_put.put(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
data=b'',
cert=tenant_templ.cert,
verify=False
)
        self.assertEqual(response.status_code, 200, "Non-successful CV agent Put return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def test_032_cv_agents_get(self):
test_032_cv_agents_get = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_032_cv_agents_get.get(
f'/v{self.api_version}/agents/',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful CV agent List return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("uuids", json_response["results"], "Malformed response body!")
# Be sure our agent is registered
self.assertEqual(1, len(json_response["results"]["uuids"]))
def test_033_cv_agent_get(self):
test_033_cv_agent_get = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_033_cv_agent_get.get(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful CV agent return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
# Check a few of the important properties are present
self.assertIn("operational_state", json_response["results"], "Malformed response body!")
self.assertIn("ip", json_response["results"], "Malformed response body!")
self.assertIn("port", json_response["results"], "Malformed response body!")
def test_034_cv_agent_post_invalid_exclude_list(self):
self.assertIsNotNone(self.V, "Required value not set. Previous step may have failed?")
b64_v = base64.b64encode(self.V)
# Set unsupported regex in exclude list
allowlist = {'exclude': ['*']}
data = {
'v': b64_v,
'mb_refstate': None,
'cloudagent_ip': tenant_templ.cloudagent_ip,
'cloudagent_port': tenant_templ.cloudagent_port,
'tpm_policy': json.dumps(self.tpm_policy),
'vtpm_policy': json.dumps(self.vtpm_policy),
'allowlist': json.dumps(allowlist),
'ima_sign_verification_keys': '',
'metadata': json.dumps(self.metadata),
'revocation_key': self.revocation_key,
'accept_tpm_hash_algs': config.get('tenant', 'accept_tpm_hash_algs').split(','),
'accept_tpm_encryption_algs': config.get('tenant', 'accept_tpm_encryption_algs').split(','),
'accept_tpm_signing_algs': config.get('tenant', 'accept_tpm_signing_algs').split(','),
}
client = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = client.post(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
data=json.dumps(data),
verify=False
)
self.assertEqual(response.status_code, 400, "Successful CV agent Post return code!")
# Ensure response is well-formed
json_response = response.json()
self.assertIn("results", json_response, "Malformed response body!")
# Agent Poll Testset
def test_040_agent_quotes_integrity_get(self):
global public_key
self.assertIsNotNone(aik_tpm, "Required value not set. Previous step may have failed?")
nonce = tpm_abstract.TPM_Utilities.random_password(20)
mask = self.tpm_policy["mask"]
vmask = self.vtpm_policy["mask"]
partial = "1"
if public_key is None:
partial = "0"
test_040_agent_quotes_integrity_get = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_040_agent_quotes_integrity_get.get(
f'/v{self.api_version}/quotes/integrity?nonce={nonce}&mask={mask}&vmask={vmask}&partial={partial}',
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Agent Integrity Get return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("quote", json_response["results"], "Malformed response body!")
if public_key is None:
self.assertIn("pubkey", json_response["results"], "Malformed response body!")
public_key = json_response["results"]["pubkey"]
self.assertIn("hash_alg", json_response["results"], "Malformed response body!")
quote = json_response["results"]["quote"]
hash_alg = json_response["results"]["hash_alg"]
validQuote = tpm_instance.check_quote(tenant_templ.agent_uuid,
nonce,
public_key,
quote,
aik_tpm,
self.tpm_policy,
hash_alg=hash_alg)
self.assertTrue(validQuote)
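    # Note: plain unittest does not await async test methods, so the coroutine
    # below needs an event loop (e.g. tornado's IOLoop) to actually run.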
async def test_041_agent_keys_verify_get(self):
self.assertIsNotNone(self.K, "Required value not set. Previous step may have failed?")
challenge = tpm_abstract.TPM_Utilities.random_password(20)
encoded = base64.b64encode(self.K).decode('utf-8')
response = tornado_requests.request("GET",
"http://%s:%s/keys/verify?challenge=%s" % (self.cloudagent_ip, self.cloudagent_port, challenge))
response = await response
self.assertEqual(response.status, 200, "Non-successful Agent verify return code!")
json_response = json.loads(response.read().decode())
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("hmac", json_response["results"], "Malformed response body!")
# Be sure response is valid
mac = json_response['results']['hmac']
ex_mac = crypto.do_hmac(encoded, challenge)
# ex_mac = crypto.do_hmac(self.K, challenge)
self.assertEqual(mac, ex_mac, "Agent failed to validate challenge code!")
# CV Cleanup Testset
def test_050_cv_agent_delete(self):
time.sleep(5)
test_050_cv_agent_delete = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_050_cv_agent_delete.delete(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 202, "Non-successful CV agent Delete return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def tearDown(self):
return
@classmethod
def tearDownClass(cls):
return
if __name__ == '__main__':
unittest.main()
| true
| true
|
f7194b8167f283797e8754a97cb5a389e35e14ce
| 2,442
|
py
|
Python
|
hello_fastapi_project/hello_fastapi/backend/app/alembic/env.py
|
KimSoungRyoul/PersistenceLayerInPythonApplication
|
2431553a6cdd913babd546adc6c9376855eb3438
|
[
"MIT"
] | 2
|
2021-11-01T08:08:13.000Z
|
2021-11-01T08:11:51.000Z
|
hello_fastapi_project/hello_fastapi/backend/app/alembic/env.py
|
KimSoungRyoul/PersistenceLayerInPythonApplication
|
2431553a6cdd913babd546adc6c9376855eb3438
|
[
"MIT"
] | null | null | null |
hello_fastapi_project/hello_fastapi/backend/app/alembic/env.py
|
KimSoungRyoul/PersistenceLayerInPythonApplication
|
2431553a6cdd913babd546adc6c9376855eb3438
|
[
"MIT"
] | null | null | null |
from __future__ import with_statement
import os
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
# target_metadata = None
from app.db.base import Base # noqa
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def get_url():
user = os.getenv("POSTGRES_USER", "postgres")
password = os.getenv("POSTGRES_PASSWORD", "1234")
server = os.getenv("POSTGRES_SERVER", "127.0.0.1:5432")
db = os.getenv("POSTGRES_DB", "hello_fastapi_db")
return f"postgresql://{user}:{password}@{server}/{db}"
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = get_url()
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True, compare_type=True
)
with context.begin_transaction():
context.run_migrations()
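# A usage note (standard Alembic behavior, not specific to this project):
# offline mode is what runs under Alembic's --sql flag, e.g.
#   alembic upgrade head --sql
# which emits the migration SQL instead of connecting to the database.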
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
configuration = config.get_section(config.config_ini_section)
configuration["sqlalchemy.url"] = get_url()
connectable = engine_from_config(
configuration, prefix="sqlalchemy.", poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata, compare_type=True
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| 27.75
| 87
| 0.72154
|
from __future__ import with_statement
import os
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
config = context.config
fileConfig(config.config_file_name)
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
# target_metadata = None
from app.db.base import Base # noqa
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def get_url():
user = os.getenv("POSTGRES_USER", "postgres")
password = os.getenv("POSTGRES_PASSWORD", "1234")
server = os.getenv("POSTGRES_SERVER", "127.0.0.1:5432")
db = os.getenv("POSTGRES_DB", "hello_fastapi_db")
return f"postgresql://{user}:{password}@{server}/{db}"
def run_migrations_offline():
url = get_url()
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True, compare_type=True
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
configuration = config.get_section(config.config_ini_section)
configuration["sqlalchemy.url"] = get_url()
connectable = engine_from_config(
configuration, prefix="sqlalchemy.", poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata, compare_type=True
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| true
| true
|
f7194cecc0e0ff877121d721d5f9416a213d7ec6
| 129
|
py
|
Python
|
dan_socket/base.py
|
fredericowu/dan_socket
|
12ccbd8333b76889f6ee2050c78aba67f0a86533
|
[
"Apache-2.0"
] | null | null | null |
dan_socket/base.py
|
fredericowu/dan_socket
|
12ccbd8333b76889f6ee2050c78aba67f0a86533
|
[
"Apache-2.0"
] | null | null | null |
dan_socket/base.py
|
fredericowu/dan_socket
|
12ccbd8333b76889f6ee2050c78aba67f0a86533
|
[
"Apache-2.0"
] | null | null | null |
import socket
class BaseConnection:
PROTOCOL = {
"TCP": socket.SOCK_STREAM,
"UDP": socket.SOCK_DGRAM
}
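# Hypothetical usage sketch (not part of this module): pick the socket type
# from the mapping when creating a socket.
#
#   sock = socket.socket(socket.AF_INET, BaseConnection.PROTOCOL["TCP"])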
| 14.333333
| 34
| 0.604651
|
import socket
class BaseConnection:
PROTOCOL = {
"TCP": socket.SOCK_STREAM,
"UDP": socket.SOCK_DGRAM
}
| true
| true
|
f7194cede6cfbfe8bf2f8a61911cecb70eada48c
| 4,031
|
py
|
Python
|
wok/contrib/hooks.py
|
chrplace/wok
|
a1368f6c6bc75e0b1b878b315bfd31ac8aefbabb
|
[
"MIT"
] | 38
|
2015-01-06T03:41:51.000Z
|
2019-09-18T22:06:28.000Z
|
wok/contrib/hooks.py
|
chrplace/wok
|
a1368f6c6bc75e0b1b878b315bfd31ac8aefbabb
|
[
"MIT"
] | 38
|
2015-02-12T09:33:24.000Z
|
2017-06-29T16:52:29.000Z
|
wok/contrib/hooks.py
|
chrplace/wok
|
a1368f6c6bc75e0b1b878b315bfd31ac8aefbabb
|
[
"MIT"
] | 21
|
2015-01-08T08:46:50.000Z
|
2020-01-28T23:59:40.000Z
|
# vim: set fileencoding=utf8 :
"""Some hooks that might be useful."""
import os
import glob
import subprocess
from StringIO import StringIO
import logging
from slugify import slugify
from wok.exceptions import DependencyException
try:
from lxml import etree
except ImportError:
etree = None
try:
import sass
except ImportError:
sass = None
class HeadingAnchors(object):
"""
Put some paragraph heading anchors.
Serves as a 'page.template.post' wok hook.
"""
def __init__(self, max_heading=3):
if not etree:
logging.warning('To use the HeadingAnchors hook, you must install '
'the library lxml.')
return
self.max_heading = max_heading
logging.info('Loaded hook HeadingAnchors')
def __call__(self, config, page):
if not etree:
return
logging.debug('Called hook HeadingAnchors on {0}'.format(page))
parser = etree.HTMLParser()
sio_source = StringIO(page.rendered)
tree = etree.parse(sio_source, parser)
for lvl in range(1, self.max_heading+1):
headings = tree.iterfind('//h{0}'.format(lvl))
for heading in headings:
if not heading.text:
continue
logging.debug('[HeadingAnchors] {0} {1}'
.format(heading, heading.text))
name = 'heading-{0}'.format(slugify(heading.text))
anchor = etree.Element('a')
anchor.set('class', 'heading_anchor')
anchor.set('href', '#' + name)
anchor.set('title', 'Permalink to this section.')
anchor.text = u'¶'
heading.append(anchor)
heading.set('id', name)
sio_destination = StringIO()
# Use the extension of the template to determine the type of document
if page.template.filename.endswith(".html") or page.filename.endswith(".htm"):
logging.debug('[HeadingAnchors] outputting {0} as HTML'.format(page))
tree.write(sio_destination, method='html')
else:
logging.debug('[HeadingAnchors] outputting {0} as XML'.format(page))
tree.write(sio_destination)
page.rendered = sio_destination.getvalue()
def compile_sass(config, output_dir):
'''
Compile Sass files -> CSS in the output directory.
Any .scss or .sass files found in the output directory will be compiled
to CSS using Sass. The compiled version of the file will be created in the
same directory as the Sass file with the same name and an extension of
.css. For example, foo.scss -> foo.css.
Serves as a 'site.output.post' wok hook, e.g., your __hooks__.py file might
look like this:
from wok.contrib.hooks import compile_sass
hooks = {
'site.output.post': [compile_sass]
}
Dependencies:
- libsass
'''
logging.info('Running hook compile_sass on {0}.'.format(output_dir))
for root, dirs, files in os.walk(output_dir):
for f in files:
fname, fext = os.path.splitext(f)
# Sass partials should not be compiled
            if not fname.startswith('_') and fext in ('.scss', '.sass'):
abspath = os.path.abspath(root)
sass_src = '{0}/{1}'.format(abspath, f)
sass_dest = '{0}/{1}.css'.format(abspath, fname)
if sass is None:
logging.warning('To use compile_sass hook, you must install '
'libsass-python package.')
return
compiled_str = sass.compile(filename=sass_src, output_style='compressed')
with open(sass_dest, 'w') as f:
f.write(compiled_str)
# TODO: Get rid of extra housekeeping by compiling Sass files in
# "site.output.pre" hook
abspath = os.path.abspath(output_dir)
    for f in glob.glob(os.path.join(abspath, '**', '*.s[ac]ss')):
os.remove(f)
| 32.772358
| 89
| 0.598115
|
"""Some hooks that might be useful."""
import os
import glob
import subprocess
from StringIO import StringIO
import logging
from slugify import slugify
from wok.exceptions import DependencyException
try:
from lxml import etree
except ImportError:
etree = None
try:
import sass
except ImportError:
sass = None
class HeadingAnchors(object):
"""
Put some paragraph heading anchors.
Serves as a 'page.template.post' wok hook.
"""
def __init__(self, max_heading=3):
if not etree:
logging.warning('To use the HeadingAnchors hook, you must install '
'the library lxml.')
return
self.max_heading = max_heading
logging.info('Loaded hook HeadingAnchors')
def __call__(self, config, page):
if not etree:
return
logging.debug('Called hook HeadingAnchors on {0}'.format(page))
parser = etree.HTMLParser()
sio_source = StringIO(page.rendered)
tree = etree.parse(sio_source, parser)
for lvl in range(1, self.max_heading+1):
headings = tree.iterfind('//h{0}'.format(lvl))
for heading in headings:
if not heading.text:
continue
logging.debug('[HeadingAnchors] {0} {1}'
.format(heading, heading.text))
name = 'heading-{0}'.format(slugify(heading.text))
anchor = etree.Element('a')
anchor.set('class', 'heading_anchor')
anchor.set('href', '#' + name)
anchor.set('title', 'Permalink to this section.')
anchor.text = u'¶'
heading.append(anchor)
heading.set('id', name)
sio_destination = StringIO()
if page.template.filename.endswith(".html") or page.filename.endswith(".htm"):
logging.debug('[HeadingAnchors] outputting {0} as HTML'.format(page))
tree.write(sio_destination, method='html')
else:
logging.debug('[HeadingAnchors] outputting {0} as XML'.format(page))
tree.write(sio_destination)
page.rendered = sio_destination.getvalue()
def compile_sass(config, output_dir):
'''
Compile Sass files -> CSS in the output directory.
Any .scss or .sass files found in the output directory will be compiled
to CSS using Sass. The compiled version of the file will be created in the
same directory as the Sass file with the same name and an extension of
.css. For example, foo.scss -> foo.css.
Serves as a 'site.output.post' wok hook, e.g., your __hooks__.py file might
look like this:
from wok.contrib.hooks import compile_sass
hooks = {
'site.output.post': [compile_sass]
}
Dependencies:
- libsass
'''
logging.info('Running hook compile_sass on {0}.'.format(output_dir))
for root, dirs, files in os.walk(output_dir):
for f in files:
fname, fext = os.path.splitext(f)
            if not fname.startswith('_') and fext in ('.scss', '.sass'):
abspath = os.path.abspath(root)
sass_src = '{0}/{1}'.format(abspath, f)
sass_dest = '{0}/{1}.css'.format(abspath, fname)
if sass is None:
logging.warning('To use compile_sass hook, you must install '
'libsass-python package.')
return
compiled_str = sass.compile(filename=sass_src, output_style='compressed')
with open(sass_dest, 'w') as f:
f.write(compiled_str)
abspath = os.path.abspath(output_dir)
    for f in glob.glob(os.path.join(abspath, '**', '*.s[ac]ss')):
os.remove(f)
| false
| true
|
f7194df8986a3f798477abbd22f0e92a006cfa20
| 967
|
py
|
Python
|
valid-sudoku/valid-sudoku.py
|
Atri10/Leet-code---Atri_Patel
|
49fc59b9147a44ab04a66128fbb2ef259b5f7b7c
|
[
"MIT"
] | 1
|
2021-10-10T20:21:18.000Z
|
2021-10-10T20:21:18.000Z
|
valid-sudoku/valid-sudoku.py
|
Atri10/Leet-code---Atri_Patel
|
49fc59b9147a44ab04a66128fbb2ef259b5f7b7c
|
[
"MIT"
] | null | null | null |
valid-sudoku/valid-sudoku.py
|
Atri10/Leet-code---Atri_Patel
|
49fc59b9147a44ab04a66128fbb2ef259b5f7b7c
|
[
"MIT"
] | null | null | null |
from typing import List

class Solution:
def isValidSudoku(self, board: List[List[str]]) -> bool:
BZip = list(zip(*board))
def Checkline(li):
temp = [i for i in li if i!="."]
return len(set(temp))==len(temp)
def check_row(board):
for i in board:
if not Checkline(i):return False
return True
def check_col(board):
for i in BZip:
if not Checkline(i):return False
return True
def square(board):
for i in range(0,9,3):
for j in range(0,9,3):
sqr = [board[x][y] for x in range(i,i+3) for y in range(j,j+3)]
if not Checkline(sqr):return False
return True
def checkmat():
return (check_row(board) and check_col(board) and square(board))
return checkmat()
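# Hypothetical usage sketch (board is a 9x9 list of one-character strings,
# with "." marking empty cells):
#
#   board = [["."] * 9 for _ in range(9)]
#   board[0][0] = "5"
#   print(Solution().isValidSudoku(board))  # True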
| 31.193548
| 83
| 0.458118
|
from typing import List

class Solution:
def isValidSudoku(self, board: List[List[str]]) -> bool:
BZip = list(zip(*board))
def Checkline(li):
temp = [i for i in li if i!="."]
return len(set(temp))==len(temp)
def check_row(board):
for i in board:
if not Checkline(i):return False
return True
def check_col(board):
for i in BZip:
if not Checkline(i):return False
return True
def square(board):
for i in range(0,9,3):
for j in range(0,9,3):
sqr = [board[x][y] for x in range(i,i+3) for y in range(j,j+3)]
if not Checkline(sqr):return False
return True
def checkmat():
return (check_row(board) and check_col(board) and square(board))
return checkmat()
| true
| true
|
f7194e480fb8f963b01db5fe2fb064378758697e
| 11,918
|
py
|
Python
|
instabot/api/api_video.py
|
danomaj/instabot
|
769e41587a1aaeb50b89f0bf9d3933123e6ddbcc
|
[
"Apache-2.0"
] | 1
|
2022-01-23T09:25:50.000Z
|
2022-01-23T09:25:50.000Z
|
instabot/api/api_video.py
|
danomaj/instabot
|
769e41587a1aaeb50b89f0bf9d3933123e6ddbcc
|
[
"Apache-2.0"
] | null | null | null |
instabot/api/api_video.py
|
danomaj/instabot
|
769e41587a1aaeb50b89f0bf9d3933123e6ddbcc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import copy
import json
import os
import re
import shutil
import subprocess
import time
from requests_toolbelt import MultipartEncoder
from . import config
def download_video(
self,
media_id,
filename=None,
media=False,
folder="videos"
):
video_urls = []
if not media:
self.media_info(media_id)
media = self.last_json["items"][0]
filename = (
"{}_{}.mp4".format(media["user"]["username"], media_id)
if not filename
else "{}.mp4".format(filename)
)
try:
clips = media["video_versions"]
video_urls.append(clips[0]["url"])
except KeyError:
carousels = media.get("carousel_media", [])
for carousel in carousels:
video_urls.append(carousel["video_versions"][0]["url"])
except Exception:
return False
for counter, video_url in enumerate(video_urls):
fname = os.path.join(folder, "{}_{}".format(counter, filename))
if os.path.exists(fname):
            print('File %s already exists, returning it' % fname)
return os.path.abspath(fname)
response = self.session.get(video_url, stream=True)
if response.status_code == 200:
with open(fname, "wb") as f:
response.raw.decode_content = True
shutil.copyfileobj(response.raw, f)
return os.path.abspath(fname)
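# Hypothetical usage sketch (assuming this function is bound as a method of the
# project's API object, which the `self` parameter suggests):
#
#   path = api.download_video(media_id)  # absolute path to the .mp4, or False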
# Helper kept around from the old upload_video implementation; no longer used.
def get_video_info(filename):
res = {}
try:
terminalResult = subprocess.Popen(
["ffprobe", filename],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
for x in terminalResult.stdout.readlines():
# Duration: 00:00:59.51, start: 0.000000, bitrate: 435 kb/s
m = re.search(
r"duration: (\d\d:\d\d:\d\d\.\d\d),",
str(x),
flags=re.IGNORECASE
)
if m is not None:
res["duration"] = m.group(1)
# Video: h264 (Constrained Baseline)
# (avc1 / 0x31637661), yuv420p, 480x268
m = re.search(
r"video:\s.*\s(\d+)x(\d+)\s",
str(x),
flags=re.IGNORECASE
)
if m is not None:
res["width"] = m.group(1)
res["height"] = m.group(2)
finally:
if "width" not in res:
print(
"ERROR: 'ffprobe' not found, please install "
"'ffprobe' with one of following methods:"
)
print(" sudo apt-get install ffmpeg")
print("or sudo apt-get install -y libav-tools")
return res
def upload_video(
self,
video,
caption=None,
upload_id=None,
thumbnail=None,
options={}
):
"""Upload video to Instagram
@param video Path to video file (String)
@param caption Media description (String)
@param upload_id Unique upload_id (String). When None, then generate
automatically
@param thumbnail Path to thumbnail for video (String). When None, then
thumbnail is generate automatically
@param options Object with difference options, e.g. configure_timeout,
rename_thumbnail, rename (Dict)
Designed to reduce the number of function arguments!
This is the simplest request object.
@return Object with state of uploading to Instagram (or False)
"""
options = dict(
{"configure_timeout": 15, "rename_thumbnail": True, "rename": True},
**(options or {})
)
if upload_id is None:
upload_id = str(int(time.time() * 1000))
video, thumbnail, width, height, duration = resize_video(video, thumbnail)
data = {
"upload_id": upload_id,
"_csrftoken": self.token,
"media_type": "2",
"_uuid": self.uuid,
}
m = MultipartEncoder(data, boundary=self.uuid)
self.session.headers.update(
{
"X-IG-Capabilities": "3Q4=",
"X-IG-Connection-Type": "WIFI",
"Host": "i.instagram.com",
"Cookie2": "$Version=1",
"Accept-Language": "en-US",
"Accept-Encoding": "gzip, deflate",
"Content-type": m.content_type,
"Connection": "keep-alive",
"User-Agent": self.user_agent,
}
)
response = self.session.post(
config.API_URL + "upload/video/", data=m.to_string()
)
if response.status_code == 200:
body = json.loads(response.text)
upload_url = body["video_upload_urls"][3]["url"]
upload_job = body["video_upload_urls"][3]["job"]
with open(video, "rb") as video_bytes:
video_data = video_bytes.read()
# solve issue #85 TypeError:
# slice indices must be integers or None or have an __index__ method
request_size = len(video_data) // 4
last_request_extra = len(video_data) - 3 * request_size
headers = copy.deepcopy(self.session.headers)
self.session.headers.update(
{
"X-IG-Capabilities": "3Q4=",
"X-IG-Connection-Type": "WIFI",
"Cookie2": "$Version=1",
"Accept-Language": "en-US",
"Accept-Encoding": "gzip, deflate",
"Content-type": "application/octet-stream",
"Session-ID": upload_id,
"Connection": "keep-alive",
"Content-Disposition": 'attachment; filename="video.mov"',
"job": upload_job,
"Host": "upload.instagram.com",
"User-Agent": self.user_agent,
}
)
for i in range(4):
start = i * request_size
if i == 3:
end = i * request_size + last_request_extra
else:
end = (i + 1) * request_size
length = last_request_extra if i == 3 else request_size
content_range = "bytes {start}-{end}/{len_video}".format(
start=start, end=end - 1, len_video=len(video_data)
).encode("utf-8")
self.session.headers.update(
{
"Content-Length": str(end - start),
"Content-Range": content_range
}
)
response = self.session.post(
upload_url, data=video_data[start: start + length]
)
self.session.headers = headers
configure_timeout = options.get("configure_timeout")
if response.status_code == 200:
for attempt in range(4):
if configure_timeout:
time.sleep(configure_timeout)
if self.configure_video(
upload_id,
video,
thumbnail,
width,
height,
duration,
caption,
options=options,
):
media = self.last_json.get("media")
self.expose()
if options.get("rename"):
from os import rename
rename(video, "{}.REMOVE_ME".format(video))
return media
return False
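# Hypothetical usage sketch (the binding and option values are assumptions):
#
#   media = api.upload_video("clip.mp4", caption="demo",
#                            options={"configure_timeout": 30})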
def configure_video(
self,
upload_id,
video,
thumbnail,
width,
height,
duration,
caption="",
options={}
):
"""Post Configure Video (send caption, thumbnail and more to Instagram)
@param upload_id Unique upload_id (String). Received from "upload_video"
@param video Path to video file (String)
@param thumbnail Path to thumbnail for video (String). When None,
then thumbnail is generate automatically
@param width Width in px (Integer)
@param height Height in px (Integer)
@param duration Duration in seconds (Integer)
@param caption Media description (String)
@param options Object with difference options, e.g. configure_timeout,
rename_thumbnail, rename (Dict)
Designed to reduce the number of function arguments!
This is the simplest request object.
"""
# clipInfo = get_video_info(video)
options = {"rename": options.get("rename_thumbnail", True)}
self.upload_photo(
photo=thumbnail,
caption=caption,
upload_id=upload_id,
from_video=True,
options=options,
)
data = self.json_data(
{
"upload_id": upload_id,
"source_type": 3,
"poster_frame_index": 0,
"length": 0.00,
"audio_muted": False,
"filter_type": 0,
"video_result": "deprecated",
"clips": {
"length": duration,
"source_type": "3",
"camera_position": "back",
},
"extra": {"source_width": width, "source_height": height},
"device": self.device_settings,
"caption": caption,
}
)
return self.send_request("media/configure/?video=1", data)
def resize_video(fname, thumbnail=None):
from math import ceil
try:
import moviepy.editor as mp
except ImportError as e:
print("ERROR: {}".format(e))
print(
"Required module `moviepy` not installed\n"
"Install with `pip install moviepy` and retry.\n\n"
"You may need also:\n"
"pip install --upgrade setuptools\n"
"pip install numpy --upgrade --ignore-installed"
)
return False
print("Analizing `{}`".format(fname))
h_lim = {"w": 90.0, "h": 47.0}
v_lim = {"w": 4.0, "h": 5.0}
d_lim = 60
vid = mp.VideoFileClip(fname)
(w, h) = vid.size
deg = vid.rotation
ratio = w * 1.0 / h * 1.0
print(
"FOUND w:{w}, h:{h}, rotation={d}, ratio={r}".format(
w=w,
h=h,
r=ratio,
d=deg
)
)
if w > h:
print("Horizontal video")
if ratio > (h_lim["w"] / h_lim["h"]):
print("Cropping video")
cut = int(ceil((w - h * h_lim["w"] / h_lim["h"]) / 2))
left = cut
right = w - cut
top = 0
bottom = h
vid = vid.crop(x1=left, y1=top, x2=right, y2=bottom)
(w, h) = vid.size
if w > 1081:
print("Resizing video")
vid = vid.resize(width=1080)
elif w < h:
print("Vertical video")
if ratio < (v_lim["w"] / v_lim["h"]):
print("Cropping video")
cut = int(ceil((h - w * v_lim["h"] / v_lim["w"]) / 2))
left = 0
right = w
top = cut
bottom = h - cut
vid = vid.crop(x1=left, y1=top, x2=right, y2=bottom)
(w, h) = vid.size
if h > 1081:
print("Resizing video")
vid = vid.resize(height=1080)
else:
print("Square video")
if w > 1081:
print("Resizing video")
vid = vid.resize(width=1080)
(w, h) = vid.size
if vid.duration > d_lim:
print("Cutting video to {} sec from start".format(d_lim))
vid = vid.subclip(0, d_lim)
new_fname = "{}.CONVERTED.mp4".format(fname)
print(
"Saving new video w:{w} h:{h} to `{f}`".format(
w=w,
h=h,
f=new_fname
)
)
vid.write_videofile(new_fname, codec="libx264", audio_codec="aac")
if not thumbnail:
print("Generating thumbnail...")
thumbnail = "{}.jpg".format(fname)
vid.save_frame(thumbnail, t=(vid.duration / 2))
return new_fname, thumbnail, w, h, vid.duration
| 32.831956
| 78
| 0.523662
|
import copy
import json
import os
import re
import shutil
import subprocess
import time
from requests_toolbelt import MultipartEncoder
from . import config
def download_video(
self,
media_id,
filename=None,
media=False,
folder="videos"
):
video_urls = []
if not media:
self.media_info(media_id)
media = self.last_json["items"][0]
filename = (
"{}_{}.mp4".format(media["user"]["username"], media_id)
if not filename
else "{}.mp4".format(filename)
)
try:
clips = media["video_versions"]
video_urls.append(clips[0]["url"])
except KeyError:
carousels = media.get("carousel_media", [])
for carousel in carousels:
video_urls.append(carousel["video_versions"][0]["url"])
except Exception:
return False
for counter, video_url in enumerate(video_urls):
fname = os.path.join(folder, "{}_{}".format(counter, filename))
if os.path.exists(fname):
            print('File %s already exists, returning it' % fname)
return os.path.abspath(fname)
response = self.session.get(video_url, stream=True)
if response.status_code == 200:
with open(fname, "wb") as f:
response.raw.decode_content = True
shutil.copyfileobj(response.raw, f)
return os.path.abspath(fname)
def get_video_info(filename):
res = {}
try:
terminalResult = subprocess.Popen(
["ffprobe", filename],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
for x in terminalResult.stdout.readlines():
m = re.search(
r"duration: (\d\d:\d\d:\d\d\.\d\d),",
str(x),
flags=re.IGNORECASE
)
if m is not None:
res["duration"] = m.group(1)
m = re.search(
r"video:\s.*\s(\d+)x(\d+)\s",
str(x),
flags=re.IGNORECASE
)
if m is not None:
res["width"] = m.group(1)
res["height"] = m.group(2)
finally:
if "width" not in res:
print(
"ERROR: 'ffprobe' not found, please install "
"'ffprobe' with one of following methods:"
)
print(" sudo apt-get install ffmpeg")
print("or sudo apt-get install -y libav-tools")
return res
def upload_video(
self,
video,
caption=None,
upload_id=None,
thumbnail=None,
options={}
):
options = dict(
{"configure_timeout": 15, "rename_thumbnail": True, "rename": True},
**(options or {})
)
if upload_id is None:
upload_id = str(int(time.time() * 1000))
video, thumbnail, width, height, duration = resize_video(video, thumbnail)
data = {
"upload_id": upload_id,
"_csrftoken": self.token,
"media_type": "2",
"_uuid": self.uuid,
}
m = MultipartEncoder(data, boundary=self.uuid)
self.session.headers.update(
{
"X-IG-Capabilities": "3Q4=",
"X-IG-Connection-Type": "WIFI",
"Host": "i.instagram.com",
"Cookie2": "$Version=1",
"Accept-Language": "en-US",
"Accept-Encoding": "gzip, deflate",
"Content-type": m.content_type,
"Connection": "keep-alive",
"User-Agent": self.user_agent,
}
)
response = self.session.post(
config.API_URL + "upload/video/", data=m.to_string()
)
if response.status_code == 200:
body = json.loads(response.text)
upload_url = body["video_upload_urls"][3]["url"]
upload_job = body["video_upload_urls"][3]["job"]
with open(video, "rb") as video_bytes:
video_data = video_bytes.read()
request_size = len(video_data) // 4
last_request_extra = len(video_data) - 3 * request_size
headers = copy.deepcopy(self.session.headers)
self.session.headers.update(
{
"X-IG-Capabilities": "3Q4=",
"X-IG-Connection-Type": "WIFI",
"Cookie2": "$Version=1",
"Accept-Language": "en-US",
"Accept-Encoding": "gzip, deflate",
"Content-type": "application/octet-stream",
"Session-ID": upload_id,
"Connection": "keep-alive",
"Content-Disposition": 'attachment; filename="video.mov"',
"job": upload_job,
"Host": "upload.instagram.com",
"User-Agent": self.user_agent,
}
)
for i in range(4):
start = i * request_size
if i == 3:
end = i * request_size + last_request_extra
else:
end = (i + 1) * request_size
length = last_request_extra if i == 3 else request_size
content_range = "bytes {start}-{end}/{len_video}".format(
start=start, end=end - 1, len_video=len(video_data)
).encode("utf-8")
self.session.headers.update(
{
"Content-Length": str(end - start),
"Content-Range": content_range
}
)
response = self.session.post(
upload_url, data=video_data[start: start + length]
)
self.session.headers = headers
configure_timeout = options.get("configure_timeout")
if response.status_code == 200:
for attempt in range(4):
if configure_timeout:
time.sleep(configure_timeout)
if self.configure_video(
upload_id,
video,
thumbnail,
width,
height,
duration,
caption,
options=options,
):
media = self.last_json.get("media")
self.expose()
if options.get("rename"):
from os import rename
rename(video, "{}.REMOVE_ME".format(video))
return media
return False
def configure_video(
self,
upload_id,
video,
thumbnail,
width,
height,
duration,
caption="",
options={}
):
options = {"rename": options.get("rename_thumbnail", True)}
self.upload_photo(
photo=thumbnail,
caption=caption,
upload_id=upload_id,
from_video=True,
options=options,
)
data = self.json_data(
{
"upload_id": upload_id,
"source_type": 3,
"poster_frame_index": 0,
"length": 0.00,
"audio_muted": False,
"filter_type": 0,
"video_result": "deprecated",
"clips": {
"length": duration,
"source_type": "3",
"camera_position": "back",
},
"extra": {"source_width": width, "source_height": height},
"device": self.device_settings,
"caption": caption,
}
)
return self.send_request("media/configure/?video=1", data)
def resize_video(fname, thumbnail=None):
from math import ceil
try:
import moviepy.editor as mp
except ImportError as e:
print("ERROR: {}".format(e))
print(
"Required module `moviepy` not installed\n"
"Install with `pip install moviepy` and retry.\n\n"
"You may need also:\n"
"pip install --upgrade setuptools\n"
"pip install numpy --upgrade --ignore-installed"
)
return False
print("Analizing `{}`".format(fname))
h_lim = {"w": 90.0, "h": 47.0}
v_lim = {"w": 4.0, "h": 5.0}
d_lim = 60
vid = mp.VideoFileClip(fname)
(w, h) = vid.size
deg = vid.rotation
ratio = w * 1.0 / h * 1.0
print(
"FOUND w:{w}, h:{h}, rotation={d}, ratio={r}".format(
w=w,
h=h,
r=ratio,
d=deg
)
)
if w > h:
print("Horizontal video")
if ratio > (h_lim["w"] / h_lim["h"]):
print("Cropping video")
cut = int(ceil((w - h * h_lim["w"] / h_lim["h"]) / 2))
left = cut
right = w - cut
top = 0
bottom = h
vid = vid.crop(x1=left, y1=top, x2=right, y2=bottom)
(w, h) = vid.size
if w > 1081:
print("Resizing video")
vid = vid.resize(width=1080)
elif w < h:
print("Vertical video")
if ratio < (v_lim["w"] / v_lim["h"]):
print("Cropping video")
cut = int(ceil((h - w * v_lim["h"] / v_lim["w"]) / 2))
left = 0
right = w
top = cut
bottom = h - cut
vid = vid.crop(x1=left, y1=top, x2=right, y2=bottom)
(w, h) = vid.size
if h > 1081:
print("Resizing video")
vid = vid.resize(height=1080)
else:
print("Square video")
if w > 1081:
print("Resizing video")
vid = vid.resize(width=1080)
(w, h) = vid.size
if vid.duration > d_lim:
print("Cutting video to {} sec from start".format(d_lim))
vid = vid.subclip(0, d_lim)
new_fname = "{}.CONVERTED.mp4".format(fname)
print(
"Saving new video w:{w} h:{h} to `{f}`".format(
w=w,
h=h,
f=new_fname
)
)
vid.write_videofile(new_fname, codec="libx264", audio_codec="aac")
if not thumbnail:
print("Generating thumbnail...")
thumbnail = "{}.jpg".format(fname)
vid.save_frame(thumbnail, t=(vid.duration / 2))
return new_fname, thumbnail, w, h, vid.duration
| true
| true
|
f7194ea7dd6a75befd86621e8258b956db12dfa2
| 1,839
|
py
|
Python
|
audiospec.py
|
MountainRange/mobius_score
|
fc900ab456b3e3431cfa6d9684b97ec6321d0a23
|
[
"MIT"
] | null | null | null |
audiospec.py
|
MountainRange/mobius_score
|
fc900ab456b3e3431cfa6d9684b97ec6321d0a23
|
[
"MIT"
] | null | null | null |
audiospec.py
|
MountainRange/mobius_score
|
fc900ab456b3e3431cfa6d9684b97ec6321d0a23
|
[
"MIT"
] | null | null | null |
import numpy as np
import librosa
from tqdm import tqdm
from audiomisc import ks_key
from constants import VERTICALCUTOFF, FFT_SIZE, FFT_HOP
def stft(x, fft_size, hopsamp):
window = np.hanning(fft_size)
return np.array([np.fft.rfft(window*x[i:i+fft_size])
for i in range(0, len(x)-fft_size, hopsamp)])
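# Rough shape sketch (the fft_size/hopsamp values here are assumptions; the
# real ones come from constants.FFT_SIZE and FFT_HOP): a 1 s signal at 44100 Hz
# with fft_size=2048 and hopsamp=512 yields about (44100 - 2048) // 512 = 82
# frames, each holding fft_size // 2 + 1 = 1025 complex rFFT bins.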
def wav_to_spec(fn):
input_signal, sample_rate = librosa.load(fn, sr=44100)
stft_mag = np.array([])
split = int(1e6)#int(264600)
fft_size = FFT_SIZE
hopsamp = fft_size // FFT_HOP
for i in tqdm(range(len(input_signal)//split)):
temp_signal = input_signal[(split*i):(split*(i+1))]
stft_full = stft(temp_signal, fft_size, hopsamp)
stft_full = abs(stft_full)
if np.max(stft_full) != 0:
stft_full = (stft_full - np.mean(stft_full)) / np.std(stft_full)
stft_full += abs(np.min(stft_full))
stft_full *= 255.0/np.max(stft_full)
if stft_mag.shape[0] != 0:
stft_mag = np.concatenate((stft_mag, stft_full))
else:
stft_mag = stft_full
print("Calculating tempo")
tempo, _ = librosa.beat.beat_track(y=input_signal, sr=sample_rate, hop_length=512)
print("Calculating music key")
chroma = librosa.feature.chroma_stft(y=input_signal, sr=sample_rate)
chroma = [sum(x)/len(x) for x in chroma]
bestmajor, bestminor = ks_key(chroma)
if max(bestmajor) > max(bestminor):
key = np.argmax(bestmajor)
# C, Db, D, Eb, E, F, F#, G, Ab, A, Bb, B
keymap = [0, -5, 2, -3, 4, -1, 6, 1, -4, 3, -2, 5]
else:
key = np.argmax(bestminor)
# c, c#, d, eb, e, f, f#, g, g#, a, bb, b
keymap = [-3, 4, -1, -6, 1, -4, 3, -2, 5, 0, -5, 2]
return stft_mag[:, :VERTICALCUTOFF].T, tempo, keymap[key]
| 36.058824
| 86
| 0.594889
|
import numpy as np
import librosa
from tqdm import tqdm
from audiomisc import ks_key
from constants import VERTICALCUTOFF, FFT_SIZE, FFT_HOP
def stft(x, fft_size, hopsamp):
window = np.hanning(fft_size)
return np.array([np.fft.rfft(window*x[i:i+fft_size])
for i in range(0, len(x)-fft_size, hopsamp)])
def wav_to_spec(fn):
input_signal, sample_rate = librosa.load(fn, sr=44100)
stft_mag = np.array([])
split = int(1e6)
fft_size = FFT_SIZE
hopsamp = fft_size // FFT_HOP
for i in tqdm(range(len(input_signal)//split)):
temp_signal = input_signal[(split*i):(split*(i+1))]
stft_full = stft(temp_signal, fft_size, hopsamp)
stft_full = abs(stft_full)
if np.max(stft_full) != 0:
stft_full = (stft_full - np.mean(stft_full)) / np.std(stft_full)
stft_full += abs(np.min(stft_full))
stft_full *= 255.0/np.max(stft_full)
if stft_mag.shape[0] != 0:
stft_mag = np.concatenate((stft_mag, stft_full))
else:
stft_mag = stft_full
print("Calculating tempo")
tempo, _ = librosa.beat.beat_track(y=input_signal, sr=sample_rate, hop_length=512)
print("Calculating music key")
chroma = librosa.feature.chroma_stft(y=input_signal, sr=sample_rate)
chroma = [sum(x)/len(x) for x in chroma]
bestmajor, bestminor = ks_key(chroma)
if max(bestmajor) > max(bestminor):
key = np.argmax(bestmajor)
        keymap = [0, -5, 2, -3, 4, -1, 6, 1, -4, 3, -2, 5]
    else:
        key = np.argmax(bestminor)
        keymap = [-3, 4, -1, -6, 1, -4, 3, -2, 5, 0, -5, 2]
return stft_mag[:, :VERTICALCUTOFF].T, tempo, keymap[key]
| true
| true
|
f7194ee4f7406065cad234d9d9516182a66c1fdd
| 4,936
|
py
|
Python
|
catalog/views.py
|
singdingo/django_local_library
|
e6928ce96d37e5f233a5eda89dcf63c04a551a2d
|
[
"MIT"
] | null | null | null |
catalog/views.py
|
singdingo/django_local_library
|
e6928ce96d37e5f233a5eda89dcf63c04a551a2d
|
[
"MIT"
] | null | null | null |
catalog/views.py
|
singdingo/django_local_library
|
e6928ce96d37e5f233a5eda89dcf63c04a551a2d
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.decorators import permission_required
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from .models import Book, Author, BookInstance, Genre
from django.views import generic
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.http import HttpResponseRedirect
from django.urls import reverse, reverse_lazy
import datetime
from .forms import RenewBookForm
def index(request):
"""
View function for home page of site.
"""
# Generate counts of some of the main objects
num_books = Book.objects.all().count()
num_instances = BookInstance.objects.all().count()
# Available books (status = 'a')
num_instances_available = BookInstance.objects.filter(
status__exact='a').count()
num_authors = Author.objects.count() # The 'all()' is implied by default.
# Number of visits to this view, as counted in the session variable.
num_visits = request.session.get('num_visits', 0)
request.session['num_visits'] = num_visits + 1
# Render the HTML template index.html with the data in the context variable
return render(
request,
'index.html',
context={
'num_books': num_books,
'num_instances': num_instances,
'num_instances_available': num_instances_available,
'num_authors': num_authors,
'num_visits': num_visits
},
)
class BookListView(generic.ListView):
model = Book
paginate_by = 2
# Some possible overrides
'''
context_object_name = 'my_book_list' # your own name for the list as a template variable
template_name = 'books/my_arbitrary_template_name_list.html' # Specify your own template name/location
'''
#Another potentially useful override
'''
def get_queryset(self):
return Book.objects.filter(
title__icontains='war')[:5] # Get 5 books containing the title war
'''
class BookDetailView(generic.DetailView):
model=Book
class AuthorListView(generic.ListView):
model = Author
paginate_by = 10
class AuthorDetailView(generic.DetailView):
model = Author
class LoanedBooksByUserListView(LoginRequiredMixin, generic.ListView):
"""
Generic class based view listing books on loan to current user
"""
model = BookInstance
template_name = 'catalog/bookinstance_list_borrowed_user.html'
paginate_by = 10
def get_queryset(self):
return BookInstance.objects.filter(borrower=self.request.user).filter(status__exact='o').order_by('due_back')
class LoanedBooksStaffListview(PermissionRequiredMixin, generic.ListView):
"""
Generic class back view listing all loanded books (for staff only).
"""
model = BookInstance
template_name = 'catalog/book_instance_list_borrowed_staff.html'
paginate_by = 10
# Set permissions
permission_required = ('catalog.can_mark_returned',)
def get_queryset(self):
return BookInstance.objects.filter(status__exact='o').order_by("due_back")
@permission_required('catalog.can_mark_returned')
def renew_book_librarian(request, pk):
"""
View function for renewing a specific BookInstance by librarian
"""
book_inst = get_object_or_404(BookInstance, pk=pk)
# If this is a POST request then process the Form data
if request.method == 'POST':
# Create a form instance and populate it with data from the request (binding):
form = RenewBookForm(request.POST)
# Check if the form is valid:
if form.is_valid():
# process the data in form.cleaned_data as required (here we just write it to the model due_back field)
book_inst.due_back = form.cleaned_data['renewal_date']
book_inst.save()
# redirect to a new URL:
return HttpResponseRedirect(reverse('all-borrowed'))
# If this is a GET (or any other method) create the default form.
else:
proposed_renewal_date = datetime.date.today() + datetime.timedelta(
weeks=3)
form = RenewBookForm(initial={
'renewal_date': proposed_renewal_date,
})
return render(request, 'catalog/book_renew_librarian.html', {
'form': form,
'bookinst': book_inst
})
class AuthorCreate(CreateView):
model = Author
fields = '__all__'
#initial={'date_of_death':'05/01/2018',}
class AuthorUpdate(UpdateView):
model = Author
fields = ['first_name','last_name','date_of_birth','date_of_death']
class AuthorDelete(DeleteView):
model = Author
success_url = reverse_lazy('authors')
class BookCreate(CreateView):
model = Book
fields = '__all__'
class BookUpdate(UpdateView):
model = Book
fields = '__all__'
class BookDelete(DeleteView):
model = Book
success_url = reverse_lazy('books')
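# A minimal URLconf sketch (an assumption, not part of this file) matching the
# names used by reverse() and reverse_lazy() above:
#
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('books/', views.BookListView.as_view(), name='books'),
#       path('authors/', views.AuthorListView.as_view(), name='authors'),
#       path('allborrowed/', views.LoanedBooksStaffListview.as_view(), name='all-borrowed'),
#   ]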
| 32.261438
| 117
| 0.6953
|
from django.contrib.auth.decorators import permission_required
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from .models import Book, Author, BookInstance, Genre
from django.views import generic
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.http import HttpResponseRedirect
from django.urls import reverse, reverse_lazy
import datetime
from .forms import RenewBookForm
def index(request):
num_books = Book.objects.all().count()
num_instances = BookInstance.objects.all().count()
num_instances_available = BookInstance.objects.filter(
status__exact='a').count()
num_authors = Author.objects.count()
num_visits = request.session.get('num_visits', 0)
request.session['num_visits'] = num_visits + 1
return render(
request,
'index.html',
context={
'num_books': num_books,
'num_instances': num_instances,
'num_instances_available': num_instances_available,
'num_authors': num_authors,
'num_visits': num_visits
},
)
class BookListView(generic.ListView):
model = Book
paginate_by = 2
class BookDetailView(generic.DetailView):
model=Book
class AuthorListView(generic.ListView):
model = Author
paginate_by = 10
class AuthorDetailView(generic.DetailView):
model = Author
class LoanedBooksByUserListView(LoginRequiredMixin, generic.ListView):
model = BookInstance
template_name = 'catalog/bookinstance_list_borrowed_user.html'
paginate_by = 10
def get_queryset(self):
return BookInstance.objects.filter(borrower=self.request.user).filter(status__exact='o').order_by('due_back')
class LoanedBooksStaffListview(PermissionRequiredMixin, generic.ListView):
model = BookInstance
template_name = 'catalog/book_instance_list_borrowed_staff.html'
paginate_by = 10
permission_required = ('catalog.can_mark_returned',)
def get_queryset(self):
return BookInstance.objects.filter(status__exact='o').order_by("due_back")
@permission_required('catalog.can_mark_returned')
def renew_book_librarian(request, pk):
book_inst = get_object_or_404(BookInstance, pk=pk)
if request.method == 'POST':
form = RenewBookForm(request.POST)
if form.is_valid():
book_inst.due_back = form.cleaned_data['renewal_date']
book_inst.save()
return HttpResponseRedirect(reverse('all-borrowed'))
else:
proposed_renewal_date = datetime.date.today() + datetime.timedelta(
weeks=3)
form = RenewBookForm(initial={
'renewal_date': proposed_renewal_date,
})
return render(request, 'catalog/book_renew_librarian.html', {
'form': form,
'bookinst': book_inst
})
class AuthorCreate(CreateView):
model = Author
fields = '__all__'
class AuthorUpdate(UpdateView):
model = Author
fields = ['first_name','last_name','date_of_birth','date_of_death']
class AuthorDelete(DeleteView):
model = Author
success_url = reverse_lazy('authors')
class BookCreate(CreateView):
model = Book
fields = '__all__'
class BookUpdate(UpdateView):
model = Book
fields = '__all__'
class BookDelete(DeleteView):
model = Book
success_url = reverse_lazy('books')
| true
| true
|
f7194f009ff4f9095f19defc8b8b945fae4f793a
| 698
|
py
|
Python
|
PyCharm/Exercicios/Aula17/ex078.py
|
fabiodarice/Python
|
15ec1c7428f138be875111ac98ba38cf2eec1a93
|
[
"MIT"
] | null | null | null |
PyCharm/Exercicios/Aula17/ex078.py
|
fabiodarice/Python
|
15ec1c7428f138be875111ac98ba38cf2eec1a93
|
[
"MIT"
] | null | null | null |
PyCharm/Exercicios/Aula17/ex078.py
|
fabiodarice/Python
|
15ec1c7428f138be875111ac98ba38cf2eec1a93
|
[
"MIT"
] | null | null | null |
# Library imports
# Program title
print('\033[1;34;40mMAIOR E MENOR VALORES NA LISTA\033[m')
# Objects
valores = list()
# Logic
for c in range(0, 5):
valores.append(int(input(f'\033[30mDigite um valor para a Posição {c}:\033[m ')))
maior = max(valores)
menor = min(valores)
print('=-' * 30)
print(f'Você digitou os valores {valores}')
print(f'O maior valor digitado foi {maior} nas posições ', end='')
for pos, num in enumerate(valores):
if maior == num:
print(f'{pos}...', '', end='')
print()
print(f'O menor valor digitado foi {menor} nas posições ', end='')
for pos, num in enumerate(valores):
if menor == num:
print(f'{pos}...', '', end='')
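The two enumerate() loops above both scan for the positions of a target value; a compact equivalent as a list comprehension (the helper name is illustrative):

def positions_of(target, values):
    # All indices i where values[i] == target.
    return [i for i, v in enumerate(values) if v == target]

# e.g. positions_of(7, [3, 7, 7, 1, 7]) returns [1, 2, 4]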
| 22.516129
| 85
| 0.636103
|
print('\033[1;34;40mMAIOR E MENOR VALORES NA LISTA\033[m')
valores = list()
for c in range(0, 5):
valores.append(int(input(f'\033[30mDigite um valor para a Posição {c}:\033[m ')))
maior = max(valores)
menor = min(valores)
print('=-' * 30)
print(f'Você digitou os valores {valores}')
print(f'O maior valor digitado foi {maior} nas posições ', end='')
for pos, num in enumerate(valores):
if maior == num:
print(f'{pos}...', '', end='')
print()
print(f'O menor valor digitado foi {menor} nas posições ', end='')
for pos, num in enumerate(valores):
if menor == num:
print(f'{pos}...', '', end='')
| true
| true
|
f7194f875486f67d1eadb26fc5e87f6bfaed4596
| 6,237
|
py
|
Python
|
detect/image_detector.py
|
Prasad9/Detect-Flags-SSD
|
c0d662bde99ed8df33d72bd06d61d5eb869d31a5
|
[
"MIT"
] | 13
|
2017-11-08T07:09:13.000Z
|
2022-03-28T07:09:47.000Z
|
detect/image_detector.py
|
Prasad9/Detect-Flags-SSD
|
c0d662bde99ed8df33d72bd06d61d5eb869d31a5
|
[
"MIT"
] | 3
|
2018-03-08T04:30:19.000Z
|
2019-01-03T15:47:24.000Z
|
detect/image_detector.py
|
Prasad9/Detect-Flags-SSD
|
c0d662bde99ed8df33d72bd06d61d5eb869d31a5
|
[
"MIT"
] | 5
|
2018-01-15T15:26:44.000Z
|
2021-08-18T08:02:51.000Z
|
from __future__ import print_function
import mxnet as mx
import numpy as np
from timeit import default_timer as timer
from dataset.iterator import DetTestImageIter
import cv2
class ImageDetector(object):
"""
    SSD detector which holds a detection network and wraps the detection API
Parameters:
----------
symbol : mx.Symbol
detection network Symbol
model_prefix : str
name prefix of trained model
epoch : int
load epoch of trained model
data_shape : int
input data resize shape
mean_pixels : tuple of float
(mean_r, mean_g, mean_b)
batch_size : int
run detection with batch size
ctx : mx.ctx
device to use, if None, use mx.cpu() as default context
"""
def __init__(self, symbol, model_prefix, epoch, data_shape, mean_pixels, \
classes, thresh = 0.6, plot_confidence = True, batch_size=1, ctx=None):
self.ctx = ctx
if self.ctx is None:
self.ctx = mx.cpu()
load_symbol, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)
if symbol is None:
symbol = load_symbol
self.mod = mx.mod.Module(symbol, label_names=None, context=ctx)
self.data_shape = data_shape
self.mod.bind(data_shapes=[('data', (batch_size, 3, data_shape, data_shape))])
self.mod.set_params(args, auxs)
self.data_shape = data_shape
self.mean_pixels = mean_pixels
self.classes = classes
self.colors = []
self.fill_random_colors_int()
self.thresh = thresh
self.plot_confidence = plot_confidence
def fill_random_colors(self):
import random
for i in range(len(self.classes)):
self.colors.append((random.random(), random.random(), random.random()))
#print(self.colors)
def fill_random_colors_int(self):
import random
for i in range(len(self.classes)):
self.colors.append((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))
#print(self.colors)
def detect(self, det_iter, show_timer=False):
"""
detect all images in iterator
Parameters:
----------
det_iter : DetIter
iterator for all testing images
show_timer : Boolean
whether to print out detection exec time
Returns:
----------
list of detection results
"""
num_images = det_iter._size
result = []
detections = []
#if not isinstance(det_iter, mx.io.PrefetchingIter):
# det_iter = mx.io.PrefetchingIter(det_iter)
start = timer()
for pred, _, _ in self.mod.iter_predict(det_iter):
detections.append(pred[0].asnumpy())
time_elapsed = timer() - start
if show_timer:
print("Detection time for {} images: {:.4f} sec".format(num_images, time_elapsed))
for output in detections:
for i in range(output.shape[0]):
det = output[i, :, :]
res = det[np.where(det[:, 0] >= 0)[0]]
result.append(res)
resized_img = det_iter.current_data()
return result, resized_img
def im_detect(self, img, show_timer=False):
"""
        Wrapper that runs detection on a single image by wrapping it
        in a one-element list for the batch iterator.

        Parameters:
        ----------
        img : np.array
            image to run detection on
        show_timer : Boolean
            whether to print out detection exec time

        Returns:
        ----------
        tuple of (detections, resized input image); detections are in
        format [det0, det1...], det is in
        format np.array([id, score, xmin, ymin, xmax, ymax]...)
"""
im_list = [img]
test_iter = DetTestImageIter(im_list, 1, self.data_shape, self.mean_pixels)
return self.detect(test_iter, show_timer)
def plot_rects(self, img, dets):
img_shape = img.shape
for i in range(dets.shape[0]):
cls_id = int(dets[i, 0])
if cls_id >= 0:
score = dets[i, 1]
#print('Score is {}, class {}'.format(score, cls_id))
if score > self.thresh:
xmin = int(dets[i, 2] * img_shape[1])
ymin = int(dets[i, 3] * img_shape[0])
xmax = int(dets[i, 4] * img_shape[1])
ymax = int(dets[i, 5] * img_shape[0])
cv2.rectangle(img, (xmin, ymin), (xmax, ymax), self.colors[cls_id], 4)
class_name = self.classes[cls_id]
cv2.putText(img, class_name, (xmin, ymin), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 4)
#print('Class id = {}, Score = {}, Country = {}, rect = ({}, {}, {}, {})'.format(cls_id, score, class_name, xmin, ymin, xmax, ymax))
def detect_and_visualize_image(self, img, show_timer=False):
"""
        Wrapper around im_detect that draws the detections on the
        resized input.

        Parameters:
        ----------
        img : np.array
            image to run detection on
        show_timer : Boolean
            whether to print out detection exec time

        Returns:
        ----------
        the resized input image, scaled to [0, 1], with boxes and labels drawn
"""
dets, resized_img = self.im_detect(img, show_timer=show_timer)
resized_img = resized_img.asnumpy()
resized_img /= 255.0
for k, det in enumerate(dets):
self.plot_rects(resized_img, det)
return resized_img
def scale_and_plot_rects(self, img, dets):
img_shape = img.shape
for i in range(dets.shape[0]):
cls_id = int(dets[i, 0])
if cls_id >= 0:
score = dets[i, 1]
#print('Score is {}, class {}'.format(score, cls_id))
if score > self.thresh:
xmin = int(dets[i, 2] * img_shape[1])
ymin = int(dets[i, 3] * img_shape[0])
xmax = int(dets[i, 4] * img_shape[1])
ymax = int(dets[i, 5] * img_shape[0])
cv2.rectangle(img, (xmin, ymin), (xmax, ymax), self.colors[cls_id], 4)
class_name = self.classes[cls_id]
cv2.putText(img, class_name, (xmin, ymin - 15), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 255), 3)
if self.plot_confidence:
score_color = (0, 255, 0) if score > 0.5 else (255, 0, 0)
cv2.putText(img, '{:.3f}'.format(score), (xmax - 60, ymin - 15), cv2.FONT_HERSHEY_SIMPLEX, 1, score_color, 1)
def detect_and_layover_image(self, img, show_timer=False):
"""
        Wrapper around im_detect that draws the detections directly on
        the original image.

        Parameters:
        ----------
        img : np.array
            image to run detection on
        show_timer : Boolean
            whether to print out detection exec time

        Returns:
        ----------
        the input image with boxes, labels and (optionally) confidence scores drawn
"""
dets, _ = self.im_detect(img, show_timer=show_timer)
for k, det in enumerate(dets):
self.scale_and_plot_rects(img, det)
return img
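A usage sketch, assuming a trained SSD checkpoint is on disk; the model prefix, epoch, class list and image paths below are placeholders, not values from this repository.

import cv2
import mxnet as mx

classes = ['india', 'france', 'brazil']                 # placeholder class names
detector = ImageDetector(symbol=None, model_prefix='model/ssd', epoch=0,
                         data_shape=512, mean_pixels=(123, 117, 104),
                         classes=classes, thresh=0.6, ctx=mx.cpu())
img = cv2.imread('test.jpg')                            # placeholder image path
annotated = detector.detect_and_layover_image(img, show_timer=True)
cv2.imwrite('out.jpg', annotated)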
| 29.842105
| 137
| 0.674683
|
from __future__ import print_function
import mxnet as mx
import numpy as np
from timeit import default_timer as timer
from dataset.iterator import DetTestImageIter
import cv2
class ImageDetector(object):
def __init__(self, symbol, model_prefix, epoch, data_shape, mean_pixels, \
classes, thresh = 0.6, plot_confidence = True, batch_size=1, ctx=None):
self.ctx = ctx
if self.ctx is None:
self.ctx = mx.cpu()
load_symbol, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)
if symbol is None:
symbol = load_symbol
self.mod = mx.mod.Module(symbol, label_names=None, context=ctx)
self.data_shape = data_shape
self.mod.bind(data_shapes=[('data', (batch_size, 3, data_shape, data_shape))])
self.mod.set_params(args, auxs)
self.data_shape = data_shape
self.mean_pixels = mean_pixels
self.classes = classes
self.colors = []
self.fill_random_colors_int()
self.thresh = thresh
self.plot_confidence = plot_confidence
def fill_random_colors(self):
import random
for i in range(len(self.classes)):
self.colors.append((random.random(), random.random(), random.random()))
def fill_random_colors_int(self):
import random
for i in range(len(self.classes)):
self.colors.append((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))
def detect(self, det_iter, show_timer=False):
num_images = det_iter._size
result = []
detections = []
start = timer()
for pred, _, _ in self.mod.iter_predict(det_iter):
detections.append(pred[0].asnumpy())
time_elapsed = timer() - start
if show_timer:
print("Detection time for {} images: {:.4f} sec".format(num_images, time_elapsed))
for output in detections:
for i in range(output.shape[0]):
det = output[i, :, :]
res = det[np.where(det[:, 0] >= 0)[0]]
result.append(res)
resized_img = det_iter.current_data()
return result, resized_img
def im_detect(self, img, show_timer=False):
im_list = [img]
test_iter = DetTestImageIter(im_list, 1, self.data_shape, self.mean_pixels)
return self.detect(test_iter, show_timer)
def plot_rects(self, img, dets):
img_shape = img.shape
for i in range(dets.shape[0]):
cls_id = int(dets[i, 0])
if cls_id >= 0:
score = dets[i, 1]
if score > self.thresh:
xmin = int(dets[i, 2] * img_shape[1])
ymin = int(dets[i, 3] * img_shape[0])
xmax = int(dets[i, 4] * img_shape[1])
ymax = int(dets[i, 5] * img_shape[0])
cv2.rectangle(img, (xmin, ymin), (xmax, ymax), self.colors[cls_id], 4)
class_name = self.classes[cls_id]
cv2.putText(img, class_name, (xmin, ymin), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 4)
def detect_and_visualize_image(self, img, show_timer=False):
dets, resized_img = self.im_detect(img, show_timer=show_timer)
resized_img = resized_img.asnumpy()
resized_img /= 255.0
for k, det in enumerate(dets):
self.plot_rects(resized_img, det)
return resized_img
def scale_and_plot_rects(self, img, dets):
img_shape = img.shape
for i in range(dets.shape[0]):
cls_id = int(dets[i, 0])
if cls_id >= 0:
score = dets[i, 1]
if score > self.thresh:
xmin = int(dets[i, 2] * img_shape[1])
ymin = int(dets[i, 3] * img_shape[0])
xmax = int(dets[i, 4] * img_shape[1])
ymax = int(dets[i, 5] * img_shape[0])
cv2.rectangle(img, (xmin, ymin), (xmax, ymax), self.colors[cls_id], 4)
class_name = self.classes[cls_id]
cv2.putText(img, class_name, (xmin, ymin - 15), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 255), 3)
if self.plot_confidence:
score_color = (0, 255, 0) if score > 0.5 else (255, 0, 0)
cv2.putText(img, '{:.3f}'.format(score), (xmax - 60, ymin - 15), cv2.FONT_HERSHEY_SIMPLEX, 1, score_color, 1)
def detect_and_layover_image(self, img, show_timer=False):
dets, _ = self.im_detect(img, show_timer=show_timer)
for k, det in enumerate(dets):
self.scale_and_plot_rects(img, det)
return img
| true
| true
|
f7194f9c5decb291e54561f76b15458cea4e4f8b
| 357
|
py
|
Python
|
aliexpress/api/rest/MarketingRedefiningGetactlist.py
|
bayborodin/aliexpress-sdk
|
89935adf46412d8d054fa80a19153971279c4106
|
[
"MIT"
] | 3
|
2021-03-10T16:46:43.000Z
|
2022-03-29T15:28:50.000Z
|
aliexpress/api/rest/MarketingRedefiningGetactlist.py
|
bayborodin/aliexpress-sdk
|
89935adf46412d8d054fa80a19153971279c4106
|
[
"MIT"
] | null | null | null |
aliexpress/api/rest/MarketingRedefiningGetactlist.py
|
bayborodin/aliexpress-sdk
|
89935adf46412d8d054fa80a19153971279c4106
|
[
"MIT"
] | 2
|
2021-10-30T17:09:34.000Z
|
2021-11-25T11:50:52.000Z
|
from aliexpress.api.base import RestApi
class AliexpressMarketingRedefiningGetactlistRequest(RestApi):
def __init__(self, domain="gw.api.taobao.com", port=80):
RestApi.__init__(self, domain, port)
self.param_seller_coupon_activity_api_query = None
def getapiname(self):
return "aliexpress.marketing.redefining.getactlist"
| 32.454545
| 62
| 0.756303
|
from aliexpress.api.base import RestApi
class AliexpressMarketingRedefiningGetactlistRequest(RestApi):
def __init__(self, domain="gw.api.taobao.com", port=80):
RestApi.__init__(self, domain, port)
self.param_seller_coupon_activity_api_query = None
def getapiname(self):
return "aliexpress.marketing.redefining.getactlist"
| true
| true
|
f7194fa0d3317f35e8c12bcca9423aaf27363280
| 981
|
py
|
Python
|
tests/test_del_contact.py
|
aogn/python_train
|
40131b24633c9771452813872061ca5335edecd8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_del_contact.py
|
aogn/python_train
|
40131b24633c9771452813872061ca5335edecd8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_del_contact.py
|
aogn/python_train
|
40131b24633c9771452813872061ca5335edecd8
|
[
"Apache-2.0"
] | null | null | null |
from models.contact import Contact
import random
import allure
def test_delete_some_contact(app, db, check_ui):
    with allure.step('Ensure at least one contact exists'):
if len(db.get_contact_list()) == 0:
app.contact.creation(Contact(first_name="test"))
with allure.step('Given a contact list and contact to delete'):
old_contacts = db.get_contact_list()
contact = random.choice(old_contacts)
with allure.step('When I delete a contact %s from the list' % contact):
app.contact.delete_contact_by_id(contact.id)
with allure.step('Then the new contact list is equal to the old list without the deleted contact'):
new_contacts = db.get_contact_list()
assert len(old_contacts) - 1 == len(new_contacts)
old_contacts.remove(contact)
assert old_contacts == new_contacts
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.group.get_contact_list(), key=Contact.id_or_max)
| 44.590909
| 125
| 0.699286
|
from models.contact import Contact
import random
import allure
def test_delete_some_contact(app, db, check_ui):
    with allure.step('Ensure at least one contact exists'):
if len(db.get_contact_list()) == 0:
app.contact.creation(Contact(first_name="test"))
with allure.step('Given a contact list and contact to delete'):
old_contacts = db.get_contact_list()
contact = random.choice(old_contacts)
with allure.step('When I delete a contact %s from the list' % contact):
app.contact.delete_contact_by_id(contact.id)
with allure.step('Then the new contact list is equal to the old list without the deleted contact'):
new_contacts = db.get_contact_list()
assert len(old_contacts) - 1 == len(new_contacts)
old_contacts.remove(contact)
assert old_contacts == new_contacts
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.group.get_contact_list(), key=Contact.id_or_max)
| true
| true
|
f7194fe7656b09b6c529b0342d12157fb1da984f
| 710
|
py
|
Python
|
tests/apps/minimal2/application.py
|
blazelibs/blazeweb
|
b120a6a2e38c8b53da2b73443ff242e2d1438053
|
[
"BSD-3-Clause"
] | null | null | null |
tests/apps/minimal2/application.py
|
blazelibs/blazeweb
|
b120a6a2e38c8b53da2b73443ff242e2d1438053
|
[
"BSD-3-Clause"
] | 6
|
2016-11-01T18:42:34.000Z
|
2020-11-16T16:52:14.000Z
|
tests/apps/minimal2/application.py
|
blazelibs/blazeweb
|
b120a6a2e38c8b53da2b73443ff242e2d1438053
|
[
"BSD-3-Clause"
] | 1
|
2020-01-22T18:20:46.000Z
|
2020-01-22T18:20:46.000Z
|
from os import path
from blazeutils import prependsitedir
from blazeweb.application import WSGIApp
from blazeweb.middleware import full_wsgi_stack
from minimal2.config import settings as settingsmod
from blazeweb.scripting import application_entry
# make sure our base module gets put on the path
try:
import minimal2 # noqa
except ImportError:
prependsitedir(path.dirname(settingsmod.basedir), 'apps')
def make_wsgi(profile='Default', use_session=True):
app = WSGIApp(settingsmod, profile)
if not use_session:
app.settings.beaker.enabled = False
return full_wsgi_stack(app)
def script_entry():
application_entry(make_wsgi)
if __name__ == '__main__':
script_entry()
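make_wsgi() returns a plain WSGI callable, so any WSGI server can host it; a quick local-run sketch using only the standard library (host and port are arbitrary):

from wsgiref.simple_server import make_server
from minimal2.application import make_wsgi

app = make_wsgi('Default')
make_server('localhost', 8080, app).serve_forever()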
| 25.357143
| 61
| 0.773239
|
from os import path
from blazeutils import prependsitedir
from blazeweb.application import WSGIApp
from blazeweb.middleware import full_wsgi_stack
from minimal2.config import settings as settingsmod
from blazeweb.scripting import application_entry
try:
import minimal2
except ImportError:
prependsitedir(path.dirname(settingsmod.basedir), 'apps')
def make_wsgi(profile='Default', use_session=True):
app = WSGIApp(settingsmod, profile)
if not use_session:
app.settings.beaker.enabled = False
return full_wsgi_stack(app)
def script_entry():
application_entry(make_wsgi)
if __name__ == '__main__':
script_entry()
| true
| true
|
f719503e9bb94c5fe728360593ebcf3637d9ab4e
| 17,459
|
py
|
Python
|
src/graphtastic/clustering.py
|
richardtjornhammar/graphtastic
|
1e64d408ffb3e09d5ad068986c847032d5cfdcbd
|
[
"Apache-2.0"
] | 1
|
2022-02-08T09:53:38.000Z
|
2022-02-08T09:53:38.000Z
|
src/graphtastic/clustering.py
|
richardtjornhammar/graphtastic
|
1e64d408ffb3e09d5ad068986c847032d5cfdcbd
|
[
"Apache-2.0"
] | null | null | null |
src/graphtastic/clustering.py
|
richardtjornhammar/graphtastic
|
1e64d408ffb3e09d5ad068986c847032d5cfdcbd
|
[
"Apache-2.0"
] | 1
|
2022-03-24T12:37:05.000Z
|
2022-03-24T12:37:05.000Z
|
"""
Copyright 2022 RICHARD TJÖRNHAMMAR
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import typing
import sys
try :
from numba import jit
bUseNumba = True
except ImportError :
print ( "ImportError:"," NUMBA. WILL NOT USE IT")
bUseNumba = False
except OSError:
print ( "OSError:"," NUMBA. WILL NOT USE IT")
bUseNumba = False
# THE FOLLOWING KMEANS ALGORITHM IS THE AUTHOR'S OWN LOCAL VERSION
if bUseNumba :
@jit(nopython=True)
def seeded_kmeans( dat:np.array, cent:np.array ) :
#
# PYTHON ADAPTATION OF MY C++ CODE THAT CAN BE FOUND IN
# https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc
# AROUND LINE 2345
        # AGAIN CONSIDER USING THE C++ VERSION SINCE IT IS A LOT FASTER
# HERE WE SPEED IT UP USING NUMBA IF THE USER HAS IT INSTALLED AS A MODULE
#
NN , MM = np.shape ( dat )
KK , LL = np.shape ( cent )
if not LL == MM :
print ( 'WARNING DATA FORMAT ERROR. NON COALESCING COORDINATE AXIS' )
labels = [ int(z) for z in np.zeros(NN) ]
w = labels
counts = np.zeros(KK)
tmp_ce = np.zeros(KK*MM).reshape(KK,MM)
old_error , error , TOL = 0. , 1. , 1.0E-10
while abs ( error - old_error ) > TOL :
old_error = error
error = 0.
counts = counts * 0.
tmp_ce = tmp_ce * 0.
# START BC
for h in range ( NN ) :
min_distance = 1.0E30
for i in range ( KK ) :
distance = np.sum( ( dat[h]-cent[i] )**2 )
if distance < min_distance :
labels[h] = i
min_distance = distance
tmp_ce[labels[h]] += dat[ h ]
counts[labels[h]] += 1.0
error += min_distance
# END BC
for i in range ( KK ) :
if counts[i]>0:
cent[i] = tmp_ce[i]/counts[i]
centroids = cent
return ( labels , centroids )
else :
def seeded_kmeans( dat:np.array, cent:np.array ) :
#
        # SLOW, SLUGGISH KMEANS WITH A DOUBLE FOR LOOP
# IN PYTHON! WOW! SUCH SPEED!
#
NN , MM = np.shape ( dat )
KK , LL = np.shape ( cent )
if not LL == MM :
print ( 'WARNING DATA FORMAT ERROR. NON COALESCING COORDINATE AXIS' )
labels = [ int(z) for z in np.zeros(NN) ]
w = labels
counts = np.zeros(KK)
tmp_ce = np.zeros(KK*MM).reshape(KK,MM)
old_error , error , TOL = 0. , 1. , 1.0E-10
while abs ( error - old_error ) > TOL :
old_error = error
error = 0.
counts = counts * 0.
tmp_ce = tmp_ce * 0.
# START BC
for h in range ( NN ) :
min_distance = 1.0E30
for i in range ( KK ) :
distance = np.sum( ( dat[h]-cent[i] )**2 )
if distance < min_distance :
labels[h] = i
min_distance = distance
tmp_ce[labels[h]] += dat[ h ]
counts[labels[h]] += 1.0
error += min_distance
# END BC
for i in range ( KK ) :
if counts[i]>0:
cent[i] = tmp_ce[i]/counts[i]
centroids = cent
return ( labels , centroids )
if bUseNumba :
@jit(nopython=True)
def connectivity ( B:np.array , val:float , bVerbose:bool = False ) :
description = """ This is a cutoff based clustering algorithm. The intended use is to supply a distance matrix and a cutoff value (then becomes symmetric positive). For a small distance cutoff, you should see all the parts of the system and for a large distance cutoff, you should see the entire system. It has been employed for statistical analysis work as well as the original application where it was employed to segment molecular systems."""
if bVerbose :
print ( "CONNECTIVITY CLUSTERING OF ", np.shape(B), " MATRIX" )
# PYTHON ADAPTATION OF MY C++ CODE THAT CAN BE FOUND IN
# https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc
# AROUND LINE 2277
# CONSIDER COMPILING AND USING THAT AS A MODULE INSTEAD OF THIS SINCE IT IS
# A LOT FASTER
# FOR A DESCRIPTION READ PAGE 30 (16 INTERNAL NUMBERING) of:
# https://kth.diva-portal.org/smash/get/diva2:748464/FULLTEXT01.pdf
#
# https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc
# ADDED TO RICHTOOLS HERE: https://github.com/richardtjornhammar/RichTools/commit/74b35df9c623bf03570707a24eafe828f461ed90#diff-25a6634263c1b1f6fc4697a04e2b9904ea4b042a89af59dc93ec1f5d44848a26
# CONNECTIVITY SEARCH FOR (connectivity) CONNECTIVITY
#
nr_sq,mr_sq = np.shape(B)
if nr_sq != mr_sq :
print ( 'ERROR: FAILED' )
N = mr_sq
res , nvisi, s, NN, ndx, C = [0], [0], [0], [0], [0], 0
res .append(0)
for i in range(N) :
nvisi.append(i+1)
res.append(0); res.append(0)
ndx.append(i)
res = res[1:]
nvisi = nvisi[1:]
ndx = ndx[1:]
while ( len(ndx)>0 ) :
i = ndx[-1] ; ndx = ndx[:-1]
NN = []
if ( nvisi[i]>0 ) :
C-=1
for j in range(N) :
if ( B[i,j]<=val ) :
NN.append(j)
while ( len(NN)>0 ) :
                    # pop the last element (C++-style back() + pop_back())
k = NN[-1]; NN = NN[:-1]
nvisi[k] = C
for j in range(N):
if ( B[j,k]<=val ) :
for q in range(N) :
if ( nvisi[q] == j+1 ) :
NN.append(q)
if bVerbose : # VERBOSE
print ( "INFO "+str(-1*C) +" clusters" )
Nc = [ 0 for i in range(-1*C) ]
for q in range(N) :
res[ q*2+1 ] = q;
res[ q*2 ] = nvisi[q]-C;
Nc [res[q*2]]+= 1;
if bVerbose :
print ( " "+str(res[q*2])+" "+str(res[2*q+1]) )
if bVerbose :
for i in range(-1*C) :
print( "CLUSTER " +str(i)+ " HAS " + str(Nc[i]) + " ELEMENTS")
return ( Nc , np.array(res[:-1]).reshape(-1,2) )
else :
def connectivity ( B:np.array , val:float , bVerbose:bool = False ) :
description="""
        This is a cutoff based clustering algorithm. The intended use is to supply a distance matrix and a cutoff value (then becomes symmetric positive). For a small distance cutoff, you should see all the parts of the system and for a large distance cutoff, you should see the entire system. It has been employed for statistical analysis work as well as the original application where it was employed to segment molecular systems.
"""
if bVerbose :
print ( "CONNECTIVITY CLUSTERING OF ", np.shape(B), " MATRIX" )
# PYTHON ADAPTATION OF MY C++ CODE THAT CAN BE FOUND IN
# https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc
# AROUND LINE 2277
# CONSIDER COMPILING AND USING THAT AS A MODULE INSTEAD OF THIS SINCE IT IS
# A LOT FASTER
# FOR A DESCRIPTION READ PAGE 30 (16 INTERNAL NUMBERING) of:
# https://kth.diva-portal.org/smash/get/diva2:748464/FULLTEXT01.pdf
#
nr_sq,mr_sq = np.shape(B)
if nr_sq != mr_sq :
print ( 'ERROR' )
return ( -1 )
N = mr_sq
res , nvisi, s, NN, ndx, C = [], [], [], [], [], 0
res .append(0)
for i in range(N) :
nvisi.append(i+1)
res.append(0); res.append(0)
ndx.append(i)
while ( len(ndx)>0 ) :
i = ndx[-1] ; ndx = ndx[:-1]
NN = []
if ( nvisi[i]>0 ) :
C-=1
for j in range(N) :
if ( B[i,j]<=val ) :
NN.append(j)
while ( len(NN)>0 ) :
                    # pop the last element (C++-style back() + pop_back())
k = NN[-1]; NN = NN[:-1]
nvisi[k] = C
for j in range(N):
if ( B[j,k]<=val ) :
for q in range(N) :
if ( nvisi[q] == j+1 ) :
NN.append(q)
if bVerbose : # VERBOSE
print ( "INFO "+str(-1*C) +" clusters" )
Nc = [ 0 for i in range(-1*C) ]
for q in range(N) :
res[ q*2+1 ] = q;
res[ q*2 ] = nvisi[q]-C;
Nc [res[q*2]]+= 1;
if bVerbose :
print ( " "+str(res[q*2])+" "+str(res[2*q+1]) )
if bVerbose:
for i in range(-1*C) :
print( "CLUSTER " +str(i)+ " HAS " + str(Nc[i]) + " ELEMENTS")
return ( Nc , np.array(res[:-1]).reshape(-1,2) )
if bUseNumba :
@jit(nopython=True)
def connectedness ( distm:np.array , alpha:float , n_connections:int=1 ) -> list :
#
# AN ALTERNATIVE METHOD
# DOES THE SAME THING AS THE CONNECTIVITY CODE IN MY
# CLUSTERING MODULE (in src/impetuous/clustering.py )
# OR IN https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc
# https://github.com/richardtjornhammar/RichTools/commit/74b35df9c623bf03570707a24eafe828f461ed90#diff-25a6634263c1b1f6fc4697a04e2b9904ea4b042a89af59dc93ec1f5d44848a26
# CONNECTIVITY SEARCH FOR (connectivity) CONNECTIVITY
#
# THIS ROUTINE RETURNS A LIST BELONGING TO THE CLUSTERS
# WITH THE SET OF INDICES THAT MAPS TO THE CLUSTER
#
if len ( distm.shape ) < 2 :
print ( 'PLEASE SUBMIT A SQUARE DISTANCE MATRIX' )
def b2i ( a:list ) -> list :
return ( [ i for b,i in zip(a,range(len(a))) if b ] )
def f2i ( a:list,alf:float ) -> list :
return ( b2i( a<=alf ) )
L = []
for a in distm :
bAdd = True
ids = set( f2i(a,alpha) )
for i in range(len(L)) :
if len( L[i]&ids ) >= n_connections :
L[i] = L[i] | ids
bAdd = False
break
if bAdd and len(ids) >= n_connections :
L .append( ids )
return ( L )
else :
def connectedness ( distm:np.array , alpha:float , n_connections:int=1 ) -> list :
#
# AN ALTERNATIVE METHOD
# DOES THE SAME THING AS THE CONNECTIVITY CODE IN MY
# CLUSTERING MODULE (in src/impetuous/clustering.py )
# OR IN https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc
# as of commit https://github.com/richardtjornhammar/RichTools/commit/76201bb07687017ae16a4e57cb1ed9fd8c394f18 2016
# CONNECTIVITY SEARCH FOR (connectivity) CONNECTIVITY
#
# THIS ROUTINE RETURNS A LIST BELONGING TO THE CLUSTERS
# WITH THE SET OF INDICES THAT MAPS TO THE CLUSTER
#
if len ( distm.shape ) < 2 :
print ( 'PLEASE SUBMIT A SQUARE DISTANCE MATRIX' )
def b2i ( a:list ) -> list :
return ( [ i for b,i in zip(a,range(len(a))) if b ] )
def f2i ( a:list,alf:float ) -> list :
return ( b2i( a<=alf ) )
L = []
for a in distm :
bAdd = True
ids = set( f2i(a,alpha) )
for i in range(len(L)) :
if len( L[i]&ids ) >= n_connections :
L[i] = L[i] | ids
bAdd = False
break
if bAdd and len(ids) >= n_connections :
L .append( ids )
return ( L )
def dbscan ( coordinates:np.array = None , distance_matrix:np.array = None ,
eps:float = None, minPts:int = None , bVerbose:bool = False ) -> dict :
def absolute_coordinates_to_distance_matrix ( Q:np.array , power:int=2 , bInvPow:bool=False ) -> np.array :
# UNUSED FALLBACK
DP = np.array( [ np.sum((np.array(p)-np.array(q))**power) for p in Q for q in Q] ).reshape(np.shape(Q)[0],np.shape(Q)[0])
if bInvPow :
DP = DP**(1.0/power)
return ( DP )
if bVerbose :
print ( "THIS IMPLEMENTATION FOR DBSCAN" )
print ( "ASSESSMENT OF NOISE DIFFERS FROM" )
print ( "THE IMPLEMENTATION FOUND IN SKLEARN" )
print ( "ASSUMES LINEAR DISTANCES, NOT SQUARED" )
#
# FOR A DESCRIPTION OF THE CONNECTIVITY READ PAGE 30 (16 INTERNAL NUMBERING) of:
# https://kth.diva-portal.org/smash/get/diva2:748464/FULLTEXT01.pdf
#from impetuous.clustering import absolute_coordinates_to_distance_matrix
#from impetuous.clustering import connectivity
import operator
if not operator.xor( coordinates is None , distance_matrix is None ) :
print ( "ONLY SUPPLY A SINGE DATA FRAME OR A DISTANCE MATRIX" )
print ( "dbscan FAILED" )
print ( "DATA MATRICES NEEDS TO BE SPECIFIED WITH \" distance_matrix = ... \" " )
exit(1)
if distance_matrix is None :
from graphtastic.fit import absolute_coordinates_to_distance_matrix
distance_matrix_ = absolute_coordinates_to_distance_matrix ( coordinates )
eps = eps**2.0
else :
distance_matrix_ = distance_matrix
isNoise = np.sum(distance_matrix_<eps,0)-1 < minPts
i_ = 0
for ib in isNoise :
if ib :
distance_matrix_ [ i_] = ( 1+eps )*10.0
distance_matrix_.T[i_] = ( 1+eps )*10.0
distance_matrix_[i_][i_] = 0.
i_ = i_+1
clustercontent , clustercontacts = connectivity ( distance_matrix_ , eps )
return ( {'cluster content': clustercontent, 'clusterid-particleid' : clustercontacts, 'is noise':isNoise} )
def reformat_dbscan_results ( results:dict ) -> dict :
if True :
clusters = {}
for icontent in range(len(results['cluster content'])) :
content = results[ 'cluster content' ][ icontent ]
for c in results [ 'clusterid-particleid' ] :
if c[0] == icontent :
if results[ 'is noise' ][c[1]] :
icontent=-1
if icontent in clusters:
clusters[ icontent ] .append( c[1] )
else :
clusters[ icontent ] = [ c[1] ]
return ( clusters )
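An end-to-end sketch of the dbscan/reformat pair on synthetic 2-D points; the coordinates, eps and minPts below are arbitrary demo values. Note that passing a precomputed distance_matrix skips the eps squaring, so eps is compared against whatever metric the matrix holds.

import numpy as np
from graphtastic.clustering import dbscan, reformat_dbscan_results

pts = np.array([[0.0, 0.0], [0.1, 0.0], [0.0, 0.1],   # tight cluster
                [5.0, 5.0], [5.1, 5.0],               # second cluster
                [9.0, 0.0]])                          # isolated point -> noise
# Squared-Euclidean pairwise distances, matching the fallback helper above.
D = np.array([[np.sum((p - q) ** 2) for q in pts] for p in pts])
res = dbscan(distance_matrix=D, eps=0.5, minPts=1)
clusters = reformat_dbscan_results(res)   # noise indices land under key -1
print(clusters)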
| 48.497222
| 462
| 0.452145
|
import numpy as np
import typing
import sys
try :
from numba import jit
bUseNumba = True
except ImportError :
print ( "ImportError:"," NUMBA. WILL NOT USE IT")
bUseNumba = False
except OSError:
print ( "OSError:"," NUMBA. WILL NOT USE IT")
bUseNumba = False
if bUseNumba :
@jit(nopython=True)
def seeded_kmeans( dat:np.array, cent:np.array ) :
NN , MM = np.shape ( dat )
KK , LL = np.shape ( cent )
if not LL == MM :
print ( 'WARNING DATA FORMAT ERROR. NON COALESCING COORDINATE AXIS' )
labels = [ int(z) for z in np.zeros(NN) ]
w = labels
counts = np.zeros(KK)
tmp_ce = np.zeros(KK*MM).reshape(KK,MM)
old_error , error , TOL = 0. , 1. , 1.0E-10
while abs ( error - old_error ) > TOL :
old_error = error
error = 0.
counts = counts * 0.
tmp_ce = tmp_ce * 0.
for h in range ( NN ) :
min_distance = 1.0E30
for i in range ( KK ) :
distance = np.sum( ( dat[h]-cent[i] )**2 )
if distance < min_distance :
labels[h] = i
min_distance = distance
tmp_ce[labels[h]] += dat[ h ]
counts[labels[h]] += 1.0
error += min_distance
for i in range ( KK ) :
if counts[i]>0:
cent[i] = tmp_ce[i]/counts[i]
centroids = cent
return ( labels , centroids )
else :
def seeded_kmeans( dat:np.array, cent:np.array ) :
NN , MM = np.shape ( dat )
KK , LL = np.shape ( cent )
if not LL == MM :
print ( 'WARNING DATA FORMAT ERROR. NON COALESCING COORDINATE AXIS' )
labels = [ int(z) for z in np.zeros(NN) ]
w = labels
counts = np.zeros(KK)
tmp_ce = np.zeros(KK*MM).reshape(KK,MM)
old_error , error , TOL = 0. , 1. , 1.0E-10
while abs ( error - old_error ) > TOL :
old_error = error
error = 0.
counts = counts * 0.
tmp_ce = tmp_ce * 0.
for h in range ( NN ) :
min_distance = 1.0E30
for i in range ( KK ) :
distance = np.sum( ( dat[h]-cent[i] )**2 )
if distance < min_distance :
labels[h] = i
min_distance = distance
tmp_ce[labels[h]] += dat[ h ]
counts[labels[h]] += 1.0
error += min_distance
for i in range ( KK ) :
if counts[i]>0:
cent[i] = tmp_ce[i]/counts[i]
centroids = cent
return ( labels , centroids )
if bUseNumba :
@jit(nopython=True)
def connectivity ( B:np.array , val:float , bVerbose:bool = False ) :
description = """ This is a cutoff based clustering algorithm. The intended use is to supply a distance matrix and a cutoff value (then becomes symmetric positive). For a small distance cutoff, you should see all the parts of the system and for a large distance cutoff, you should see the entire system. It has been employed for statistical analysis work as well as the original application where it was employed to segment molecular systems."""
if bVerbose :
print ( "CONNECTIVITY CLUSTERING OF ", np.shape(B), " MATRIX" )
        nr_sq,mr_sq = np.shape(B)
if nr_sq != mr_sq :
print ( 'ERROR: FAILED' )
N = mr_sq
res , nvisi, s, NN, ndx, C = [0], [0], [0], [0], [0], 0
res .append(0)
for i in range(N) :
nvisi.append(i+1)
res.append(0); res.append(0)
ndx.append(i)
res = res[1:]
nvisi = nvisi[1:]
ndx = ndx[1:]
while ( len(ndx)>0 ) :
i = ndx[-1] ; ndx = ndx[:-1]
NN = []
if ( nvisi[i]>0 ) :
C-=1
for j in range(N) :
if ( B[i,j]<=val ) :
NN.append(j)
while ( len(NN)>0 ) :
k = NN[-1]; NN = NN[:-1]
nvisi[k] = C
for j in range(N):
if ( B[j,k]<=val ) :
for q in range(N) :
if ( nvisi[q] == j+1 ) :
NN.append(q)
if bVerbose :
print ( "INFO "+str(-1*C) +" clusters" )
Nc = [ 0 for i in range(-1*C) ]
for q in range(N) :
res[ q*2+1 ] = q;
res[ q*2 ] = nvisi[q]-C;
Nc [res[q*2]]+= 1;
if bVerbose :
print ( " "+str(res[q*2])+" "+str(res[2*q+1]) )
if bVerbose :
for i in range(-1*C) :
print( "CLUSTER " +str(i)+ " HAS " + str(Nc[i]) + " ELEMENTS")
return ( Nc , np.array(res[:-1]).reshape(-1,2) )
else :
def connectivity ( B:np.array , val:float , bVerbose:bool = False ) :
description="""
        This is a cutoff based clustering algorithm. The intended use is to supply a distance matrix and a cutoff value (then becomes symmetric positive). For a small distance cutoff, you should see all the parts of the system and for a large distance cutoff, you should see the entire system. It has been employed for statistical analysis work as well as the original application where it was employed to segment molecular systems.
"""
if bVerbose :
print ( "CONNECTIVITY CLUSTERING OF ", np.shape(B), " MATRIX" )
nr_sq,mr_sq = np.shape(B)
if nr_sq != mr_sq :
print ( 'ERROR' )
return ( -1 )
N = mr_sq
res , nvisi, s, NN, ndx, C = [], [], [], [], [], 0
res .append(0)
for i in range(N) :
nvisi.append(i+1)
res.append(0); res.append(0)
ndx.append(i)
while ( len(ndx)>0 ) :
i = ndx[-1] ; ndx = ndx[:-1]
NN = []
if ( nvisi[i]>0 ) :
C-=1
for j in range(N) :
if ( B[i,j]<=val ) :
NN.append(j)
while ( len(NN)>0 ) :
k = NN[-1]; NN = NN[:-1]
nvisi[k] = C
for j in range(N):
if ( B[j,k]<=val ) :
for q in range(N) :
if ( nvisi[q] == j+1 ) :
NN.append(q)
if bVerbose :
print ( "INFO "+str(-1*C) +" clusters" )
Nc = [ 0 for i in range(-1*C) ]
for q in range(N) :
res[ q*2+1 ] = q;
res[ q*2 ] = nvisi[q]-C;
Nc [res[q*2]]+= 1;
if bVerbose :
print ( " "+str(res[q*2])+" "+str(res[2*q+1]) )
if bVerbose:
for i in range(-1*C) :
print( "CLUSTER " +str(i)+ " HAS " + str(Nc[i]) + " ELEMENTS")
return ( Nc , np.array(res[:-1]).reshape(-1,2) )
if bUseNumba :
@jit(nopython=True)
def connectedness ( distm:np.array , alpha:float , n_connections:int=1 ) -> list :
if len ( distm.shape ) < 2 :
print ( 'PLEASE SUBMIT A SQUARE DISTANCE MATRIX' )
def b2i ( a:list ) -> list :
return ( [ i for b,i in zip(a,range(len(a))) if b ] )
def f2i ( a:list,alf:float ) -> list :
return ( b2i( a<=alf ) )
L = []
for a in distm :
bAdd = True
ids = set( f2i(a,alpha) )
for i in range(len(L)) :
if len( L[i]&ids ) >= n_connections :
L[i] = L[i] | ids
bAdd = False
break
if bAdd and len(ids) >= n_connections :
L .append( ids )
return ( L )
else :
def connectedness ( distm:np.array , alpha:float , n_connections:int=1 ) -> list :
if len ( distm.shape ) < 2 :
print ( 'PLEASE SUBMIT A SQUARE DISTANCE MATRIX' )
def b2i ( a:list ) -> list :
return ( [ i for b,i in zip(a,range(len(a))) if b ] )
def f2i ( a:list,alf:float ) -> list :
return ( b2i( a<=alf ) )
L = []
for a in distm :
bAdd = True
ids = set( f2i(a,alpha) )
for i in range(len(L)) :
if len( L[i]&ids ) >= n_connections :
L[i] = L[i] | ids
bAdd = False
break
if bAdd and len(ids) >= n_connections :
L .append( ids )
return ( L )
def dbscan ( coordinates:np.array = None , distance_matrix:np.array = None ,
eps:float = None, minPts:int = None , bVerbose:bool = False ) -> dict :
def absolute_coordinates_to_distance_matrix ( Q:np.array , power:int=2 , bInvPow:bool=False ) -> np.array :
DP = np.array( [ np.sum((np.array(p)-np.array(q))**power) for p in Q for q in Q] ).reshape(np.shape(Q)[0],np.shape(Q)[0])
if bInvPow :
DP = DP**(1.0/power)
return ( DP )
if bVerbose :
print ( "THIS IMPLEMENTATION FOR DBSCAN" )
print ( "ASSESSMENT OF NOISE DIFFERS FROM" )
print ( "THE IMPLEMENTATION FOUND IN SKLEARN" )
print ( "ASSUMES LINEAR DISTANCES, NOT SQUARED" )
import operator
if not operator.xor( coordinates is None , distance_matrix is None ) :
print ( "ONLY SUPPLY A SINGE DATA FRAME OR A DISTANCE MATRIX" )
print ( "dbscan FAILED" )
print ( "DATA MATRICES NEEDS TO BE SPECIFIED WITH \" distance_matrix = ... \" " )
exit(1)
if distance_matrix is None :
from graphtastic.fit import absolute_coordinates_to_distance_matrix
distance_matrix_ = absolute_coordinates_to_distance_matrix ( coordinates )
eps = eps**2.0
else :
distance_matrix_ = distance_matrix
isNoise = np.sum(distance_matrix_<eps,0)-1 < minPts
i_ = 0
for ib in isNoise :
if ib :
distance_matrix_ [ i_] = ( 1+eps )*10.0
distance_matrix_.T[i_] = ( 1+eps )*10.0
distance_matrix_[i_][i_] = 0.
i_ = i_+1
clustercontent , clustercontacts = connectivity ( distance_matrix_ , eps )
return ( {'cluster content': clustercontent, 'clusterid-particleid' : clustercontacts, 'is noise':isNoise} )
def reformat_dbscan_results ( results:dict ) -> dict :
if True :
clusters = {}
for icontent in range(len(results['cluster content'])) :
content = results[ 'cluster content' ][ icontent ]
for c in results [ 'clusterid-particleid' ] :
if c[0] == icontent :
if results[ 'is noise' ][c[1]] :
icontent=-1
if icontent in clusters:
clusters[ icontent ] .append( c[1] )
else :
clusters[ icontent ] = [ c[1] ]
return ( clusters )
| true
| true
|
f719505a712591c9db61d06ce1e597d8da79a187
| 235
|
py
|
Python
|
orb_simulator/lexer/regex_ast/regex_epsilon_node.py
|
dmguezjaviersnet/IA-Sim-Comp-Project
|
8165b9546efc45f98091a3774e2dae4f45942048
|
[
"MIT"
] | 1
|
2022-01-19T22:49:09.000Z
|
2022-01-19T22:49:09.000Z
|
orb_simulator/lexer/regex_ast/regex_epsilon_node.py
|
dmguezjaviersnet/IA-Sim-Comp-Project
|
8165b9546efc45f98091a3774e2dae4f45942048
|
[
"MIT"
] | 15
|
2021-11-10T14:25:02.000Z
|
2022-02-12T19:17:11.000Z
|
orb_simulator/lexer/regex_ast/regex_epsilon_node.py
|
dmguezjaviersnet/IA-Sim-Comp-Project
|
8165b9546efc45f98091a3774e2dae4f45942048
|
[
"MIT"
] | null | null | null |
from lexer.regex_ast.regex_atomic_node import AtomicNode
from automaton import Automaton
class EpsilonNode(AtomicNode):
def eval(self):
return Automaton(number_of_states=1, initial_state=0, finalStates=[0], transitions={})
| 39.166667
| 94
| 0.787234
|
from lexer.regex_ast.regex_atomic_node import AtomicNode
from automaton import Automaton
class EpsilonNode(AtomicNode):
def eval(self):
return Automaton(number_of_states=1, initial_state=0, finalStates=[0], transitions={})
| true
| true
|
f71950d1cafe3ade67ae0b9180b0da8119152a85
| 4,293
|
py
|
Python
|
experiments/steven/disentanglement/pointmass/disentanglement_rig.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | null | null | null |
experiments/steven/disentanglement/pointmass/disentanglement_rig.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | null | null | null |
experiments/steven/disentanglement/pointmass/disentanglement_rig.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | null | null | null |
import os.path as osp
import torch.nn.functional as F
import multiworld.envs.mujoco as mwmj
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.launcher_util import run_experiment
from rlkit.launchers.experiments.disentanglement.launcher import \
disentangled_grill_her_twin_sac_experiment
from rlkit.torch.vae.conv_vae import imsize48_default_architecture
if __name__ == "__main__":
variant = dict(
env_id='Point2DEnv-Train-Axis-Eval-Everything-Images-v0',
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
encoder_kwargs=dict(
hidden_sizes=[400, 300],
hidden_activation=F.tanh,
),
twin_sac_trainer_kwargs=dict(
reward_scale=1,
discount=0.99,
target_update_period=1,
use_automatic_entropy_tuning=True,
),
td3_trainer_kwargs=dict(
tau=1e-3,
),
max_path_length=100,
algo_kwargs=dict(
batch_size=256,
num_epochs=50,
num_eval_steps_per_epoch=1000,
num_expl_steps_per_train_loop=1000,
num_trains_per_train_loop=1000,
min_num_steps_before_training=1000,
),
replay_buffer_kwargs=dict(
fraction_goals_rollout_goals=0.2,
fraction_goals_env_goals=0.5,
max_size=int(1e6),
ob_keys_to_save=[
'latent_observation',
'latent_desired_goal',
'latent_achieved_goal',
'state_achieved_goal',
'state_desired_goal',
'state_observation',
],
goal_keys=['latent_desired_goal', 'state_desired_goal'],
),
observation_key='latent_observation',
desired_goal_key='latent_desired_goal',
achieved_goal_key='latent_achieved_goal',
vae_exploration_goal_sampling_mode='env',
vae_evaluation_goal_sampling_mode='env',
base_env_exploration_goal_sampling_mode='train',
base_env_evaluation_goal_sampling_mode='test',
vectorized=True,
disentangled_qf_kwargs=dict(
),
vae_wrapped_env_kwargs=dict(
norm_order=1,
reward_params=dict(
type='vectorized_latent_distance',
norm_order=1,
),
),
use_vf_to_compute_policy=True,
use_special_q_function=True,
latent_dim=2,
vae_n_vae_training_kwargs=dict(
vae_class='spatialVAE',
vae_kwargs=dict(
input_channels=3,
),
vae_trainer_kwargs=dict(
lr=1e-3,
beta=0,
),
vae_train_epochs=50,
num_image_examples=30000,
vae_architecture=imsize48_default_architecture,
),
# vae_path="logs/02-25-disentangle-images-relu/02-25-disentangle-images-relu_2020_02_25_12_59_17_id000--s4248/vae.pkl",
save_video=True,
save_video_kwargs=dict(
save_video_period=10,
imsize=48,
),
)
search_space = {
'disentangled_qf_kwargs.encode_state': [True],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
n_seeds = 1
mode = 'local'
exp_prefix = '{}'.format(
__file__.replace('/', '-').replace('_', '-').split('.')[0]
)
n_seeds = 2
mode = 'local'
exp_prefix = 'disentangle-extrapolate-vectorized-3'
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for _ in range(n_seeds):
run_experiment(
disentangled_grill_her_twin_sac_experiment,
exp_prefix=exp_prefix,
mode=mode,
variant=variant,
use_gpu=True,
num_exps_per_instance=3,
gcp_kwargs=dict(
zone='us-east1-c',
gpu_kwargs=dict(
gpu_model='nvidia-tesla-k80',
num_gpu=1,
)
),
time_in_mins=int(2.5*24*60),
)
| 32.278195
| 127
| 0.575355
|
import os.path as osp
import torch.nn.functional as F
import multiworld.envs.mujoco as mwmj
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.launcher_util import run_experiment
from rlkit.launchers.experiments.disentanglement.launcher import \
disentangled_grill_her_twin_sac_experiment
from rlkit.torch.vae.conv_vae import imsize48_default_architecture
if __name__ == "__main__":
variant = dict(
env_id='Point2DEnv-Train-Axis-Eval-Everything-Images-v0',
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
encoder_kwargs=dict(
hidden_sizes=[400, 300],
hidden_activation=F.tanh,
),
twin_sac_trainer_kwargs=dict(
reward_scale=1,
discount=0.99,
target_update_period=1,
use_automatic_entropy_tuning=True,
),
td3_trainer_kwargs=dict(
tau=1e-3,
),
max_path_length=100,
algo_kwargs=dict(
batch_size=256,
num_epochs=50,
num_eval_steps_per_epoch=1000,
num_expl_steps_per_train_loop=1000,
num_trains_per_train_loop=1000,
min_num_steps_before_training=1000,
),
replay_buffer_kwargs=dict(
fraction_goals_rollout_goals=0.2,
fraction_goals_env_goals=0.5,
max_size=int(1e6),
ob_keys_to_save=[
'latent_observation',
'latent_desired_goal',
'latent_achieved_goal',
'state_achieved_goal',
'state_desired_goal',
'state_observation',
],
goal_keys=['latent_desired_goal', 'state_desired_goal'],
),
observation_key='latent_observation',
desired_goal_key='latent_desired_goal',
achieved_goal_key='latent_achieved_goal',
vae_exploration_goal_sampling_mode='env',
vae_evaluation_goal_sampling_mode='env',
base_env_exploration_goal_sampling_mode='train',
base_env_evaluation_goal_sampling_mode='test',
vectorized=True,
disentangled_qf_kwargs=dict(
),
vae_wrapped_env_kwargs=dict(
norm_order=1,
reward_params=dict(
type='vectorized_latent_distance',
norm_order=1,
),
),
use_vf_to_compute_policy=True,
use_special_q_function=True,
latent_dim=2,
vae_n_vae_training_kwargs=dict(
vae_class='spatialVAE',
vae_kwargs=dict(
input_channels=3,
),
vae_trainer_kwargs=dict(
lr=1e-3,
beta=0,
),
vae_train_epochs=50,
num_image_examples=30000,
vae_architecture=imsize48_default_architecture,
),
save_video=True,
save_video_kwargs=dict(
save_video_period=10,
imsize=48,
),
)
search_space = {
'disentangled_qf_kwargs.encode_state': [True],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
n_seeds = 1
mode = 'local'
exp_prefix = '{}'.format(
__file__.replace('/', '-').replace('_', '-').split('.')[0]
)
n_seeds = 2
mode = 'local'
exp_prefix = 'disentangle-extrapolate-vectorized-3'
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for _ in range(n_seeds):
run_experiment(
disentangled_grill_her_twin_sac_experiment,
exp_prefix=exp_prefix,
mode=mode,
variant=variant,
use_gpu=True,
num_exps_per_instance=3,
gcp_kwargs=dict(
zone='us-east1-c',
gpu_kwargs=dict(
gpu_model='nvidia-tesla-k80',
num_gpu=1,
)
),
time_in_mins=int(2.5*24*60),
)
| true
| true
|
f71951497f2af63f6a8b59d46f752d982dea0860
| 8,150
|
py
|
Python
|
python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py
|
TochkaAI/Paddle
|
481ee79fc92304f33165f7ed0679f16c36862cea
|
[
"Apache-2.0"
] | 3
|
2021-06-08T14:24:36.000Z
|
2021-06-08T14:24:38.000Z
|
python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py
|
chenyanlei1/Paddle
|
f249a5f05f0f5832279244d88c8cb4eaaad1fbd4
|
[
"Apache-2.0"
] | 1
|
2021-03-17T07:53:43.000Z
|
2021-03-17T07:53:43.000Z
|
python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py
|
chenyanlei1/Paddle
|
f249a5f05f0f5832279244d88c8cb4eaaad1fbd4
|
[
"Apache-2.0"
] | 1
|
2021-06-17T06:52:01.000Z
|
2021-06-17T06:52:01.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import PassVersionChecker
from paddle.fluid.core import AnalysisConfig
class TensorRTSubgraphPassActivationTest(InferencePassTest):
def setUpTensorRTParam(self):
self.enable_trt = True
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False)
def setUp(self):
self.setUpTensorRTParam()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name="data", shape=[-1, 6, 64, 64], dtype="float32")
act_out = self.append_act(data)
out = fluid.layers.batch_norm(act_out, is_test=True)
self.feeds = {
"data": np.random.random([1, 6, 64, 64]).astype("float32"),
}
self.fetch_list = [out]
def append_act(self, x):
return fluid.layers.relu(x)
def test_check_output(self):
if core.is_compiled_with_cuda():
use_gpu = True
if os.path.exists(self.path + "_opt_cache"):
shutil.rmtree(self.path + "_opt_cache")
if self.trt_parameters.precision == AnalysisConfig.Precision.Float32:
self.check_output_with_option(use_gpu)
else:
self.check_output_with_option(use_gpu, 1e-3)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
class TensorRTSubgraphPassLeakyReluTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.leaky_relu(x)
class TensorRTSubgraphPassRelu6Test(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.relu6(x)
class TensorRTSubgraphPassSoftMaxTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.softmax(x)
class TensorRTSubgraphPassSigmoidTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.sigmoid(x)
class TensorRTSubgraphPassHardSwishTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.hard_swish(x)
class TensorRTSubgraphPassHardSigmoidTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.hard_sigmoid(x)
class TensorRTSubgraphPassHardSwishPluginTest(
TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.hard_swish(x, threshold=4.0, scale=8.0)
class TensorRTSubgraphPassClipTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.clip(x, 0, 1)
class TensorRTSubgraphPassTanhTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.tanh(x)
class TensorRTSubgraphPassSwishTest(TensorRTSubgraphPassActivationTest):
def setUpTensorRTParam(self):
self.enable_trt = True
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Float32, True, False)
def append_act(self, x):
return fluid.layers.swish(x)
class TensorRTSubgraphPassSwishFp16SerializeTest(
TensorRTSubgraphPassActivationTest):
def setUpTensorRTParam(self):
self.enable_trt = True
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False)
def append_act(self, x):
return fluid.layers.swish(x)
class TensorRTSubgraphPassDynamicSwishFp16SerializeTest(
TensorRTSubgraphPassActivationTest):
def setUpTensorRTParam(self):
self.enable_trt = True
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False)
self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam(
{
'data': [1, 6, 8, 8]
}, {'data': [1, 6, 512, 512]}, {'data': [1, 6, 256, 256]}, False)
def append_act(self, x):
return fluid.layers.swish(x)
class TensorRTSubgraphPassPreluAllTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.prelu(x, mode='all')
class TensorRTSubgraphPassPreluChannelTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.prelu(x, mode='channel')
class TensorRTSubgraphPassPreluElementTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.prelu(x, mode='element')
class TensorRTSubgraphPassGeluTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.gelu(x)
class TensorRTSubgraphPassGeluDynamicTest(TensorRTSubgraphPassActivationTest):
def setUpTensorRTParam(self):
self.enable_trt = True
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False)
self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam(
{
'data': [1, 6, 8, 8]
}, {'data': [1, 6, 512, 512]}, {'data': [1, 6, 256, 256]}, False)
def append_act(self, x):
return fluid.layers.gelu(x)
class TensorRTSubgraphPassGeluFp16Test(TensorRTSubgraphPassActivationTest):
def setUpTensorRTParam(self):
self.enable_trt = True
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Half, False, False)
def append_act(self, x):
return fluid.layers.gelu(x)
class TensorRTSubgraphPassGeluFp16SerializeTest(
TensorRTSubgraphPassActivationTest):
def setUpTensorRTParam(self):
self.enable_trt = True
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False)
def append_act(self, x):
return fluid.layers.gelu(x)
class TensorRTSubgraphPassGeluFp16DynamicTest(
TensorRTSubgraphPassActivationTest):
def setUpTensorRTParam(self):
self.enable_trt = True
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Half, False, False)
self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam(
{
'data': [1, 6, 8, 8]
}, {'data': [1, 6, 512, 512]}, {'data': [1, 6, 256, 256]}, False)
def append_act(self, x):
return fluid.layers.gelu(x)
class TensorRTSubgraphPassGeluFp16DynamicSerializeTest(
TensorRTSubgraphPassActivationTest):
def setUpTensorRTParam(self):
self.enable_trt = True
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False)
self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam(
{
'data': [1, 6, 8, 8]
}, {'data': [1, 6, 512, 512]}, {'data': [1, 6, 256, 256]}, False)
def append_act(self, x):
return fluid.layers.gelu(x)
if __name__ == "__main__":
unittest.main()
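These cases are standard unittest classes, so a single activation can be exercised in isolation; this requires a Paddle build with CUDA and TensorRT enabled, otherwise the GPU branch is skipped.

# Run one test class from the containing directory (module name assumed
# to match the filename):
#   python -m unittest test_trt_activation_pass.TensorRTSubgraphPassGeluTest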
| 35.58952
| 89
| 0.699755
|
import os
import shutil
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import PassVersionChecker
from paddle.fluid.core import AnalysisConfig
class TensorRTSubgraphPassActivationTest(InferencePassTest):
def setUpTensorRTParam(self):
self.enable_trt = True
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False)
def setUp(self):
self.setUpTensorRTParam()
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name="data", shape=[-1, 6, 64, 64], dtype="float32")
act_out = self.append_act(data)
out = fluid.layers.batch_norm(act_out, is_test=True)
self.feeds = {
"data": np.random.random([1, 6, 64, 64]).astype("float32"),
}
self.fetch_list = [out]
def append_act(self, x):
return fluid.layers.relu(x)
def test_check_output(self):
if core.is_compiled_with_cuda():
use_gpu = True
if os.path.exists(self.path + "_opt_cache"):
shutil.rmtree(self.path + "_opt_cache")
if self.trt_parameters.precision == AnalysisConfig.Precision.Float32:
self.check_output_with_option(use_gpu)
else:
self.check_output_with_option(use_gpu, 1e-3)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
class TensorRTSubgraphPassLeakyReluTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.leaky_relu(x)
class TensorRTSubgraphPassRelu6Test(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.relu6(x)
class TensorRTSubgraphPassSoftMaxTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.softmax(x)
class TensorRTSubgraphPassSigmoidTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.sigmoid(x)
class TensorRTSubgraphPassHardSwishTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.hard_swish(x)
class TensorRTSubgraphPassHardSigmoidTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.hard_sigmoid(x)
class TensorRTSubgraphPassHardSwishPluginTest(
TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.hard_swish(x, threshold=4.0, scale=8.0)
class TensorRTSubgraphPassClipTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.clip(x, 0, 1)
class TensorRTSubgraphPassTanhTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.tanh(x)
class TensorRTSubgraphPassSwishTest(TensorRTSubgraphPassActivationTest):
def setUpTensorRTParam(self):
self.enable_trt = True
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Float32, True, False)
def append_act(self, x):
return fluid.layers.swish(x)
class TensorRTSubgraphPassSwishFp16SerializeTest(
TensorRTSubgraphPassActivationTest):
def setUpTensorRTParam(self):
self.enable_trt = True
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False)
def append_act(self, x):
return fluid.layers.swish(x)
class TensorRTSubgraphPassDynamicSwishFp16SerializeTest(
TensorRTSubgraphPassActivationTest):
def setUpTensorRTParam(self):
self.enable_trt = True
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False)
self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam(
{
'data': [1, 6, 8, 8]
}, {'data': [1, 6, 512, 512]}, {'data': [1, 6, 256, 256]}, False)
def append_act(self, x):
return fluid.layers.swish(x)
class TensorRTSubgraphPassPreluAllTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.prelu(x, mode='all')
class TensorRTSubgraphPassPreluChannelTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.prelu(x, mode='channel')
class TensorRTSubgraphPassPreluElementTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.prelu(x, mode='element')
class TensorRTSubgraphPassGeluTest(TensorRTSubgraphPassActivationTest):
def append_act(self, x):
return fluid.layers.gelu(x)
class TensorRTSubgraphPassGeluDynamicTest(TensorRTSubgraphPassActivationTest):
def setUpTensorRTParam(self):
self.enable_trt = True
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False)
self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam(
{
'data': [1, 6, 8, 8]
}, {'data': [1, 6, 512, 512]}, {'data': [1, 6, 256, 256]}, False)
def append_act(self, x):
return fluid.layers.gelu(x)
class TensorRTSubgraphPassGeluFp16Test(TensorRTSubgraphPassActivationTest):
def setUpTensorRTParam(self):
self.enable_trt = True
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Half, False, False)
def append_act(self, x):
return fluid.layers.gelu(x)
class TensorRTSubgraphPassGeluFp16SerializeTest(
TensorRTSubgraphPassActivationTest):
def setUpTensorRTParam(self):
self.enable_trt = True
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False)
def append_act(self, x):
return fluid.layers.gelu(x)
class TensorRTSubgraphPassGeluFp16DynamicTest(
TensorRTSubgraphPassActivationTest):
def setUpTensorRTParam(self):
self.enable_trt = True
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Half, False, False)
self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam(
{
'data': [1, 6, 8, 8]
}, {'data': [1, 6, 512, 512]}, {'data': [1, 6, 256, 256]}, False)
def append_act(self, x):
return fluid.layers.gelu(x)
class TensorRTSubgraphPassGeluFp16DynamicSerializeTest(
TensorRTSubgraphPassActivationTest):
def setUpTensorRTParam(self):
self.enable_trt = True
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False)
self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam(
{
'data': [1, 6, 8, 8]
}, {'data': [1, 6, 512, 512]}, {'data': [1, 6, 256, 256]}, False)
def append_act(self, x):
return fluid.layers.gelu(x)
if __name__ == "__main__":
unittest.main()
| true
| true
|
f71951e82d394762ce0671379de3793e5bb9983b
| 1,903
|
py
|
Python
|
UnitTests/test_battery_sensor_features_extractor.py
|
naveenkambham/big_five_personality_machine_learning
|
a4d673e7e72287f2448b6a7b2729e5231b4f7ab2
|
[
"MIT"
] | 8
|
2021-02-22T22:12:32.000Z
|
2022-03-25T15:18:28.000Z
|
UnitTests/test_battery_sensor_features_extractor.py
|
naveenkambham/big_five_personality_machine_learning
|
a4d673e7e72287f2448b6a7b2729e5231b4f7ab2
|
[
"MIT"
] | 1
|
2020-12-29T18:59:39.000Z
|
2021-01-13T17:41:25.000Z
|
UnitTests/test_battery_sensor_features_extractor.py
|
naveenkambham/big_five_personality_machine_learning
|
a4d673e7e72287f2448b6a7b2729e5231b4f7ab2
|
[
"MIT"
] | 4
|
2021-04-08T11:36:33.000Z
|
2022-02-18T14:12:47.000Z
|
"""
Developer : Naveen Kambham
Description: Unit tests for the battery sensor feature extractor. Most of the data-extraction code has to be validated visually by inspecting the plotted distributions.
"""
#Importing the required libraries.
import unittest
import numpy as np
from FeatureExtraction import battery_sensor_features_extractor
class BatterySensorTestCase(unittest.TestCase):
"""
Tests for battery_sensor_features_extractor.py
"""
def test_TakeMostProbableTimeInStudy(self):
"""
        Test the most-probable-time functionality.
:return:
"""
        # Case 1: multiple values in the day
result= battery_sensor_features_extractor.TakeMostProbableTimeInStudy([1,1,1,1,2,2,3,3,3,3,3,3,3,3],[1,2,0])
self.assertEqual(result,3)
        # Case 2: only one value in the day
result = battery_sensor_features_extractor.TakeMostProbableTimeInStudy(
[1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3], [1])
self.assertEqual(result, 4)
        # Case 3: a single value that does not appear in the study times seen so far
result = battery_sensor_features_extractor.TakeMostProbableTimeInStudy(
[1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3], [0])
self.assertEqual(result, 0)
def test_extract(self):
"""
        Test the feature-extractor code.
:return:
"""
        # Extract the features
        df_battery=battery_sensor_features_extractor.extract(r"/home/naveen/Data/Shed10/Filtered/battery_events.csv")
        # Charging duration must be non-negative, and daily charging time is bounded by the 24 hours in a day
self.assertTrue(np.min(df_battery['Battery_Charging_Duration'] >=0))
self.assertTrue(np.min(df_battery['CharginTimeDaily'] >=0) and np.max(df_battery['CharginTimeDaily'] <=24))
if __name__ == '__main__':
unittest.main()
| 38.836735
| 171
| 0.656332
|
import unittest
import numpy as np
from FeatureExtraction import battery_sensor_features_extractor
class BatterySensorTestCase(unittest.TestCase):
def test_TakeMostProbableTimeInStudy(self):
result= battery_sensor_features_extractor.TakeMostProbableTimeInStudy([1,1,1,1,2,2,3,3,3,3,3,3,3,3],[1,2,0])
self.assertEqual(result,3)
result = battery_sensor_features_extractor.TakeMostProbableTimeInStudy(
[1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3], [1])
self.assertEqual(result, 4)
result = battery_sensor_features_extractor.TakeMostProbableTimeInStudy(
[1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3], [0])
self.assertEqual(result, 0)
def test_extract(self):
df_battery=battery_sensor_features_extractor.extract(r"/home/naveen/Data/Shed10/Filtered/battery_events.csv")
self.assertTrue(np.min(df_battery['Battery_Charging_Duration'] >=0))
self.assertTrue(np.min(df_battery['CharginTimeDaily'] >=0) and np.max(df_battery['CharginTimeDaily'] <=24))
if __name__ == '__main__':
unittest.main()
| true
| true
|
f7195383ef1c320e1cc9ce4ac28c29823e011af1
| 2,059
|
py
|
Python
|
graph.py
|
jacgonzalez/Graph_Art
|
ea724c3e659aca63107ae9a59cb646f8aba821c6
|
[
"MIT"
] | null | null | null |
graph.py
|
jacgonzalez/Graph_Art
|
ea724c3e659aca63107ae9a59cb646f8aba821c6
|
[
"MIT"
] | null | null | null |
graph.py
|
jacgonzalez/Graph_Art
|
ea724c3e659aca63107ae9a59cb646f8aba821c6
|
[
"MIT"
] | null | null | null |
import math
points = [[4,1], [4,2], [4,3],
[3,1], [3,2], [3,3],
[2,1], [2,2], [2,3],
[1,1], [1,2], [1,3]]
def distance(point1, point2):
return math.sqrt(((point2[0] - point1[0])**2) + ((point2[1] - point1[1])**2))
def k_neighbors(i, points, k):
"""
i: index of a point
points: list of points
k: number of neighbors
"""
distancias_point_i = []
point_i = points[i]
size = len(points)
j = 0
while j < size:
point_j = points[j]
distancias_point_i.append([distance(point_i, point_j), j])
j += 1
distancias_point_i.sort()
#print(distancias_point_i)
result = []
for m in range(len(distancias_point_i)):
result.append(distancias_point_i[m][1])
return result[1:k+1]
#print(distance([1,2], [3,4]))
print(k_neighbors(3, points, 5))
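# For point index 3 ([3, 1]) the five nearest neighbours, with ties broken by
# index, are the points at indices [0, 4, 6, 1, 7], so the line above prints
# exactly that list.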
class Graph():
def __init__(self):
self.V = []
self.E = []
def add_vertex(self, info):
self.V.append(info)
self.E.append([])
def add_edge(self, start, finish):
self.E[start].append(finish)
def get_vertex(self, i):
return self.V[i]
def get_neighbors(self, info, k):
return k_neighbors(info, points, k)
def print(self):
print(self.V)
print(self.E)
""" graph1 = Graph()
graph1.add_vertex(0)
graph1.add_vertex(1)
graph1.add_vertex(2)
graph1.add_vertex(3)
graph1.add_vertex(4)
graph1.add_vertex(5)
graph1.add_vertex(6)
graph1.add_vertex(7)
graph1.add_edge(1, 2)
graph1.add_edge(2, 4)
graph1.add_edge(3, 1)
graph1.add_edge(3, 4)
graph1.add_edge(4, 5)
graph1.add_edge(4, 7)
graph1.add_edge(6, 7)
graph1.add_edge(5, 6)
graph1.print() """
def create_nn_graph(points, k):
nn_graph = Graph()
j = 0
size = len(points)
while j < size:
nn_graph.add_vertex(points[j])
neighbors = k_neighbors(j, points, k)
for n in range(len(neighbors)):
nn_graph.add_edge(j, neighbors[n])
j += 1
return nn_graph
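# Note that create_nn_graph builds a *directed* k-nearest-neighbour graph: each
# vertex gets edges to its k closest points, and the relation is not
# necessarily symmetric.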
graph = create_nn_graph(points, 3)
graph.print()
| 21.447917
| 81
| 0.592521
|
import math
points = [[4,1], [4,2], [4,3],
[3,1], [3,2], [3,3],
[2,1], [2,2], [2,3],
[1,1], [1,2], [1,3]]
def distance(point1, point2):
return math.sqrt(((point2[0] - point1[0])**2) + ((point2[1] - point1[1])**2))
def k_neighbors(i, points, k):
distancias_point_i = []
point_i = points[i]
size = len(points)
j = 0
while j < size:
point_j = points[j]
distancias_point_i.append([distance(point_i, point_j), j])
j += 1
distancias_point_i.sort()
result = []
for m in range(len(distancias_point_i)):
result.append(distancias_point_i[m][1])
return result[1:k+1]
print(k_neighbors(3, points, 5))
class Graph():
def __init__(self):
self.V = []
self.E = []
def add_vertex(self, info):
self.V.append(info)
self.E.append([])
def add_edge(self, start, finish):
self.E[start].append(finish)
def get_vertex(self, i):
return self.V[i]
def get_neighbors(self, info, k):
return k_neighbors(info, points, k)
def print(self):
print(self.V)
print(self.E)
def create_nn_graph(points, k):
nn_graph = Graph()
j = 0
size = len(points)
while j < size:
nn_graph.add_vertex(points[j])
neighbors = k_neighbors(j, points, k)
for n in range(len(neighbors)):
nn_graph.add_edge(j, neighbors[n])
j += 1
return nn_graph
graph = create_nn_graph(points, 3)
graph.print()
| true
| true
|
f71953db54832094263b29e4d88077938efd3aed
| 5,989
|
py
|
Python
|
test/vanilla/Expected/AcceptanceTests/BodyComplex/bodycomplex/aio/operations/_inheritance_operations.py
|
amrElroumy/autorest.python
|
b37af1779f6d53b4fa0d92da62151f8133006f98
|
[
"MIT"
] | null | null | null |
test/vanilla/Expected/AcceptanceTests/BodyComplex/bodycomplex/aio/operations/_inheritance_operations.py
|
amrElroumy/autorest.python
|
b37af1779f6d53b4fa0d92da62151f8133006f98
|
[
"MIT"
] | null | null | null |
test/vanilla/Expected/AcceptanceTests/BodyComplex/bodycomplex/aio/operations/_inheritance_operations.py
|
amrElroumy/autorest.python
|
b37af1779f6d53b4fa0d92da62151f8133006f98
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class InheritanceOperations:
"""InheritanceOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~bodycomplex.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get_valid(self, **kwargs) -> "_models.Siamese":
"""Get complex types that extend others.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Siamese, or the result of cls(response)
:rtype: ~bodycomplex.models.Siamese
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType["_models.Siamese"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
accept = "application/json"
# Construct URL
url = self.get_valid.metadata["url"] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize("Siamese", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_valid.metadata = {"url": "/complex/inheritance/valid"} # type: ignore
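    # The URL template lives on the function object itself (see the
    # `self.get_valid.metadata["url"]` lookup above); the generated operations
    # follow this pattern so the path can be inspected or patched without
    # touching the method body.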
@distributed_trace_async
async def put_valid(self, complex_body: "_models.Siamese", **kwargs) -> None:
"""Put complex types that extend others.
:param complex_body: Please put a siamese with id=2, name="Siameee", color=green,
breed=persion, which hates 2 dogs, the 1st one named "Potato" with id=1 and food="tomato", and
the 2nd one named "Tomato" with id=-1 and food="french fries".
:type complex_body: ~bodycomplex.models.Siamese
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.put_valid.metadata["url"] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters["Content-Type"] = self._serialize.header("content_type", content_type, "str")
header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(complex_body, "Siamese")
body_content_kwargs["content"] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
put_valid.metadata = {"url": "/complex/inheritance/valid"} # type: ignore
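# Usage sketch (hypothetical wiring; the concrete client class name and base
# URL are illustrative, not taken from this file):
#   async with AutoRestComplexTestService(base_url="http://localhost:3000") as client:
#       siamese = await client.inheritance.get_valid()
#       await client.inheritance.put_valid(siamese)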
| 43.398551
| 106
| 0.674236
|
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class InheritanceOperations:
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get_valid(self, **kwargs) -> "_models.Siamese":
cls = kwargs.pop("cls", None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
accept = "application/json"
url = self.get_valid.metadata["url"]
query_parameters = {}
header_parameters = {}
header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize("Siamese", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_valid.metadata = {"url": "/complex/inheritance/valid"}
@distributed_trace_async
async def put_valid(self, complex_body: "_models.Siamese", **kwargs) -> None:
cls = kwargs.pop("cls", None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self.put_valid.metadata["url"]
query_parameters = {}
header_parameters = {}
header_parameters["Content-Type"] = self._serialize.header("content_type", content_type, "str")
header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
body_content_kwargs = {}
body_content = self._serialize.body(complex_body, "Siamese")
body_content_kwargs["content"] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
put_valid.metadata = {"url": "/complex/inheritance/valid"}
| true
| true
|
f7195404dfad6b9a9dd9872526043575b12c080b
| 3,669
|
py
|
Python
|
trafficModel.py
|
Mobility-simulation/IDM_Program
|
ac33674373d8560b440562acb5acf82ae5bf4fc6
|
[
"Unlicense"
] | null | null | null |
trafficModel.py
|
Mobility-simulation/IDM_Program
|
ac33674373d8560b440562acb5acf82ae5bf4fc6
|
[
"Unlicense"
] | null | null | null |
trafficModel.py
|
Mobility-simulation/IDM_Program
|
ac33674373d8560b440562acb5acf82ae5bf4fc6
|
[
"Unlicense"
] | null | null | null |
import tkinter as tk
from system.operation import Operation
from model.world import World
import settings
root = tk.Tk()
img = tk.PhotoImage(file='png/sports-car.png')
root.tk.call('wm', 'iconphoto', root._w, img)
menu = tk.Menu(root)
root.config(menu=menu)
root.protocol("WM_DELETE_WINDOW", lambda: op.terminate(root))
toolbar = tk.Frame(root)
function = tk.Frame(toolbar)
info = tk.Frame(toolbar)
world = World()
world.load()
buttonGroup = tk.Frame(function)
sliderGroup = tk.Frame(function)
play = tk.Button(buttonGroup, text="Action")
playPNG = tk.PhotoImage(file="png/play-button.png")
pausePNG = tk.PhotoImage(file="png/pause.png")
play.config(compound=tk.LEFT, image=playPNG, width="70",
height="24", bg="#FFFFFF", command=lambda: op.runModel())
play.pack(side=tk.LEFT, padx=2, pady=2)
refresh = tk.Button(buttonGroup, text="Reload")
refreshPNG = tk.PhotoImage(file="png/refresh-button.png")
refresh.config(compound=tk.LEFT, image=refreshPNG, width="70",
height="24", bg="#FFFFFF", command=lambda: op.refresh())
refresh.pack(side=tk.LEFT, padx=2, pady=2)
debug = tk.Button(buttonGroup, text="Debug")
debugPNG = tk.PhotoImage(file="png/debug.png")
debug.config(compound=tk.LEFT, image=debugPNG, width="70",
height="24", bg="#FFFFFF", command=lambda: op.debugSwitch())
debug.pack(side=tk.LEFT, padx=2, pady=2)
gridMap = tk.Button(buttonGroup, text="New Map")
mapPNG = tk.PhotoImage(file="png/map.png")
gridMap.config(compound=tk.LEFT, image=mapPNG, width="70",
height="24", bg="#FFFFFF", command=lambda: op.generateMap())
gridMap.pack(side=tk.LEFT, padx=2, pady=2)
timeSliderName = tk.Entry(sliderGroup, width='10')
timeSliderName.grid(row=0, column=0)
timeSliderName.insert(0, "Time scale")
timeSlider = tk.Scale(sliderGroup, from_=settings.setDict["timeMin"],
to=settings.setDict["timeMax"], orient=tk.HORIZONTAL,
troughcolor="#90C3D4", bg="#FFFFFF")
timeSlider.grid(row=1, column=0)
carSliderName = tk.Entry(sliderGroup, width='10')
carSliderName.grid(row=0, column=1)
carSliderName.insert(0, "Cars Number")
carSlider = tk.Scale(sliderGroup, from_=settings.setDict["carMin"],
to=settings.setDict["carMax"], orient=tk.HORIZONTAL,
troughcolor="#90C3D4", bg="#FFFFFF")
carSlider.grid(row=1, column=1)
systemName = tk.Entry(info, width='10')
systemName.grid(row=0, column=0)
systemName.insert(0, "System")
systemText = tk.Text(info, height=2, width=40)
systemText.grid(row=1, column=0)
roadName = tk.Entry(info, width='10')
roadName.grid(row=0, column=1)
roadName.insert(0, "Road info")
roadText = tk.Text(info, height=2, width=45)
roadText.grid(row=1, column=1)
carName = tk.Entry(info, width='10')
carName.grid(row=0, column=2)
carName.insert(0, "Car info")
carText = tk.Text(info, height=2, width=40)
carText.grid(row=1, column=2)
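# Bundle the widget handles into one dict so the Operation controller has
# access to the sliders, buttons and text boxes at runtime.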
toolDict = dict()
toolDict['playBtn'] = play
toolDict['playPNG'] = playPNG
toolDict['pausePNG'] = pausePNG
toolDict['carText'] = carText
toolDict['roadText'] = roadText
toolDict['systemText'] = systemText
toolDict['carSlider'] = carSlider
toolDict['timeSlider'] = timeSlider
toolDict['debugBtn'] = debug
screen = tk.Frame(root)
op = Operation(screen, toolDict, world)
op.pack(fill="both", expand=True)
buttonGroup.config(bg="#90C3D4")
buttonGroup.pack(side=tk.LEFT)
sliderGroup.config(bg="#90C3D4")
sliderGroup.pack(side=tk.LEFT)
function.config(bg="#90C3D4")
function.pack(side=tk.LEFT)
info.config(bg="#90C3D4")
info.pack(side=tk.RIGHT)
toolbar.config(bg="#90C3D4")
toolbar.pack(side=tk.TOP, fill=tk.X)
screen.pack(side=tk.BOTTOM, fill=tk.X)
root.mainloop()
| 29.829268
| 75
| 0.706187
|
import tkinter as tk
from system.operation import Operation
from model.world import World
import settings
root = tk.Tk()
img = tk.PhotoImage(file='png/sports-car.png')
root.tk.call('wm', 'iconphoto', root._w, img)
menu = tk.Menu(root)
root.config(menu=menu)
root.protocol("WM_DELETE_WINDOW", lambda: op.terminate(root))
toolbar = tk.Frame(root)
function = tk.Frame(toolbar)
info = tk.Frame(toolbar)
world = World()
world.load()
buttonGroup = tk.Frame(function)
sliderGroup = tk.Frame(function)
play = tk.Button(buttonGroup, text="Action")
playPNG = tk.PhotoImage(file="png/play-button.png")
pausePNG = tk.PhotoImage(file="png/pause.png")
play.config(compound=tk.LEFT, image=playPNG, width="70",
height="24", bg="#FFFFFF", command=lambda: op.runModel())
play.pack(side=tk.LEFT, padx=2, pady=2)
refresh = tk.Button(buttonGroup, text="Reload")
refreshPNG = tk.PhotoImage(file="png/refresh-button.png")
refresh.config(compound=tk.LEFT, image=refreshPNG, width="70",
height="24", bg="#FFFFFF", command=lambda: op.refresh())
refresh.pack(side=tk.LEFT, padx=2, pady=2)
debug = tk.Button(buttonGroup, text="Debug")
debugPNG = tk.PhotoImage(file="png/debug.png")
debug.config(compound=tk.LEFT, image=debugPNG, width="70",
height="24", bg="#FFFFFF", command=lambda: op.debugSwitch())
debug.pack(side=tk.LEFT, padx=2, pady=2)
gridMap = tk.Button(buttonGroup, text="New Map")
mapPNG = tk.PhotoImage(file="png/map.png")
gridMap.config(compound=tk.LEFT, image=mapPNG, width="70",
height="24", bg="#FFFFFF", command=lambda: op.generateMap())
gridMap.pack(side=tk.LEFT, padx=2, pady=2)
timeSliderName = tk.Entry(sliderGroup, width='10')
timeSliderName.grid(row=0, column=0)
timeSliderName.insert(0, "Time scale")
timeSlider = tk.Scale(sliderGroup, from_=settings.setDict["timeMin"],
to=settings.setDict["timeMax"], orient=tk.HORIZONTAL,
troughcolor="#90C3D4", bg="#FFFFFF")
timeSlider.grid(row=1, column=0)
carSliderName = tk.Entry(sliderGroup, width='10')
carSliderName.grid(row=0, column=1)
carSliderName.insert(0, "Cars Number")
carSlider = tk.Scale(sliderGroup, from_=settings.setDict["carMin"],
to=settings.setDict["carMax"], orient=tk.HORIZONTAL,
troughcolor="#90C3D4", bg="#FFFFFF")
carSlider.grid(row=1, column=1)
systemName = tk.Entry(info, width='10')
systemName.grid(row=0, column=0)
systemName.insert(0, "System")
systemText = tk.Text(info, height=2, width=40)
systemText.grid(row=1, column=0)
roadName = tk.Entry(info, width='10')
roadName.grid(row=0, column=1)
roadName.insert(0, "Road info")
roadText = tk.Text(info, height=2, width=45)
roadText.grid(row=1, column=1)
carName = tk.Entry(info, width='10')
carName.grid(row=0, column=2)
carName.insert(0, "Car info")
carText = tk.Text(info, height=2, width=40)
carText.grid(row=1, column=2)
toolDict = dict()
toolDict['playBtn'] = play
toolDict['playPNG'] = playPNG
toolDict['pausePNG'] = pausePNG
toolDict['carText'] = carText
toolDict['roadText'] = roadText
toolDict['systemText'] = systemText
toolDict['carSlider'] = carSlider
toolDict['timeSlider'] = timeSlider
toolDict['debugBtn'] = debug
screen = tk.Frame(root)
op = Operation(screen, toolDict, world)
op.pack(fill="both", expand=True)
buttonGroup.config(bg="#90C3D4")
buttonGroup.pack(side=tk.LEFT)
sliderGroup.config(bg="#90C3D4")
sliderGroup.pack(side=tk.LEFT)
function.config(bg="#90C3D4")
function.pack(side=tk.LEFT)
info.config(bg="#90C3D4")
info.pack(side=tk.RIGHT)
toolbar.config(bg="#90C3D4")
toolbar.pack(side=tk.TOP, fill=tk.X)
screen.pack(side=tk.BOTTOM, fill=tk.X)
root.mainloop()
| true
| true
|
f719543be7e7a689ebcb0b8ad3fa69e2a94998d6
| 3,217
|
py
|
Python
|
setup.py
|
groupserver/gs.profile.status.send
|
d33c7ab535565d185a2ef95bf00c92b9ffeb8af7
|
[
"ZPL-2.1"
] | null | null | null |
setup.py
|
groupserver/gs.profile.status.send
|
d33c7ab535565d185a2ef95bf00c92b9ffeb8af7
|
[
"ZPL-2.1"
] | null | null | null |
setup.py
|
groupserver/gs.profile.status.send
|
d33c7ab535565d185a2ef95bf00c92b9ffeb8af7
|
[
"ZPL-2.1"
] | null | null | null |
# -*- coding: utf-8 -*-
############################################################################
#
# Copyright © 2012, 2013, 2014, 2015 OnlineGroups.net and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
import codecs
import os
import sys
from setuptools import setup, find_packages
from version import get_version
version = get_version()
# The argparse library was added to core in Python 2.7
core = ['setuptools',
'blessings',
'gs.config', # Note: without zope-support
'gs.form', ]
if sys.version_info > (2, 6):
requires = core
else:
requires = core + ['argparse']
with codecs.open('README.rst', encoding='utf-8') as f:
long_description = f.read()
with codecs.open(os.path.join("docs", "HISTORY.rst"),
encoding='utf-8') as f:
long_description += '\n' + f.read()
setup(
name='gs.profile.status.send',
version=version,
description="Send the profile-status notifications out",
long_description=long_description,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
"Intended Audience :: Developers",
'License :: OSI Approved :: Zope Public License',
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
'Topic :: Communications :: Email',
'Topic :: Communications :: Email :: Mailing List Servers',
'Topic :: Communications :: Email :: Mail Transport Agents',
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='groupserver, profile, notification',
author='Michael JasonSmith',
author_email='mpj17@onlinegroups.net',
url='https://github.com/groupserver/gs.profile.status.send/',
license='ZPL 2.1',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['gs', 'gs.profile', 'gs.profile.status', ],
include_package_data=True,
zip_safe=False,
install_requires=requires,
extras_require={'docs': ['Sphinx'], },
entry_points={
'console_scripts': [
'sendprofile = gs.profile.status.send.script:main',
],
# --=mpj17=-- Entry points are the work of the devil. Some time
# you, me and Mr Soldering Iron are going to have a little chat
# about how to do things better.
},
)
| 38.297619
| 76
| 0.618278
| true
| true
|
|
f71954b599b64b8d33b4eb0854424d9b156c78cd
| 64,577
|
py
|
Python
|
test/integration/component/test_redundant_router_network_rules.py
|
lafferty/cshv3
|
ee0ff7ac240bd24e19db6bd3fb9869dd087442ba
|
[
"Apache-2.0"
] | 2
|
2015-05-19T05:04:30.000Z
|
2016-09-07T00:33:17.000Z
|
test/integration/component/test_redundant_router_network_rules.py
|
lafferty/cshv3
|
ee0ff7ac240bd24e19db6bd3fb9869dd087442ba
|
[
"Apache-2.0"
] | null | null | null |
test/integration/component/test_redundant_router_network_rules.py
|
lafferty/cshv3
|
ee0ff7ac240bd24e19db6bd3fb9869dd087442ba
|
[
"Apache-2.0"
] | 2
|
2017-07-07T14:49:03.000Z
|
2018-07-31T06:38:42.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from nose.plugins.attrib import attr
from marvin.integration.lib.base import *
from marvin.integration.lib.utils import *
from marvin.integration.lib.common import *
#Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.cloudstackAPI import *
class Services:
"""Test Services for customer defects
"""
def __init__(self):
self.services = {
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended for unique
# username
"password": "password",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100,
"memory": 128,
},
"disk_offering": {
"displaytext": "Small",
"name": "Small",
"disksize": 1
},
"virtual_machine": {
"displayname": "Test VM",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"static_nat": {
"startport": 22,
"endport": 22,
"protocol": "TCP"
},
"network_offering": {
"name": 'Network offering-RVR services',
"displaytext": 'Network off-RVR services',
"guestiptype": 'Isolated',
"supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Firewall,Lb,UserData,StaticNat',
"traffictype": 'GUEST',
"availability": 'Optional',
"serviceProviderList": {
"Vpn": 'VirtualRouter',
"Dhcp": 'VirtualRouter',
"Dns": 'VirtualRouter',
"SourceNat": 'VirtualRouter',
"PortForwarding": 'VirtualRouter',
"Firewall": 'VirtualRouter',
"Lb": 'VirtualRouter',
"UserData": 'VirtualRouter',
"StaticNat": 'VirtualRouter',
},
"serviceCapabilityList": {
"SourceNat": {
"SupportedSourceNatTypes": "peraccount",
"RedundantRouter": "true",
},
"lb": {
"SupportedLbIsolation": "dedicated"
},
},
},
"host": {
"username": "root",
"password": "password",
"publicport": 22,
},
"network": {
"name": "Test Network",
"displaytext": "Test Network",
},
"lbrule": {
"name": "SSH",
"alg": "roundrobin",
# Algorithm used for load balancing
"privateport": 22,
"publicport": 22,
"openfirewall": True,
},
"natrule": {
"privateport": 22,
"publicport": 22,
"protocol": "TCP"
},
"natrule_221": {
"privateport": 22,
"publicport": 221,
"protocol": "TCP"
},
"fw_rule": {
"startport": 1,
"endport": 6000,
"cidr": '55.55.0.0/11',
# Any network (For creating FW rule)
"protocol": 'TCP',
},
"ostype": 'CentOS 5.3 (64-bit)',
"sleep": 60,
}
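# In the network offering above, the SourceNat capability "RedundantRouter":
# "true" is what makes networks created from it come up with a MASTER/BACKUP
# router pair, which is the property every test below asserts on.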
class TestRedundantRouterRulesLifeCycle(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestRedundantRouterRulesLifeCycle,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.network_offering = NetworkOffering.create(
cls.api_client,
cls.services["network_offering"],
conservemode=True
)
# Enable Network offering
cls.network_offering.update(cls.api_client, state='Enabled')
cls._cleanup = [
cls.service_offering,
cls.network_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = []
self.cleanup.insert(0, self.account)
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
self.debug("Warning: Exception during cleanup : %s" % e)
#raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_networkRules_afterRebootRouters(self):
"""Test network rules after master & backup routers rebooted
"""
# Steps to validate
# 1. listNetworks should show the created network in allocated state
# 2. listRouters returns no running routers
# 3. VMs should be deployed and in Running state
# 4. should list MASTER and BACKUP routers
# 5. listPublicIpAddresses for networkid should show acquired IP addr
# 6. listStaticNats for the network associated
# 7. listFirewallRules should show allowed ports open
# 8. ssh to succeed to the guestVM
# 9. listPublicIpAddresses for networkid should show acquired IP addr
# 10. listPortForwardRules to show open ports 221, 222
# 11. ssh should succeed for both ports
# 12. listPublicIpAddresses for networkid should show acquired IP addr
# 13 and 14. listLoadBalancerRules should show associated VMs for
# public IP
# 15. ssh should succeed to the user VMs
# 16. listRouters should show one Router in MASTER state and Running
# 17. ssh should work for PF, FW, and LB ips
# 18. listRouters should show both routers MASTER and BACKUP in
# Running state
# 19. listPortForwardingRules, listFirewallRules, listLoadBalancerRule
# should return empty response
        # 20. listPublicIpAddresses should show no more addresses
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.debug("Network state: %s" % nw_response.state)
self.assertEqual(
nw_response.state,
"Allocated",
"The network should be in allocated state after creation"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
routers,
None,
"Routers should not be spawned when network is in allocated state"
)
self.debug("Deploying VM in account: %s" % self.account.name)
# Spawn an instance in that network
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"Vm should be in running state after deployment"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
if routers[0].redundantstate == 'MASTER':
master_router = routers[0]
backup_router = routers[1]
else:
master_router = routers[1]
backup_router = routers[0]
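        # listRouters gives no ordering guarantee, so identify the MASTER and
        # BACKUP routers by their redundantstate instead of by position.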
self.debug("Associating public IP for network: %s" % network.name)
public_ip = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip.ipaddress.ipaddress,
network.id
))
self.debug("Enabling static NAT for IP: %s" %
public_ip.ipaddress.ipaddress)
try:
static_nat = StaticNATRule.create(
self.apiclient,
self.services["fw_rule"],
ipaddressid=public_ip.ipaddress.id
)
static_nat.enable(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
virtualmachineid=virtual_machine.id
)
self.debug("Static NAT enabled for IP: %s" %
public_ip.ipaddress.ipaddress)
except Exception as e:
self.fail("Failed to enable static NAT on IP: %s - %s" % (
public_ip.ipaddress.ipaddress, e))
public_ips = PublicIPAddress.list(
self.apiclient,
associatednetworkid=network.id,
listall=True,
isstaticnat=True
)
self.assertEqual(
isinstance(public_ips, list),
True,
"List public Ip for network should list the Ip addr"
)
self.assertEqual(
public_ips[0].ipaddress,
public_ip.ipaddress.ipaddress,
"List public Ip for network should list the Ip addr"
)
self.debug("creating a FW rule on IP: %s" %
public_ip.ipaddress.ipaddress)
fw_rule = FireWallRule.create(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
protocol='TCP',
cidrlist=[self.services["fw_rule"]["cidr"]],
startport=self.services["fw_rule"]["startport"],
endport=self.services["fw_rule"]["endport"]
)
self.debug("Created a firewall rule on 22 port of IP: %s" %
public_ip.ipaddress.ipaddress)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_2 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_2.ipaddress.ipaddress,
network.id
))
nat_rule = NATRule.create(
self.apiclient,
virtual_machine,
self.services["natrule_221"],
ipaddressid=public_ip_2.ipaddress.id,
openfirewall=True
)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_3 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_3.ipaddress.ipaddress,
network.id
))
self.debug("Creating LB rule for IP address: %s" %
public_ip_3.ipaddress.ipaddress)
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
ipaddressid=public_ip_3.ipaddress.id,
accountid=self.account.name,
networkid=network.id
)
self.debug("Adding %s to the LB rule %s" % (
virtual_machine.name,
lb_rule.name
))
lb_rule.assign(self.apiclient, [virtual_machine])
self.debug("Starting router ID: %s" % master_router.id)
for router in routers:
try:
self.debug("Rebooting router ID: %s" % master_router.id)
#Stop the router
cmd = rebootRouter.rebootRouterCmd()
cmd.id = router.id
self.apiclient.rebootRouter(cmd)
except Exception as e:
self.fail("Failed to reboot router..")
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
for router in routers:
self.assertEqual(
router.state,
"Running",
"Router state should be running"
)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip.ipaddress.ipaddress)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_2.ipaddress.ipaddress,
reconnect=True,
port=self.services["natrule_221"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_3.ipaddress.ipaddress,
reconnect=True,
port=self.services["lbrule"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_applyRules_restartRvRNetwork(self):
"""Test apply rules after network restart
"""
# Steps to validate
# 1. listNetworks should show the created network in allocated state
# 2. listRouters returns no running routers
# 3. VMs should be deployed and in Running state
# 4. should list MASTER and BACKUP routers
# 5. listPublicIpAddresses for networkid should show acquired IP addr
# 6. listStaticNats for the network associated
# 7. listFirewallRules should show allowed ports open
# 8. ssh to succeed to the guestVM
# 9. listPublicIpAddresses for networkid should show acquired IP addr
# 10. listPortForwardRules to show open ports 221, 222
# 11. ssh should succeed for both ports
# 12. listPublicIpAddresses for networkid should show acquired IP addr
# 13 and 14. listLoadBalancerRules should show associated VMs for
# public IP
# 15. ssh should succeed to the user VMs
# 16. listRouters should show one Router in MASTER state and Running &
# one in BACKUP and Running
# 17. ssh should work for PF, FW, and LB ips
# 18. listRouters should show one Router in MASTER state and Running &
# one in BACKUP and Running
# 19. ssh should work for PF, FW, and LB ips
# 20. listPortForwardingRules, listFirewallRules, listLoadBalancerRule
# should return empty response
        # 21. listPublicIpAddresses should show no more addresses
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.debug("Network state: %s" % nw_response.state)
self.assertEqual(
nw_response.state,
"Allocated",
"The network should be in allocated state after creation"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
routers,
None,
"Routers should not be spawned when network is in allocated state"
)
self.debug("Deploying VM in account: %s" % self.account.name)
# Spawn an instance in that network
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"Vm should be in running state after deployment"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
if routers[0].redundantstate == 'MASTER':
master_router = routers[0]
backup_router = routers[1]
else:
master_router = routers[1]
backup_router = routers[0]
self.debug("Associating public IP for network: %s" % network.name)
public_ip = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip.ipaddress.ipaddress,
network.id
))
self.debug("Enabling static NAT for IP: %s" %
public_ip.ipaddress.ipaddress)
try:
static_nat = StaticNATRule.create(
self.apiclient,
self.services["fw_rule"],
ipaddressid=public_ip.ipaddress.id
)
static_nat.enable(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
virtualmachineid=virtual_machine.id
)
self.debug("Static NAT enabled for IP: %s" %
public_ip.ipaddress.ipaddress)
except Exception as e:
self.fail("Failed to enable static NAT on IP: %s - %s" % (
public_ip.ipaddress.ipaddress, e))
public_ips = PublicIPAddress.list(
self.apiclient,
associatednetworkid=network.id,
listall=True,
isstaticnat=True
)
self.assertEqual(
isinstance(public_ips, list),
True,
"List public Ip for network should list the Ip addr"
)
self.assertEqual(
public_ips[0].ipaddress,
public_ip.ipaddress.ipaddress,
"List public Ip for network should list the Ip addr"
)
self.debug("creating a FW rule on IP: %s" %
public_ip.ipaddress.ipaddress)
fw_rule = FireWallRule.create(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
protocol='TCP',
cidrlist=[self.services["fw_rule"]["cidr"]],
startport=self.services["fw_rule"]["startport"],
endport=self.services["fw_rule"]["endport"]
)
self.debug("Created a firewall rule on 22 port of IP: %s" %
public_ip.ipaddress.ipaddress)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_2 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_2.ipaddress.ipaddress,
network.id
))
nat_rule = NATRule.create(
self.apiclient,
virtual_machine,
self.services["natrule_221"],
ipaddressid=public_ip_2.ipaddress.id,
openfirewall=True
)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_3 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_3.ipaddress.ipaddress,
network.id
))
self.debug("Creating LB rule for IP address: %s" %
public_ip_3.ipaddress.ipaddress)
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
ipaddressid=public_ip_3.ipaddress.id,
accountid=self.account.name,
networkid=network.id
)
self.debug("Adding %s to the LB rule %s" % (
virtual_machine.name,
lb_rule.name
))
lb_rule.assign(self.apiclient, [virtual_machine])
self.debug("Restarting network ID: %s with cleanup true" %
network.id)
try:
network.restart(self.apiclient, cleanup=True)
        except Exception as e:
            self.fail("Failed to restart the network with cleanup: %s" % e)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
for router in routers:
self.assertEqual(
router.state,
"Running",
"Router state should be running"
)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip.ipaddress.ipaddress)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_2.ipaddress.ipaddress,
reconnect=True,
port=self.services["natrule_221"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_3.ipaddress.ipaddress,
reconnect=True,
port=self.services["lbrule"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Restarting network ID: %s with cleanup false" %
network.id)
try:
network.restart(self.apiclient, cleanup=False)
        except Exception as e:
            self.fail("Failed to restart the network without cleanup: %s" % e)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
for router in routers:
self.assertEqual(
router.state,
"Running",
"Router state should be running"
)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip.ipaddress.ipaddress)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_2.ipaddress.ipaddress,
reconnect=True,
port=self.services["natrule_221"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_3.ipaddress.ipaddress,
reconnect=True,
port=self.services["lbrule"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh"])
    def test_apply_and_delete_NetworkRulesOnRvR(self):
"""Test apply and delete network rules on redundant router
"""
# Steps to validate
# 1. listNetworks should show the created network in allocated state
# 2. listRouters returns no running routers
# 3. VMs should be deployed and in Running state
# 4. should list MASTER and BACKUP routers
# 5. listPublicIpAddresses for networkid should show acquired IP
# 6. listRemoteAccessVpns for the network associated should show the
# VPN created
# 7. listRemoteAccessVpns for the network associated should return
# empty response
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.debug("Network state: %s" % nw_response.state)
self.assertEqual(
nw_response.state,
"Allocated",
"The network should be in allocated state after creation"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
routers,
None,
"Routers should not be spawned when network is in allocated state"
)
self.debug("Deploying VM in account: %s" % self.account.name)
# Spawn an instance in that network
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"Vm should be in running state after deployment"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
self.debug("Associating public IP for network: %s" % network.name)
public_ip = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip.ipaddress.ipaddress,
network.id
))
self.debug("Enabling static NAT for IP: %s" %
public_ip.ipaddress.ipaddress)
try:
static_nat = StaticNATRule.create(
self.apiclient,
self.services["fw_rule"],
ipaddressid=public_ip.ipaddress.id
)
self.debug("Static NAT enabled for IP: %s" %
public_ip.ipaddress.ipaddress)
static_nat.enable(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
virtualmachineid=virtual_machine.id
)
except Exception as e:
self.fail("Failed to enable static NAT on IP: %s - %s" % (
public_ip.ipaddress.ipaddress, e))
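# Static NAT maps the acquired public IP one-to-one onto the VM's
# private IP, so once the firewall rule created below opens port 22
# the VM is reachable over ssh directly on this public address.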
public_ips = PublicIPAddress.list(
self.apiclient,
associatednetworkid=network.id,
listall=True,
isstaticnat=True
)
self.assertEqual(
isinstance(public_ips, list),
True,
"List public Ip for network should list the Ip addr"
)
self.assertEqual(
public_ips[0].ipaddress,
public_ip.ipaddress.ipaddress,
"List public Ip for network should list the Ip addr"
)
self.debug("creating a FW rule on IP: %s" %
public_ip.ipaddress.ipaddress)
fw_rule = FireWallRule.create(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
protocol='TCP',
cidrlist=[self.services["fw_rule"]["cidr"]],
startport=self.services["fw_rule"]["startport"],
endport=self.services["fw_rule"]["endport"]
)
self.debug("Created a firewall rule on 22 port of IP: %s" %
public_ip.ipaddress.ipaddress)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip.ipaddress.ipaddress)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_2 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_2.ipaddress.ipaddress,
network.id
))
nat_rule = NATRule.create(
self.apiclient,
virtual_machine,
self.services["natrule_221"],
ipaddressid=public_ip_2.ipaddress.id,
openfirewall=True
)
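# openfirewall=True asks CloudStack to create the matching ingress
# firewall rule for the public port automatically, so no separate
# FireWallRule is needed on this second IP.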
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_2.ipaddress.ipaddress,
reconnect=True,
port=self.services["natrule_221"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_3 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_3.ipaddress.ipaddress,
network.id
))
self.debug("Creating LB rule for IP address: %s" %
public_ip_3.ipaddress.ipaddress)
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
ipaddressid=public_ip_3.ipaddress.id,
accountid=self.account.name,
networkid=network.id
)
self.debug("Adding %s to the LB rule %s" % (
virtual_machine.name,
lb_rule.name
))
lb_rule.assign(self.apiclient, [virtual_machine])
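# With a single VM behind the round-robin LB rule, every connection to
# the LB public port must land on that VM, so the ssh attempt below
# doubles as a check that the LB rule was programmed on the router.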
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_3.ipaddress.ipaddress,
reconnect=True,
port=self.services["lbrule"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh", "needle"])
def test_applyNetworkRules_MasterDown_deleteNetworkRules(self):
"""Test apply network rules when master down and delete network rules
"""
# Steps to validate
# 1. listNetworks should show the created network in allocated state
# 2. listRouters returns no running routers
# 3. VMs should be deployed and in Running state
# 4. should list MASTER and BACKUP routers
# 5. listPublicIpAddresses for networkid should show acquired IP addr
# 6. listStaticNats for the network associated
# 7. listFirewallRules should show allowed ports open
# 8. ssh should succeed to the guest VM
# 9. listPublicIpAddresses for networkid should show acquired IP addr
# 10. listPortForwardRules to show open ports 221, 222
# 11. ssh should succeed for both ports
# 12. listPublicIpAddresses for networkid should show acquired IP addr
# 13 and 14. listLoadBalancerRules should show associated VMs for
# public IP
# 15. ssh should succeed to the user VMs
# 16. listRouters should show one Router in MASTER state and Running
# 17. ssh should work for PF, FW, and LB ips
# 18. listRouters should show both routers MASTER and BACKUP in
# Running state
# 19. listPortForwardingRules, listFirewallRules, listLoadBalancerRules
# should return empty response
# 20. listPublicIpAddresses should show no more addresses
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.debug("Network state: %s" % nw_response.state)
self.assertEqual(
nw_response.state,
"Allocated",
"The network should be in allocated state after creation"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
routers,
None,
"Routers should not be spawned when network is in allocated state"
)
self.debug("Deploying VM in account: %s" % self.account.name)
# Spawn an instance in that network
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"Vm should be in running state after deployment"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
if routers[0].redundantstate == 'MASTER':
master_router = routers[0]
backup_router = routers[1]
else:
master_router = routers[1]
backup_router = routers[0]
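# NOTE: with a redundant virtual router pair CloudStack reports one
# router as MASTER and the other as BACKUP; stopping the MASTER below
# is expected to trigger a keepalived/VRRP-style failover so the
# BACKUP takes over and keeps the network serviceable.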
self.debug("Stopping router ID: %s" % master_router.id)
try:
Router.stop(self.apiclient, id=master_router.id)
except Exception as e:
self.fail("Failed to stop master router..")
self.debug("Associating public IP for network: %s" % network.name)
public_ip = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip.ipaddress.ipaddress,
network.id
))
self.debug("Enabling static NAT for IP: %s" %
public_ip.ipaddress.ipaddress)
try:
static_nat = StaticNATRule.create(
self.apiclient,
self.services["fw_rule"],
ipaddressid=public_ip.ipaddress.id
)
static_nat.enable(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
virtualmachineid=virtual_machine.id
)
self.debug("Static NAT enabled for IP: %s" %
public_ip.ipaddress.ipaddress)
except Exception as e:
self.fail("Failed to enable static NAT on IP: %s - %s" % (
public_ip.ipaddress.ipaddress, e))
public_ips = PublicIPAddress.list(
self.apiclient,
associatednetworkid=network.id,
listall=True,
isstaticnat=True
)
self.assertEqual(
isinstance(public_ips, list),
True,
"List public Ip for network should list the Ip addr"
)
self.assertEqual(
public_ips[0].ipaddress,
public_ip.ipaddress.ipaddress,
"Public Ip Address in the network created (%s) and listed (%s) do not match" % (
public_ips[0].ipaddress, public_ip.ipaddress.ipaddress)
)
self.debug("creating a FW rule on IP: %s" %
public_ip.ipaddress.ipaddress)
fw_rule = FireWallRule.create(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
protocol='TCP',
cidrlist=[self.services["fw_rule"]["cidr"]],
startport=self.services["fw_rule"]["startport"],
endport=self.services["fw_rule"]["endport"]
)
self.debug("Created a firewall rule on 22 port of IP: %s" %
public_ip.ipaddress.ipaddress)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip.ipaddress.ipaddress)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_2 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_2.ipaddress.ipaddress,
network.id
))
nat_rule = NATRule.create(
self.apiclient,
virtual_machine,
self.services["natrule_221"],
ipaddressid=public_ip_2.ipaddress.id,
openfirewall=True
)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_2.ipaddress.ipaddress,
reconnect=True,
port=self.services["natrule_221"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_3 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_3.ipaddress.ipaddress,
network.id
))
self.debug("Creating LB rule for IP address: %s" %
public_ip_3.ipaddress.ipaddress)
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
ipaddressid=public_ip_3.ipaddress.id,
accountid=self.account.name,
networkid=network.id
)
self.debug("Adding %s to the LB rule %s" % (
virtual_machine.name,
lb_rule.name
))
lb_rule.assign(self.apiclient, [virtual_machine])
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_3.ipaddress.ipaddress,
reconnect=True,
port=self.services["lbrule"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Starting router ID: %s" % master_router.id)
try:
Router.start(self.apiclient, id=master_router.id)
except Exception as e:
self.fail("Failed to start master router..")
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
for router in routers:
self.assertEqual(
router.state,
"Running",
"Router state should be running"
)
return
| 45.253679
| 128
| 0.429843
|
from nose.plugins.attrib import attr
from marvin.integration.lib.base import *
from marvin.integration.lib.utils import *
from marvin.integration.lib.common import *
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.cloudstackAPI import *
class Services:
def __init__(self):
self.services = {
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
"password": "password",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100,
"memory": 128,
},
"disk_offering": {
"displaytext": "Small",
"name": "Small",
"disksize": 1
},
"virtual_machine": {
"displayname": "Test VM",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"static_nat": {
"startport": 22,
"endport": 22,
"protocol": "TCP"
},
"network_offering": {
"name": 'Network offering-RVR services',
"displaytext": 'Network off-RVR services',
"guestiptype": 'Isolated',
"supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Firewall,Lb,UserData,StaticNat',
"traffictype": 'GUEST',
"availability": 'Optional',
"serviceProviderList": {
"Vpn": 'VirtualRouter',
"Dhcp": 'VirtualRouter',
"Dns": 'VirtualRouter',
"SourceNat": 'VirtualRouter',
"PortForwarding": 'VirtualRouter',
"Firewall": 'VirtualRouter',
"Lb": 'VirtualRouter',
"UserData": 'VirtualRouter',
"StaticNat": 'VirtualRouter',
},
"serviceCapabilityList": {
"SourceNat": {
"SupportedSourceNatTypes": "peraccount",
"RedundantRouter": "true",
},
"lb": {
"SupportedLbIsolation": "dedicated"
},
},
},
"host": {
"username": "root",
"password": "password",
"publicport": 22,
},
"network": {
"name": "Test Network",
"displaytext": "Test Network",
},
"lbrule": {
"name": "SSH",
"alg": "roundrobin",
"privateport": 22,
"publicport": 22,
"openfirewall": True,
},
"natrule": {
"privateport": 22,
"publicport": 22,
"protocol": "TCP"
},
"natrule_221": {
"privateport": 22,
"publicport": 221,
"protocol": "TCP"
},
"fw_rule": {
"startport": 1,
"endport": 6000,
"cidr": '55.55.0.0/11',
"protocol": 'TCP',
},
"ostype": 'CentOS 5.3 (64-bit)',
"sleep": 60,
}
class TestRedundantRouterRulesLifeCycle(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestRedundantRouterRulesLifeCycle,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.network_offering = NetworkOffering.create(
cls.api_client,
cls.services["network_offering"],
conservemode=True
)
cls.network_offering.update(cls.api_client, state='Enabled')
cls._cleanup = [
cls.service_offering,
cls.network_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = []
self.cleanup.insert(0, self.account)
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
self.debug("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_networkRules_afterRebootRouters(self):
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.debug("Network state: %s" % nw_response.state)
self.assertEqual(
nw_response.state,
"Allocated",
"The network should be in allocated state after creation"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
routers,
None,
"Routers should not be spawned when network is in allocated state"
)
self.debug("Deploying VM in account: %s" % self.account.name)
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"Vm should be in running state after deployment"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
if routers[0].redundantstate == 'MASTER':
master_router = routers[0]
backup_router = routers[1]
else:
master_router = routers[1]
backup_router = routers[0]
self.debug("Associating public IP for network: %s" % network.name)
public_ip = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip.ipaddress.ipaddress,
network.id
))
self.debug("Enabling static NAT for IP: %s" %
public_ip.ipaddress.ipaddress)
try:
static_nat = StaticNATRule.create(
self.apiclient,
self.services["fw_rule"],
ipaddressid=public_ip.ipaddress.id
)
static_nat.enable(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
virtualmachineid=virtual_machine.id
)
self.debug("Static NAT enabled for IP: %s" %
public_ip.ipaddress.ipaddress)
except Exception as e:
self.fail("Failed to enable static NAT on IP: %s - %s" % (
public_ip.ipaddress.ipaddress, e))
public_ips = PublicIPAddress.list(
self.apiclient,
associatednetworkid=network.id,
listall=True,
isstaticnat=True
)
self.assertEqual(
isinstance(public_ips, list),
True,
"List public Ip for network should list the Ip addr"
)
self.assertEqual(
public_ips[0].ipaddress,
public_ip.ipaddress.ipaddress,
"List public Ip for network should list the Ip addr"
)
self.debug("creating a FW rule on IP: %s" %
public_ip.ipaddress.ipaddress)
fw_rule = FireWallRule.create(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
protocol='TCP',
cidrlist=[self.services["fw_rule"]["cidr"]],
startport=self.services["fw_rule"]["startport"],
endport=self.services["fw_rule"]["endport"]
)
self.debug("Created a firewall rule on 22 port of IP: %s" %
public_ip.ipaddress.ipaddress)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_2 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_2.ipaddress.ipaddress,
network.id
))
nat_rule = NATRule.create(
self.apiclient,
virtual_machine,
self.services["natrule_221"],
ipaddressid=public_ip_2.ipaddress.id,
openfirewall=True
)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_3 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_3.ipaddress.ipaddress,
network.id
))
self.debug("Creating LB rule for IP address: %s" %
public_ip_3.ipaddress.ipaddress)
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
ipaddressid=public_ip_3.ipaddress.id,
accountid=self.account.name,
networkid=network.id
)
self.debug("Adding %s to the LB rule %s" % (
virtual_machine.name,
lb_rule.name
))
lb_rule.assign(self.apiclient, [virtual_machine])
self.debug("Starting router ID: %s" % master_router.id)
for router in routers:
try:
self.debug("Rebooting router ID: %s" % master_router.id)
cmd = rebootRouter.rebootRouterCmd()
cmd.id = router.id
self.apiclient.rebootRouter(cmd)
except Exception as e:
self.fail("Failed to reboot router..")
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
for router in routers:
self.assertEqual(
router.state,
"Running",
"Router state should be running"
)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip.ipaddress.ipaddress)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_2.ipaddress.ipaddress,
reconnect=True,
port=self.services["natrule_221"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_3.ipaddress.ipaddress,
reconnect=True,
port=self.services["lbrule"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_applyRules_restartRvRNetwork(self):
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.debug("Network state: %s" % nw_response.state)
self.assertEqual(
nw_response.state,
"Allocated",
"The network should be in allocated state after creation"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
routers,
None,
"Routers should not be spawned when network is in allocated state"
)
self.debug("Deploying VM in account: %s" % self.account.name)
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"Vm should be in running state after deployment"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
if routers[0].redundantstate == 'MASTER':
master_router = routers[0]
backup_router = routers[1]
else:
master_router = routers[1]
backup_router = routers[0]
self.debug("Associating public IP for network: %s" % network.name)
public_ip = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip.ipaddress.ipaddress,
network.id
))
self.debug("Enabling static NAT for IP: %s" %
public_ip.ipaddress.ipaddress)
try:
static_nat = StaticNATRule.create(
self.apiclient,
self.services["fw_rule"],
ipaddressid=public_ip.ipaddress.id
)
static_nat.enable(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
virtualmachineid=virtual_machine.id
)
self.debug("Static NAT enabled for IP: %s" %
public_ip.ipaddress.ipaddress)
except Exception as e:
self.fail("Failed to enable static NAT on IP: %s - %s" % (
public_ip.ipaddress.ipaddress, e))
public_ips = PublicIPAddress.list(
self.apiclient,
associatednetworkid=network.id,
listall=True,
isstaticnat=True
)
self.assertEqual(
isinstance(public_ips, list),
True,
"List public Ip for network should list the Ip addr"
)
self.assertEqual(
public_ips[0].ipaddress,
public_ip.ipaddress.ipaddress,
"List public Ip for network should list the Ip addr"
)
self.debug("creating a FW rule on IP: %s" %
public_ip.ipaddress.ipaddress)
fw_rule = FireWallRule.create(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
protocol='TCP',
cidrlist=[self.services["fw_rule"]["cidr"]],
startport=self.services["fw_rule"]["startport"],
endport=self.services["fw_rule"]["endport"]
)
self.debug("Created a firewall rule on 22 port of IP: %s" %
public_ip.ipaddress.ipaddress)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_2 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_2.ipaddress.ipaddress,
network.id
))
nat_rule = NATRule.create(
self.apiclient,
virtual_machine,
self.services["natrule_221"],
ipaddressid=public_ip_2.ipaddress.id,
openfirewall=True
)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_3 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_3.ipaddress.ipaddress,
network.id
))
self.debug("Creating LB rule for IP address: %s" %
public_ip_3.ipaddress.ipaddress)
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
ipaddressid=public_ip_3.ipaddress.id,
accountid=self.account.name,
networkid=network.id
)
self.debug("Adding %s to the LB rule %s" % (
virtual_machine.name,
lb_rule.name
))
lb_rule.assign(self.apiclient, [virtual_machine])
self.debug("Restarting network ID: %s with cleanup true" %
network.id)
try:
network.restart(self.apiclient, cleanup=True)
except Exception as e:
self.fail("Failed to cleanup network")
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
for router in routers:
self.assertEqual(
router.state,
"Running",
"Router state should be running"
)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip.ipaddress.ipaddress)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_2.ipaddress.ipaddress,
reconnect=True,
port=self.services["natrule_221"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_3.ipaddress.ipaddress,
reconnect=True,
port=self.services["lbrule"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Restarting network ID: %s with cleanup false" %
network.id)
try:
network.restart(self.apiclient, cleanup=False)
except Exception as e:
self.fail("Failed to cleanup network")
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
for router in routers:
self.assertEqual(
router.state,
"Running",
"Router state should be running"
)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip.ipaddress.ipaddress)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_2.ipaddress.ipaddress,
reconnect=True,
port=self.services["natrule_221"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_3.ipaddress.ipaddress,
reconnect=True,
port=self.services["lbrule"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_apply_and_delete_NetworkRulesOnRvR(self):
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.debug("Network state: %s" % nw_response.state)
self.assertEqual(
nw_response.state,
"Allocated",
"The network should be in allocated state after creation"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
routers,
None,
"Routers should not be spawned when network is in allocated state"
)
self.debug("Deploying VM in account: %s" % self.account.name)
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"Vm should be in running state after deployment"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
self.debug("Associating public IP for network: %s" % network.name)
public_ip = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip.ipaddress.ipaddress,
network.id
))
self.debug("Enabling static NAT for IP: %s" %
public_ip.ipaddress.ipaddress)
try:
static_nat = StaticNATRule.create(
self.apiclient,
self.services["fw_rule"],
ipaddressid=public_ip.ipaddress.id
)
self.debug("Static NAT enabled for IP: %s" %
public_ip.ipaddress.ipaddress)
static_nat.enable(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
virtualmachineid=virtual_machine.id
)
except Exception as e:
self.fail("Failed to enable static NAT on IP: %s - %s" % (
public_ip.ipaddress.ipaddress, e))
public_ips = PublicIPAddress.list(
self.apiclient,
associatednetworkid=network.id,
listall=True,
isstaticnat=True
)
self.assertEqual(
isinstance(public_ips, list),
True,
"List public Ip for network should list the Ip addr"
)
self.assertEqual(
public_ips[0].ipaddress,
public_ip.ipaddress.ipaddress,
"List public Ip for network should list the Ip addr"
)
self.debug("creating a FW rule on IP: %s" %
public_ip.ipaddress.ipaddress)
fw_rule = FireWallRule.create(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
protocol='TCP',
cidrlist=[self.services["fw_rule"]["cidr"]],
startport=self.services["fw_rule"]["startport"],
endport=self.services["fw_rule"]["endport"]
)
self.debug("Created a firewall rule on 22 port of IP: %s" %
public_ip.ipaddress.ipaddress)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip.ipaddress.ipaddress)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_2 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_2.ipaddress.ipaddress,
network.id
))
nat_rule = NATRule.create(
self.apiclient,
virtual_machine,
self.services["natrule_221"],
ipaddressid=public_ip_2.ipaddress.id,
openfirewall=True
)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_2.ipaddress.ipaddress,
reconnect=True,
port=self.services["natrule_221"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_3 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_3.ipaddress.ipaddress,
network.id
))
self.debug("Creating LB rule for IP address: %s" %
public_ip_3.ipaddress.ipaddress)
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
ipaddressid=public_ip_3.ipaddress.id,
accountid=self.account.name,
networkid=network.id
)
self.debug("Adding %s to the LB rule %s" % (
virtual_machine.name,
lb_rule.name
))
lb_rule.assign(self.apiclient, [virtual_machine])
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_3.ipaddress.ipaddress,
reconnect=True,
port=self.services["lbrule"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh", "needle"])
def test_applyNetworkRules_MasterDown_deleteNetworkRules(self):
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.debug("Network state: %s" % nw_response.state)
self.assertEqual(
nw_response.state,
"Allocated",
"The network should be in allocated state after creation"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
routers,
None,
"Routers should not be spawned when network is in allocated state"
)
self.debug("Deploying VM in account: %s" % self.account.name)
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"Vm should be in running state after deployment"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
if routers[0].redundantstate == 'MASTER':
master_router = routers[0]
backup_router = routers[1]
else:
master_router = routers[1]
backup_router = routers[0]
self.debug("Stopping router ID: %s" % master_router.id)
try:
Router.stop(self.apiclient, id=master_router.id)
except Exception as e:
self.fail("Failed to stop master router..")
self.debug("Associating public IP for network: %s" % network.name)
public_ip = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip.ipaddress.ipaddress,
network.id
))
self.debug("Enabling static NAT for IP: %s" %
public_ip.ipaddress.ipaddress)
try:
static_nat = StaticNATRule.create(
self.apiclient,
self.services["fw_rule"],
ipaddressid=public_ip.ipaddress.id
)
static_nat.enable(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
virtualmachineid=virtual_machine.id
)
self.debug("Static NAT enabled for IP: %s" %
public_ip.ipaddress.ipaddress)
except Exception as e:
self.fail("Failed to enable static NAT on IP: %s - %s" % (
public_ip.ipaddress.ipaddress, e))
public_ips = PublicIPAddress.list(
self.apiclient,
associatednetworkid=network.id,
listall=True,
isstaticnat=True
)
self.assertEqual(
isinstance(public_ips, list),
True,
"List public Ip for network should list the Ip addr"
)
self.assertEqual(
public_ips[0].ipaddress,
public_ip.ipaddress.ipaddress,
"Public Ip Address in the network created (%s) and listed (%s) do not match" % (
public_ips[0].ipaddress, public_ip.ipaddress.ipaddress)
)
self.debug("creating a FW rule on IP: %s" %
public_ip.ipaddress.ipaddress)
fw_rule = FireWallRule.create(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
protocol='TCP',
cidrlist=[self.services["fw_rule"]["cidr"]],
startport=self.services["fw_rule"]["startport"],
endport=self.services["fw_rule"]["endport"]
)
self.debug("Created a firewall rule on 22 port of IP: %s" %
public_ip.ipaddress.ipaddress)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip.ipaddress.ipaddress)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_2 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_2.ipaddress.ipaddress,
network.id
))
nat_rule = NATRule.create(
self.apiclient,
virtual_machine,
self.services["natrule_221"],
ipaddressid=public_ip_2.ipaddress.id,
openfirewall=True
)
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_2.ipaddress.ipaddress,
reconnect=True,
port=self.services["natrule_221"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Associating public IP for network: %s" % network.name)
public_ip_3 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id
)
self.debug("Associated %s with network %s" % (
public_ip_3.ipaddress.ipaddress,
network.id
))
self.debug("Creating LB rule for IP address: %s" %
public_ip_3.ipaddress.ipaddress)
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
ipaddressid=public_ip_3.ipaddress.id,
accountid=self.account.name,
networkid=network.id
)
self.debug("Adding %s to the LB rule %s" % (
virtual_machine.name,
lb_rule.name
))
lb_rule.assign(self.apiclient, [virtual_machine])
self.debug("Trying to SSH into the virtual machine")
try:
virtual_machine.get_ssh_client(
ipaddress=public_ip_3.ipaddress.ipaddress,
reconnect=True,
port=self.services["lbrule"]["publicport"]
)
self.debug("SSH to guest VM succeeded")
except Exception as e:
self.fail("SSH to guest VM failed: %s" % e)
self.debug("Starting router ID: %s" % master_router.id)
try:
Router.start(self.apiclient, id=master_router.id)
except Exception as e:
self.fail("Failed to start master router..")
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
for router in routers:
self.assertEqual(
router.state,
"Running",
"Router state should be running"
)
return
| true
| true
|
f719550eb352bbb1095167f47a860d9ae8edd55b
| 283
|
py
|
Python
|
src/com/python/socket/udp_client.py
|
Leeo1124/pythonDemo
|
72e2209c095301a3f1f61edfe03ea69c3c05be40
|
[
"Apache-2.0"
] | null | null | null |
src/com/python/socket/udp_client.py
|
Leeo1124/pythonDemo
|
72e2209c095301a3f1f61edfe03ea69c3c05be40
|
[
"Apache-2.0"
] | null | null | null |
src/com/python/socket/udp_client.py
|
Leeo1124/pythonDemo
|
72e2209c095301a3f1f61edfe03ea69c3c05be40
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on 2016-08-10
@author: Administrator
'''
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for data in [b'Michael', b'Tracy', b'Sarah']:
# Send data:
s.sendto(data, ('127.0.0.1', 9999))
# Receive data:
print(s.recv(1024).decode('utf-8'))
s.close()
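# A minimal sketch (not part of the original file) of the UDP server
# this client assumes is listening on 127.0.0.1:9999 -- bind a
# datagram socket and echo a greeting back for every packet received:
#
# import socket
# s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# s.bind(('127.0.0.1', 9999))
# while True:
#     data, addr = s.recvfrom(1024)
#     s.sendto(b'Hello, %s!' % data, addr)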
| 17.6875
| 52
| 0.636042
|
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for data in [b'Michael', b'Tracy', b'Sarah']:
s.sendto(data, ('127.0.0.1', 9999))
print(s.recv(1024).decode('utf-8'))
s.close()
| true
| true
|
f71955e2cbcdd5db22e26670801fd917e9622190
| 431
|
py
|
Python
|
app/core/migrations/0005_recipe_image.py
|
JopeAlgorta/django-recipe-api
|
a92ae3b206682564d147618f83794edaf2c9a785
|
[
"MIT"
] | null | null | null |
app/core/migrations/0005_recipe_image.py
|
JopeAlgorta/django-recipe-api
|
a92ae3b206682564d147618f83794edaf2c9a785
|
[
"MIT"
] | null | null | null |
app/core/migrations/0005_recipe_image.py
|
JopeAlgorta/django-recipe-api
|
a92ae3b206682564d147618f83794edaf2c9a785
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.14 on 2020-07-31 14:42
import core.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_recipe'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='image',
field=models.ImageField(null=True, upload_to=core.models.recipe_image_file_path),
),
]
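# The upload_to callable above is defined in core/models.py; a typical
# implementation (assumed here, not part of this migration) builds a
# collision-free uuid-based filename:
#
# import os
# import uuid
#
# def recipe_image_file_path(instance, filename):
#     """Generate the storage path for a new recipe image."""
#     ext = filename.split('.')[-1]
#     filename = f'{uuid.uuid4()}.{ext}'
#     return os.path.join('uploads/recipe/', filename)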
| 21.55
| 93
| 0.62181
|
import core.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_recipe'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='image',
field=models.ImageField(null=True, upload_to=core.models.recipe_image_file_path),
),
]
| true
| true
|
f71956fe3c17887634434668acacfe3048dd4355
| 4,815
|
py
|
Python
|
src/mem/ruby/network/garnet/GarnetNetwork.py
|
NickSica/gem5
|
87ffe8b4f75f3a6938144e4edc1ba0ba6f3f0610
|
[
"BSD-3-Clause"
] | null | null | null |
src/mem/ruby/network/garnet/GarnetNetwork.py
|
NickSica/gem5
|
87ffe8b4f75f3a6938144e4edc1ba0ba6f3f0610
|
[
"BSD-3-Clause"
] | null | null | null |
src/mem/ruby/network/garnet/GarnetNetwork.py
|
NickSica/gem5
|
87ffe8b4f75f3a6938144e4edc1ba0ba6f3f0610
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2008 Princeton University
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Tushar Krishna
#
from m5.params import *
from m5.proxy import *
from m5.objects.Network import RubyNetwork
from m5.objects.BasicRouter import BasicRouter
from m5.objects.ClockedObject import ClockedObject
class GarnetNetwork(RubyNetwork):
type = 'GarnetNetwork'
cxx_header = "mem/ruby/network/garnet/GarnetNetwork.hh"
cxx_class = 'gem5::ruby::garnet::GarnetNetwork'
num_rows = Param.Int(0, "number of rows if 2D (mesh/torus/..) topology")
num_cols = Param.Int(0, "number of columns if 2D (mesh/torus..) topology")
z_depth = Param.Int(0, "length of the z-dimension")
num_chiplets_x = Param.Int(0, "number of chiplets in the x-dimension")
num_chiplets_y = Param.Int(0, "number of chiplets in the y-dimension")
nu_chiplets_input = Param.String("", "non-uniform chiplet designation (start col, start row, end col, end row)")
wireless_input = Param.String("", "wireless router designation (if random, the string gives the number of wireless antennas to place per layer; if user-defined, it lists the exact routers the antennas are placed on as x,y,z,x,y,z...)")
wireless_input_pattern = Param.String("", "wireless antenna placement pattern (r=random, u=user-defined)")
wireless_width = Param.Int(0, "width of wireless routers")
wired_width = Param.Int(0, "width of wired routers")
ni_flit_size = Param.UInt32(16, "network interface flit size in bytes")
vcs_per_vnet = Param.UInt32(4, "virtual channels per virtual network")
buffers_per_data_vc = Param.UInt32(4, "buffers per data virtual channel")
buffers_per_ctrl_vc = Param.UInt32(1, "buffers per ctrl virtual channel")
routing_algorithm = Param.Int(0,
"0: Weight-based Table, 1: XY, 2:XYZ, 3:U_CHIPLETS, 4:NU_CHIPLETS, 5:WIRELESS")
enable_fault_model = Param.Bool(False, "enable network fault model")
fault_model = Param.FaultModel(NULL, "network fault model")
garnet_deadlock_threshold = Param.UInt32(50000,
"network-level deadlock threshold")
class GarnetNetworkInterface(ClockedObject):
type = 'GarnetNetworkInterface'
cxx_class = 'gem5::ruby::garnet::NetworkInterface'
cxx_header = "mem/ruby/network/garnet/NetworkInterface.hh"
id = Param.UInt32("ID in relation to other network interfaces")
vcs_per_vnet = Param.UInt32(Parent.vcs_per_vnet,
"virtual channels per virtual network")
virt_nets = Param.UInt32(Parent.number_of_virtual_networks,
"number of virtual networks")
garnet_deadlock_threshold = Param.UInt32(Parent.garnet_deadlock_threshold,
"network-level deadlock threshold")
class GarnetRouter(BasicRouter):
type = 'GarnetRouter'
cxx_class = 'gem5::ruby::garnet::Router'
cxx_header = "mem/ruby/network/garnet/Router.hh"
vcs_per_vnet = Param.UInt32(Parent.vcs_per_vnet,
"virtual channels per virtual network")
virt_nets = Param.UInt32(Parent.number_of_virtual_networks,
"number of virtual networks")
width = Param.UInt32(Parent.ni_flit_size,
"bit width supported by the router")
| 55.344828
| 258
| 0.727726
|
from m5.params import *
from m5.proxy import *
from m5.objects.Network import RubyNetwork
from m5.objects.BasicRouter import BasicRouter
from m5.objects.ClockedObject import ClockedObject
class GarnetNetwork(RubyNetwork):
type = 'GarnetNetwork'
cxx_header = "mem/ruby/network/garnet/GarnetNetwork.hh"
cxx_class = 'gem5::ruby::garnet::GarnetNetwork'
num_rows = Param.Int(0, "number of rows if 2D (mesh/torus/..) topology")
num_cols = Param.Int(0, "number of columns if 2D (mesh/torus..) topology")
z_depth = Param.Int(0, "length of the z-dimension")
num_chiplets_x = Param.Int(0, "number of chiplets in the x-dimension")
num_chiplets_y = Param.Int(0, "number of chiplets in the y-dimension")
nu_chiplets_input = Param.String("", "non-uniform chiplet designation (start col, start row, end col, end row)")
wireless_input = Param.String("", "wireless router designation (if random, the string gives the number of wireless antennas to place per layer; if user-defined, it lists the exact routers the antennas are placed on as x,y,z,x,y,z...)")
wireless_input_pattern = Param.String("", "wireless antenna placement pattern (r=random, u=user-defined)")
wireless_width = Param.Int(0, "width of wireless routers")
wired_width = Param.Int(0, "width of wired routers")
ni_flit_size = Param.UInt32(16, "network interface flit size in bytes")
vcs_per_vnet = Param.UInt32(4, "virtual channels per virtual network")
buffers_per_data_vc = Param.UInt32(4, "buffers per data virtual channel")
buffers_per_ctrl_vc = Param.UInt32(1, "buffers per ctrl virtual channel")
routing_algorithm = Param.Int(0,
"0: Weight-based Table, 1: XY, 2:XYZ, 3:U_CHIPLETS, 4:NU_CHIPLETS, 5:WIRELESS")
enable_fault_model = Param.Bool(False, "enable network fault model")
fault_model = Param.FaultModel(NULL, "network fault model")
garnet_deadlock_threshold = Param.UInt32(50000,
"network-level deadlock threshold")
class GarnetNetworkInterface(ClockedObject):
type = 'GarnetNetworkInterface'
cxx_class = 'gem5::ruby::garnet::NetworkInterface'
cxx_header = "mem/ruby/network/garnet/NetworkInterface.hh"
id = Param.UInt32("ID in relation to other network interfaces")
vcs_per_vnet = Param.UInt32(Parent.vcs_per_vnet,
"virtual channels per virtual network")
virt_nets = Param.UInt32(Parent.number_of_virtual_networks,
"number of virtual networks")
garnet_deadlock_threshold = Param.UInt32(Parent.garnet_deadlock_threshold,
"network-level deadlock threshold")
class GarnetRouter(BasicRouter):
type = 'GarnetRouter'
cxx_class = 'gem5::ruby::garnet::Router'
cxx_header = "mem/ruby/network/garnet/Router.hh"
vcs_per_vnet = Param.UInt32(Parent.vcs_per_vnet,
"virtual channels per virtual network")
virt_nets = Param.UInt32(Parent.number_of_virtual_networks,
"number of virtual networks")
width = Param.UInt32(Parent.ni_flit_size,
"bit width supported by the router")
| true
| true
|
f71957077b0dab80c0b89a88133f80aa1b7d1e31
| 23,266
|
py
|
Python
|
data_generators/data_options.py
|
Zack-Quintana/underdog-devs-ds-a
|
743c1b977eb52c1ca536df927ab1474949f1bd90
|
[
"MIT"
] | null | null | null |
data_generators/data_options.py
|
Zack-Quintana/underdog-devs-ds-a
|
743c1b977eb52c1ca536df927ab1474949f1bd90
|
[
"MIT"
] | null | null | null |
data_generators/data_options.py
|
Zack-Quintana/underdog-devs-ds-a
|
743c1b977eb52c1ca536df927ab1474949f1bd90
|
[
"MIT"
] | null | null | null |
import string
from itertools import chain
from math import ceil, floor
from random import randint, choice, random, choices, shuffle
male_first_names = (
"Liam", "Noah", "Oliver", "Elijah", "William", "James", "Benjamin", "Lucas",
"Henry", "Alexander", "Mason", "Michael", "Ethan", "Daniel", "Jacob",
"Logan", "Jackson", "Levi", "Sebastian", "Mateo", "Jack", "Owen",
"Theodore", "Aiden", "Samuel", "Joseph", "John", "David", "Wyatt",
"Matthew", "Luke", "Asher", "Carter", "Julian", "Grayson", "Leo", "Jayden",
"Gabriel", "Isaac", "Lincoln", "Anthony", "Hudson", "Dylan", "Ezra",
"Thomas", "Charles", "Christopher", "Jaxon", "Maverick", "Josiah", "Isaiah",
"Andrew", "Elias", "Joshua", "Nathan", "Caleb", "Ryan", "Adrian", "Miles",
"Eli", "Nolan", "Christian", "Aaron", "Cameron", "Ezekiel", "Colton",
"Luca", "Landon", "Hunter", "Jonathan", "Santiago", "Axel", "Easton",
"Cooper", "Jeremiah", "Angel", "Roman", "Connor", "Jameson", "Robert",
"Greyson", "Jordan", "Ian", "Carson", "Jaxson", "Leonardo", "Nicholas",
"Dominic", "Austin", "Everett", "Brooks", "Xavier", "Kai", "Jose", "Parker",
"Adam", "Jace", "Wesley", "Kayden", "Silas", "Bennett", "Declan", "Waylon",
"Weston", "Evan", "Emmett", "Micah", "Ryder", "Beau", "Damian", "Brayden",
"Gael", "Rowan", "Harrison", "Bryson", "Sawyer", "Amir", "Kingston",
"Jason", "Giovanni", "Vincent", "Ayden", "Chase", "Myles", "Diego",
"Nathaniel", "Legend", "Jonah", "River", "Tyler", "Cole", "Braxton",
"George", "Milo", "Zachary", "Ashton", "Luis", "Jasper", "Kaiden", "Adriel",
"Gavin", "Bentley", "Calvin", "Zion", "Juan", "Maxwell", "Max", "Ryker",
"Carlos", "Emmanuel", "Jayce", "Lorenzo", "Ivan", "Jude", "August", "Kevin",
"Malachi", "Elliott", "Rhett", "Archer", "Karter", "Arthur", "Luka",
"Elliot", "Thiago", "Brandon", "Camden", "Justin", "Jesus", "Maddox",
"King", "Theo", "Enzo", "Matteo", "Emiliano", "Dean", "Hayden", "Finn",
"Brody", "Antonio", "Abel", "Alex", "Tristan", "Graham", "Zayden", "Judah",
"Xander", "Miguel", "Atlas", "Messiah", "Barrett", "Tucker", "Timothy",
"Alan", "Edward", "Leon", "Dawson", "Eric", "Ace", "Victor", "Abraham",
"Nicolas", "Jesse", "Charlie", "Patrick", "Walker", "Joel", "Richard",
"Beckett", "Blake", "Alejandro", "Avery", "Grant", "Peter", "Oscar",
"Matias", "Amari", "Lukas", "Andres", "Arlo", "Colt", "Adonis", "Kyrie",
"Steven", "Felix", "Preston", "Marcus", "Holden", "Emilio", "Remington",
"Jeremy", "Kaleb", "Brantley", "Bryce", "Mark", "Knox", "Israel", "Phoenix",
"Kobe", "Nash", "Griffin", "Caden", "Kenneth", "Kyler", "Hayes", "Jax",
"Rafael", "Beckham", "Javier", "Maximus", "Simon", "Paul", "Omar", "Kaden",
"Kash", "Lane", "Bryan", "Riley", "Zane", "Louis", "Aidan", "Paxton",
"Maximiliano", "Karson", "Cash", "Cayden", "Emerson", "Tobias", "Ronan",
"Brian", "Dallas", "Bradley", "Jorge", "Walter", "Josue", "Khalil",
"Damien", "Jett", "Kairo", "Zander", "Andre", "Cohen", "Crew", "Hendrix",
"Colin", "Chance", "Malakai", "Clayton", "Daxton", "Malcolm", "Lennox",
"Martin", "Jaden", "Kayson", "Bodhi", "Francisco", "Cody", "Erick",
"Kameron", "Atticus", "Dante", "Jensen", "Cruz", "Finley", "Brady",
"Joaquin", "Anderson", "Gunner", "Muhammad", "Zayn", "Derek", "Raymond",
"Kyle", "Angelo", "Reid", "Spencer", "Nico", "Jaylen", "Jake", "Prince",
"Manuel", "Ali", "Gideon", "Stephen", "Ellis", "Orion", "Rylan", "Eduardo",
"Mario", "Rory", "Cristian", "Odin", "Tanner", "Julius", "Callum", "Sean",
"Kane", "Ricardo", "Travis", "Wade", "Warren", "Fernando", "Titus",
"Leonel", "Edwin", "Cairo", "Corbin", "Dakota", "Ismael", "Colson",
"Killian", "Major", "Tate", "Gianni", "Elian", "Remy", "Lawson", "Niko",
"Nasir", "Kade", "Armani", "Ezequiel", "Marshall", "Hector", "Desmond",
"Kason", "Garrett", "Jared", "Cyrus", "Russell", "Cesar", "Tyson", "Malik",
"Donovan", "Jaxton", "Cade", "Romeo", "Nehemiah", "Sergio", "Iker",
"Caiden", "Jay", "Pablo", "Devin", "Jeffrey", "Otto", "Kamari", "Ronin",
"Johnny", "Clark", "Ari", "Marco", "Edgar", "Bowen", "Jaiden", "Grady",
"Zayne", "Sullivan", "Jayceon", "Sterling", "Andy", "Conor", "Raiden",
"Royal", "Royce", "Solomon", "Trevor", "Winston", "Emanuel", "Finnegan",
"Pedro", "Luciano", "Harvey", "Franklin", "Noel", "Troy", "Princeton",
"Johnathan", "Erik", "Fabian", "Oakley", "Rhys", "Porter", "Hugo", "Frank",
"Damon", "Kendrick", "Mathias", "Milan", "Peyton", "Wilder", "Callan",
"Gregory", "Seth", "Matthias", "Briggs", "Ibrahim", "Roberto", "Conner",
"Quinn", "Kashton", "Sage", "Santino", "Kolton", "Alijah", "Dominick",
"Zyaire", "Apollo", "Kylo", "Reed", "Philip", "Kian", "Shawn", "Kaison",
"Leonidas", "Ayaan", "Lucca", "Memphis", "Ford", "Baylor", "Kyson", "Uriel",
"Allen", "Collin", "Ruben", "Archie", "Dalton", "Esteban", "Adan",
"Forrest", "Alonzo", "Isaias", "Leland", "Jase", "Dax", "Kasen", "Gage",
"Kamden", "Marcos", "Jamison", "Francis", "Hank", "Alexis", "Tripp",
"Frederick", "Jonas", "Stetson", "Cassius", "Izaiah", "Eden", "Maximilian",
"Rocco", "Tatum", "Keegan", "Aziel", "Moses", "Bruce", "Lewis", "Braylen",
"Omari", "Mack", "Augustus", "Enrique", "Armando", "Pierce", "Moises",
"Asa", "Shane", "Emmitt", "Soren", "Dorian", "Keanu", "Zaiden", "Raphael",
"Deacon", "Abdiel", "Kieran", "Phillip", "Ryland", "Zachariah", "Casey",
"Zaire", "Albert", "Baker", "Corey", "Kylan", "Denver", "Gunnar", "Jayson",
"Drew", "Callen", "Jasiah", "Drake", "Kannon", "Braylon", "Sonny", "Bo",
"Moshe", "Huxley", "Quentin", "Rowen", "Santana", "Cannon", "Kenzo",
"Wells", "Julio", "Nikolai", "Conrad", "Jalen", "Makai", "Benson",
"Derrick", "Gerardo", "Davis", "Abram", "Mohamed", "Ronald", "Raul",
"Arjun", "Dexter", "Kaysen", "Jaime", "Scott", "Lawrence", "Ariel",
"Skyler", "Danny", "Roland", "Chandler", "Yusuf", "Samson", "Case", "Zain",
"Roy", "Rodrigo", "Sutton", "Boone", "Saint", "Saul", "Jaziel", "Hezekiah",
"Alec", "Arturo", "Jamari", "Jaxtyn", "Julien", "Koa", "Reece", "Landen",
"Koda", "Darius", "Sylas", "Ares", "Kyree", "Boston", "Keith", "Taylor",
"Johan", "Edison", "Sincere", "Watson", "Jerry", "Nikolas", "Quincy",
"Shepherd", "Brycen", "Marvin", "Dariel", "Axton", "Donald", "Bodie",
"Finnley", "Onyx", "Rayan", "Raylan", "Brixton", "Colby", "Shiloh",
"Valentino", "Layton", "Trenton", "Landyn", "Alessandro", "Ahmad",
"Gustavo", "Ledger", "Ridge", "Ander", "Ahmed", "Kingsley", "Issac",
"Mauricio", "Tony", "Leonard", "Mohammed", "Uriah", "Duke", "Kareem",
"Lucian", "Marcelo", "Aarav", "Leandro", "Reign", "Clay", "Kohen", "Dennis",
"Samir", "Ermias", "Otis", "Emir", "Nixon", "Ty", "Sam", "Fletcher",
"Wilson", "Dustin", "Hamza", "Bryant", "Flynn", "Lionel", "Mohammad",
"Cason", "Jamir", "Aden", "Dakari", "Justice", "Dillon", "Layne", "Zaid",
"Alden", "Nelson", "Devon", "Titan", "Chris", "Khari", "Zeke", "Noe",
"Alberto", "Roger", "Brock", "Rex", "Quinton", "Alvin", "Cullen", "Azariah",
"Harlan", "Kellan", "Lennon", "Marcel", "Keaton", "Morgan", "Ricky", "Trey",
"Karsyn", "Langston", "Miller", "Chaim", "Salvador", "Amias", "Tadeo",
"Curtis", "Lachlan", "Amos", "Anakin", "Krew", "Tomas", "Jefferson",
"Yosef", "Bruno", "Korbin", "Augustine", "Cayson", "Mathew", "Vihaan",
"Jamie", "Clyde", "Brendan", "Jagger", "Carmelo", "Harry", "Nathanael",
"Mitchell", "Darren", "Ray", "Jedidiah", "Jimmy", "Lochlan", "Bellamy",
"Eddie", "Rayden", "Reese", "Stanley", "Joe", "Houston", "Douglas",
"Vincenzo", "Casen", "Emery", "Joziah", "Leighton", "Marcellus", "Atreus",
"Aron", "Hugh", "Musa", "Tommy", "Alfredo", "Junior", "Neil", "Westley",
"Banks", "Eliel", "Melvin", "Maximo", "Briar", "Colten", "Lance", "Nova",
"Trace", "Axl", "Ramon", "Vicente", "Brennan", "Caspian", "Remi", "Deandre",
"Legacy", "Lee", "Valentin", "Ben", "Louie", "Westin", "Wayne", "Benicio",
"Grey", "Zayd", "Gatlin", "Mekhi", "Orlando", "Bjorn", "Harley", "Alonso",
"Rio", "Aldo", "Byron", "Eliseo", "Ernesto", "Talon", "Thaddeus", "Brecken",
"Kace", "Kellen", "Enoch", "Kiaan", "Lian", "Creed", "Rohan", "Callahan",
"Jaxxon", "Ocean", "Crosby", "Dash", "Gary", "Mylo", "Ira", "Magnus",
"Salem", "Abdullah", "Kye", "Tru", "Forest", "Jon", "Misael", "Madden",
"Braden", "Carl", "Hassan", "Emory", "Kristian", "Alaric", "Ambrose",
"Dario", "Allan", "Bode", "Boden", "Juelz", "Kristopher", "Genesis",
"Idris", "Ameer", "Anders", "Darian", "Kase", "Aryan", "Dane", "Guillermo",
"Elisha", "Jakobe", "Thatcher", "Eugene", "Ishaan", "Larry", "Wesson",
"Yehuda", "Alvaro", "Bobby", "Bronson", "Dilan", "Kole", "Kyro", "Tristen",
"Blaze", "Brayan", "Jadiel", "Kamryn", "Demetrius", "Maurice", "Arian",
"Kabir", "Rocky", "Rudy", "Randy", "Rodney", "Yousef", "Felipe", "Robin",
"Aydin", "Dior", "Kaiser", "Van", "Brodie", "London", "Eithan", "Stefan",
"Ulises", "Camilo", "Branson", "Jakari", "Judson", "Yahir", "Zavier",
"Damari", "Jakob", "Jaxx", "Bentlee", "Cain", "Niklaus", "Rey", "Zahir",
"Aries", "Blaine", "Kyng", "Castiel", "Henrik", "Joey", "Khalid", "Bear",
"Graysen", "Jair", "Kylen", "Darwin", "Alfred", "Ayan", "Kenji", "Zakai",
"Avi", "Cory", "Fisher", "Jacoby", "Osiris", "Harlem", "Jamal", "Santos",
"Wallace", "Brett", "Fox", "Leif", "Maison", "Reuben", "Adler", "Zev",
"Calum", "Kelvin", "Zechariah", "Bridger", "Mccoy", "Seven", "Shepard",
"Azrael", "Leroy", "Terry", "Harold", "Mac", "Mordechai", "Ahmir", "Cal",
"Franco", "Trent", "Blaise", "Coen", "Dominik", "Marley", "Davion",
"Jeremias", "Riggs", "Jones", "Will", "Damir", "Dangelo", "Canaan", "Dion",
"Jabari", "Landry", "Salvatore", "Kody", "Hakeem", "Truett", "Gerald",
"Lyric", "Gordon", "Jovanni", "Kamdyn", "Alistair", "Cillian", "Foster",
"Terrance", "Murphy", "Zyair", "Cedric", "Rome", "Abner", "Colter",
"Dayton", "Jad", "Xzavier", "Rene", "Vance", "Duncan", "Frankie", "Bishop",
"Davian", "Everest", "Heath", "Jaxen", "Marlon", "Maxton", "Reginald",
"Harris", "Jericho", "Keenan", "Korbyn", "Wes", "Eliezer", "Jeffery",
"Kalel", "Kylian", "Turner", "Willie", "Rogelio", "Ephraim",
)
female_first_names = (
"Olivia", "Emma", "Ava", "Charlotte", "Sophia", "Amelia", "Isabella", "Mia",
"Evelyn", "Harper", "Camila", "Gianna", "Abigail", "Luna", "Ella",
"Elizabeth", "Sofia", "Emily", "Avery", "Mila", "Scarlett", "Eleanor",
"Madison", "Layla", "Penelope", "Aria", "Chloe", "Grace", "Ellie", "Nora",
"Hazel", "Zoey", "Riley", "Victoria", "Lily", "Aurora", "Violet", "Nova",
"Hannah", "Emilia", "Zoe", "Stella", "Everly", "Isla", "Leah", "Lillian",
"Addison", "Willow", "Lucy", "Paisley", "Natalie", "Naomi", "Eliana",
"Brooklyn", "Elena", "Aubrey", "Claire", "Ivy", "Kinsley", "Audrey", "Maya",
"Genesis", "Skylar", "Bella", "Aaliyah", "Madelyn", "Savannah", "Anna",
"Delilah", "Serenity", "Caroline", "Kennedy", "Valentina", "Ruby", "Sophie",
"Alice", "Gabriella", "Sadie", "Ariana", "Allison", "Hailey", "Autumn",
"Nevaeh", "Natalia", "Quinn", "Josephine", "Sarah", "Cora", "Emery",
"Samantha", "Piper", "Leilani", "Eva", "Everleigh", "Madeline", "Lydia",
"Jade", "Peyton", "Brielle", "Adeline", "Vivian", "Rylee", "Clara",
"Raelynn", "Melanie", "Melody", "Julia", "Athena", "Maria", "Liliana",
"Hadley", "Arya", "Rose", "Reagan", "Eliza", "Adalynn", "Kaylee", "Lyla",
"Mackenzie", "Alaia", "Isabelle", "Charlie", "Arianna", "Mary", "Remi",
"Margaret", "Iris", "Parker", "Ximena", "Eden", "Ayla", "Kylie", "Elliana",
"Josie", "Katherine", "Faith", "Alexandra", "Eloise", "Adalyn", "Amaya",
"Jasmine", "Amara", "Daisy", "Reese", "Valerie", "Brianna", "Cecilia",
"Andrea", "Summer", "Valeria", "Norah", "Ariella", "Esther", "Ashley",
"Emerson", "Aubree", "Isabel", "Anastasia", "Ryleigh", "Khloe", "Taylor",
"Londyn", "Lucia", "Emersyn", "Callie", "Sienna", "Blakely", "Kehlani",
"Genevieve", "Alina", "Bailey", "Juniper", "Maeve", "Molly", "Harmony",
"Georgia", "Magnolia", "Catalina", "Freya", "Juliette", "Sloane", "June",
"Sara", "Ada", "Kimberly", "River", "Ember", "Juliana", "Aliyah", "Millie",
"Brynlee", "Teagan", "Morgan", "Jordyn", "London", "Alaina", "Olive",
"Rosalie", "Alyssa", "Ariel", "Finley", "Arabella", "Journee", "Hope",
"Leila", "Alana", "Gemma", "Vanessa", "Gracie", "Noelle", "Marley", "Elise",
"Presley", "Kamila", "Zara", "Amy", "Kayla", "Payton", "Blake", "Ruth",
"Alani", "Annabelle", "Sage", "Aspen", "Laila", "Lila", "Rachel", "Trinity",
"Daniela", "Alexa", "Lilly", "Lauren", "Elsie", "Margot", "Adelyn", "Zuri",
"Brooke", "Sawyer", "Lilah", "Lola", "Selena", "Mya", "Sydney", "Diana",
"Ana", "Vera", "Alayna", "Nyla", "Elaina", "Rebecca", "Angela", "Kali",
"Alivia", "Raegan", "Rowan", "Phoebe", "Camilla", "Joanna", "Malia",
"Vivienne", "Dakota", "Brooklynn", "Evangeline", "Camille", "Jane",
"Nicole", "Catherine", "Jocelyn", "Julianna", "Lena", "Lucille", "Mckenna",
"Paige", "Adelaide", "Charlee", "Mariana", "Myla", "Mckenzie", "Tessa",
"Miriam", "Oakley", "Kailani", "Alayah", "Amira", "Adaline", "Phoenix",
"Milani", "Annie", "Lia", "Angelina", "Harley", "Cali", "Maggie", "Hayden",
"Leia", "Fiona", "Briella", "Journey", "Lennon", "Saylor", "Jayla", "Kaia",
"Thea", "Adriana", "Mariah", "Juliet", "Oaklynn", "Kiara", "Alexis",
"Haven", "Aniyah", "Delaney", "Gracelynn", "Kendall", "Winter", "Lilith",
"Logan", "Amiyah", "Evie", "Alexandria", "Gracelyn", "Gabriela", "Sutton",
"Harlow", "Madilyn", "Makayla", "Evelynn", "Gia", "Nina", "Amina",
"Giselle", "Brynn", "Blair", "Amari", "Octavia", "Michelle", "Talia",
"Demi", "Alaya", "Kaylani", "Izabella", "Fatima", "Tatum", "Makenzie",
"Lilliana", "Arielle", "Palmer", "Melissa", "Willa", "Samara", "Destiny",
"Dahlia", "Celeste", "Ainsley", "Rylie", "Reign", "Laura", "Adelynn",
"Gabrielle", "Remington", "Wren", "Brinley", "Amora", "Lainey", "Collins",
"Lexi", "Aitana", "Alessandra", "Kenzie", "Raelyn", "Elle", "Everlee",
"Haisley", "Hallie", "Wynter", "Daleyza", "Gwendolyn", "Paislee", "Ariyah",
"Veronica", "Heidi", "Anaya", "Cataleya", "Kira", "Avianna", "Felicity",
"Aylin", "Miracle", "Sabrina", "Lana", "Ophelia", "Elianna", "Royalty",
"Madeleine", "Esmeralda", "Joy", "Kalani", "Esme", "Jessica", "Leighton",
"Ariah", "Makenna", "Nylah", "Viviana", "Camryn", "Cassidy", "Dream",
"Luciana", "Maisie", "Stevie", "Kate", "Lyric", "Daniella", "Alicia",
"Daphne", "Frances", "Charli", "Raven", "Paris", "Nayeli", "Serena",
"Heaven", "Bianca", "Helen", "Hattie", "Averie", "Mabel", "Selah", "Allie",
"Marlee", "Kinley", "Regina", "Carmen", "Jennifer", "Jordan", "Alison",
"Stephanie", "Maren", "Kayleigh", "Angel", "Annalise", "Jacqueline",
"Braelynn", "Emory", "Rosemary", "Scarlet", "Amanda", "Danielle", "Emelia",
"Ryan", "Carolina", "Astrid", "Kensley", "Shiloh", "Maci", "Francesca",
"Rory", "Celine", "Kamryn", "Zariah", "Liana", "Poppy", "Maliyah", "Keira",
"Skyler", "Noa", "Skye", "Nadia", "Addilyn", "Rosie", "Eve", "Sarai",
"Edith", "Jolene", "Maddison", "Meadow", "Charleigh", "Matilda", "Elliott",
"Madelynn", "Holly", "Leona", "Azalea", "Katie", "Mira", "Ari", "Kaitlyn",
"Danna", "Cameron", "Kyla", "Bristol", "Kora", "Armani", "Nia", "Malani",
"Dylan", "Remy", "Maia", "Dior", "Legacy", "Alessia", "Shelby", "Maryam",
"Sylvia", "Yaretzi", "Lorelei", "Madilynn", "Abby", "Helena", "Jimena",
"Elisa", "Renata", "Amber", "Aviana", "Carter", "Emmy", "Haley", "Alondra",
"Elaine", "Erin", "April", "Emely", "Imani", "Kennedi", "Lorelai", "Hanna",
"Kelsey", "Aurelia", "Colette", "Jaliyah", "Kylee", "Macie", "Aisha",
"Dorothy", "Charley", "Kathryn", "Adelina", "Adley", "Monroe", "Sierra",
"Ailani", "Miranda", "Mikayla", "Alejandra", "Amirah", "Jada", "Jazlyn",
"Jenna", "Jayleen", "Beatrice", "Kendra", "Lyra", "Nola", "Emberly",
"Mckinley", "Myra", "Katalina", "Antonella", "Zelda", "Alanna", "Amaia",
"Priscilla", "Briar", "Kaliyah", "Itzel", "Oaklyn", "Alma", "Mallory",
"Novah", "Amalia", "Fernanda", "Alia", "Angelica", "Elliot", "Justice",
"Mae", "Cecelia", "Gloria", "Ariya", "Virginia", "Cheyenne", "Aleah",
"Jemma", "Henley", "Meredith", "Leyla", "Lennox", "Ensley", "Zahra",
"Reina", "Frankie", "Lylah", "Nalani", "Reyna", "Saige", "Ivanna", "Aleena",
"Emerie", "Ivory", "Leslie", "Alora", "Ashlyn", "Bethany", "Bonnie",
"Sasha", "Xiomara", "Salem", "Adrianna", "Dayana", "Clementine", "Karina",
"Karsyn", "Emmie", "Julie", "Julieta", "Briana", "Carly", "Macy", "Marie",
"Oaklee", "Christina", "Malaysia", "Ellis", "Irene", "Anne", "Anahi",
"Mara", "Rhea", "Davina", "Dallas", "Jayda", "Mariam", "Skyla", "Siena",
"Elora", "Marilyn", "Jazmin", "Megan", "Rosa", "Savanna", "Allyson",
"Milan", "Coraline", "Johanna", "Melany", "Chelsea", "Michaela", "Melina",
"Angie", "Cassandra", "Yara", "Kassidy", "Liberty", "Lilian", "Avah",
"Anya", "Laney", "Navy", "Opal", "Amani", "Zaylee", "Mina", "Sloan",
"Romina", "Ashlynn", "Aliza", "Liv", "Malaya", "Blaire", "Janelle", "Kara",
"Analia", "Hadassah", "Hayley", "Karla", "Chaya", "Cadence", "Kyra",
"Alena", "Ellianna", "Katelyn", "Kimber", "Laurel", "Lina", "Capri",
"Braelyn", "Faye", "Kamiyah", "Kenna", "Louise", "Calliope", "Kaydence",
"Nala", "Tiana", "Aileen", "Sunny", "Zariyah", "Milana", "Giuliana",
"Eileen", "Elodie", "Rayna", "Monica", "Galilea", "Journi", "Lara",
"Marina", "Aliana", "Harmoni", "Jamie", "Holland", "Emmalyn", "Lauryn",
"Chanel", "Tinsley", "Jessie", "Lacey", "Elyse", "Janiyah", "Jolie", "Ezra",
"Marleigh", "Roselyn", "Lillie", "Louisa", "Madisyn", "Penny", "Kinslee",
"Treasure", "Zaniyah", "Estella", "Jaylah", "Khaleesi", "Alexia", "Dulce",
"Indie", "Maxine", "Waverly", "Giovanna", "Miley", "Saoirse", "Estrella",
"Greta", "Rosalia", "Mylah", "Teresa", "Bridget", "Kelly", "Adalee",
"Aubrie", "Lea", "Harlee", "Anika", "Itzayana", "Hana", "Kaisley",
"Mikaela", "Naya", "Avalynn", "Margo", "Sevyn", "Florence", "Keilani",
"Lyanna", "Joelle", "Kataleya", "Royal", "Averi", "Kallie", "Winnie",
"Baylee", "Martha", "Pearl", "Alaiya", "Rayne", "Sylvie", "Brylee",
"Jazmine", "Ryann", "Kori", "Noemi", "Haylee", "Julissa", "Celia", "Laylah",
"Rebekah", "Rosalee", "Aya", "Bria", "Adele", "Aubrielle", "Tiffany",
"Addyson", "Kai", "Bellamy", "Leilany", "Princess", "Chana", "Estelle",
"Selene", "Sky", "Dani", "Thalia", "Ellen", "Rivka", "Amelie", "Andi",
"Kynlee", "Raina", "Vienna", "Alianna", "Livia", "Madalyn", "Mercy",
"Novalee", "Ramona", "Vada", "Berkley", "Gwen", "Persephone", "Milena",
"Paula", "Clare", "Kairi", "Linda", "Paulina", "Kamilah", "Amoura",
"Hunter", "Isabela", "Karen", "Marianna", "Sariyah", "Theodora", "Annika",
"Kyleigh", "Nellie", "Scarlette", "Keyla", "Kailey", "Mavis", "Lilianna",
"Rosalyn", "Sariah", "Tori", "Yareli", "Aubriella", "Bexley", "Bailee",
"Jianna", "Keily", "Annabella", "Azariah", "Denisse", "Promise", "August",
"Hadlee", "Halle", "Fallon", "Oakleigh", "Zaria", "Jaylin", "Paisleigh",
"Crystal", "Ila", "Aliya", "Cynthia", "Giana", "Maleah", "Rylan", "Aniya",
"Denise", "Emmeline", "Scout", "Simone", "Noah", "Zora", "Meghan", "Landry",
"Ainhoa", "Lilyana", "Noor", "Belen", "Brynleigh", "Cleo", "Meilani",
"Karter", "Amaris", "Frida", "Iliana", "Violeta", "Addisyn", "Nancy",
"Denver", "Leanna", "Braylee", "Kiana", "Wrenley", "Barbara", "Khalani",
"Aspyn", "Ellison", "Judith", "Robin", "Valery", "Aila", "Deborah", "Cara",
"Clarissa", "Iyla", "Lexie", "Anais", "Kaylie", "Nathalie", "Alisson",
"Della", "Addilynn", "Elsa", "Zoya", "Layne", "Marlowe", "Jovie", "Kenia",
"Samira", "Jaylee", "Jenesis", "Etta", "Shay", "Amayah", "Avayah", "Egypt",
"Flora", "Raquel", "Whitney", "Zola", "Giavanna", "Raya", "Veda", "Halo",
"Paloma", "Nataly", "Whitley", "Dalary", "Drew", "Guadalupe", "Kamari",
"Esperanza", "Loretta", "Malayah", "Natasha", "Stormi", "Ansley", "Carolyn",
"Corinne", "Paola", "Brittany", "Emerald", "Freyja", "Zainab", "Artemis",
"Jillian", "Kimora", "Zoie", "Aislinn", "Emmaline", "Ayleen", "Queen",
"Jaycee", "Murphy", "Nyomi", "Elina", "Hadleigh", "Marceline", "Marisol",
"Yasmin", "Zendaya", "Chandler", "Emani", "Jaelynn", "Kaiya", "Nathalia",
"Violette", "Joyce", "Paityn", "Elisabeth", "Emmalynn", "Luella",
"Yamileth", "Aarya", "Luisa", "Zhuri", "Araceli", "Harleigh", "Madalynn",
"Melani", "Laylani", "Magdalena", "Mazikeen", "Belle", "Kadence",
)
last_names = (
"Smith", "Johnson", "Williams", "Brown", "Jones", "Garcia", "Miller",
"Davis", "Rodriguez", "Martinez", "Hernandez", "Lopez", "Gonzales",
"Wilson", "Anderson", "Thomas", "Taylor", "Moore", "Jackson", "Martin",
"Lee", "Perez", "Thompson", "White", "Harris", "Sanchez", "Clark",
"Ramirez", "Lewis", "Robinson", "Walker", "Young", "Allen", "King",
"Wright", "Scott", "Torres", "Nguyen", "Hill", "Flores", "Green", "Adams",
"Nelson", "Baker", "Hall", "Rivera", "Campbell", "Mitchell", "Carter",
"Roberts", "Gomez", "Phillips", "Evans", "Turner", "Diaz", "Parker", "Cruz",
"Edwards", "Collins", "Reyes", "Stewart", "Morris", "Morales", "Murphy",
"Cook", "Rogers", "Gutierrez", "Ortiz", "Morgan", "Cooper", "Peterson",
"Bailey", "Reed", "Kelly", "Howard", "Ramos", "Kim", "Cox", "Ward",
"Richardson", "Watson", "Brooks", "Chavez", "Wood", "James", "Bennet",
"Gray", "Mendoza", "Ruiz", "Hughes", "Price", "Alvarez", "Castillo",
"Sanders", "Patel", "Myers", "Long", "Ross", "Foster", "Jimenez",
)
skill_levels = (
"Beginner", "Intermediate", "Advanced", "Expert",
)
subjects = (
"Web: HTML, CSS, JavaScript", "Data Science: Python",
"Android: Java", "iOS: Swift", "Career Development",
"General Programming",
)
resource_items = ("Laptop", "Books", "Scholarships",
"Mental Health Need", "Financial stipends")
disability = (True, False)
work_status = (True, False)
receiving_assistance = (True, False)
convictions = (
"Felony", "Misdemeanor", "Infraction",
)
feedbacks = (
"Not Recommended, Poor", "Conflicted, Fair", "Recommended, Good",
"Highly Recommended, Very Good", "Best, Excellent",
)
topics = (
"GCA Help", "Resume Help", "Job Search", "Progress Check"
)
def random_first_name(percent_male: int = 50):
    # Return a male name percent_male% of the time, otherwise a female name.
    if randint(1, 100) > percent_male:
        return choice(female_first_names)
    else:
        return choice(male_first_names)
def percent_true(percent):
    # True on roughly percent% of calls: random() is uniform on [0, 1).
    return 100 * random() < percent
def generate_uuid(n_len: int):
    # Build an n_len-character pseudo-UUID: half ASCII letters, half digits, shuffled.
    n1 = ceil(n_len / 2)
    n2 = floor(n_len / 2)
    prefix = choices(string.ascii_letters, k=n1)
    suffix = map(str, choices(range(0, 10), k=n2))  # stop is exclusive, so digits 0-9 need range(0, 10)
uuid_list = list(chain(prefix, suffix))
shuffle(uuid_list)
uuid = "".join(uuid_list)
return uuid
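A minimal usage sketch for the three helpers above (an editor's addition, not part of the source file; printed values are illustrative since every call draws fresh randomness):
if __name__ == "__main__":
    print(random_first_name(percent_male=75))  # a male name ~75% of the time, e.g. "Liam"
    print(percent_true(30))                    # True on roughly 30% of calls
    print(generate_uuid(8))                    # 4 letters + 4 digits shuffled, e.g. "aF3k7Q12"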
| 65.538028
| 80
| 0.568297
|
import string
from itertools import chain
from math import ceil, floor
from random import randint, choice, random, choices, shuffle
male_first_names = (
"Liam", "Noah", "Oliver", "Elijah", "William", "James", "Benjamin", "Lucas",
"Henry", "Alexander", "Mason", "Michael", "Ethan", "Daniel", "Jacob",
"Logan", "Jackson", "Levi", "Sebastian", "Mateo", "Jack", "Owen",
"Theodore", "Aiden", "Samuel", "Joseph", "John", "David", "Wyatt",
"Matthew", "Luke", "Asher", "Carter", "Julian", "Grayson", "Leo", "Jayden",
"Gabriel", "Isaac", "Lincoln", "Anthony", "Hudson", "Dylan", "Ezra",
"Thomas", "Charles", "Christopher", "Jaxon", "Maverick", "Josiah", "Isaiah",
"Andrew", "Elias", "Joshua", "Nathan", "Caleb", "Ryan", "Adrian", "Miles",
"Eli", "Nolan", "Christian", "Aaron", "Cameron", "Ezekiel", "Colton",
"Luca", "Landon", "Hunter", "Jonathan", "Santiago", "Axel", "Easton",
"Cooper", "Jeremiah", "Angel", "Roman", "Connor", "Jameson", "Robert",
"Greyson", "Jordan", "Ian", "Carson", "Jaxson", "Leonardo", "Nicholas",
"Dominic", "Austin", "Everett", "Brooks", "Xavier", "Kai", "Jose", "Parker",
"Adam", "Jace", "Wesley", "Kayden", "Silas", "Bennett", "Declan", "Waylon",
"Weston", "Evan", "Emmett", "Micah", "Ryder", "Beau", "Damian", "Brayden",
"Gael", "Rowan", "Harrison", "Bryson", "Sawyer", "Amir", "Kingston",
"Jason", "Giovanni", "Vincent", "Ayden", "Chase", "Myles", "Diego",
"Nathaniel", "Legend", "Jonah", "River", "Tyler", "Cole", "Braxton",
"George", "Milo", "Zachary", "Ashton", "Luis", "Jasper", "Kaiden", "Adriel",
"Gavin", "Bentley", "Calvin", "Zion", "Juan", "Maxwell", "Max", "Ryker",
"Carlos", "Emmanuel", "Jayce", "Lorenzo", "Ivan", "Jude", "August", "Kevin",
"Malachi", "Elliott", "Rhett", "Archer", "Karter", "Arthur", "Luka",
"Elliot", "Thiago", "Brandon", "Camden", "Justin", "Jesus", "Maddox",
"King", "Theo", "Enzo", "Matteo", "Emiliano", "Dean", "Hayden", "Finn",
"Brody", "Antonio", "Abel", "Alex", "Tristan", "Graham", "Zayden", "Judah",
"Xander", "Miguel", "Atlas", "Messiah", "Barrett", "Tucker", "Timothy",
"Alan", "Edward", "Leon", "Dawson", "Eric", "Ace", "Victor", "Abraham",
"Nicolas", "Jesse", "Charlie", "Patrick", "Walker", "Joel", "Richard",
"Beckett", "Blake", "Alejandro", "Avery", "Grant", "Peter", "Oscar",
"Matias", "Amari", "Lukas", "Andres", "Arlo", "Colt", "Adonis", "Kyrie",
"Steven", "Felix", "Preston", "Marcus", "Holden", "Emilio", "Remington",
"Jeremy", "Kaleb", "Brantley", "Bryce", "Mark", "Knox", "Israel", "Phoenix",
"Kobe", "Nash", "Griffin", "Caden", "Kenneth", "Kyler", "Hayes", "Jax",
"Rafael", "Beckham", "Javier", "Maximus", "Simon", "Paul", "Omar", "Kaden",
"Kash", "Lane", "Bryan", "Riley", "Zane", "Louis", "Aidan", "Paxton",
"Maximiliano", "Karson", "Cash", "Cayden", "Emerson", "Tobias", "Ronan",
"Brian", "Dallas", "Bradley", "Jorge", "Walter", "Josue", "Khalil",
"Damien", "Jett", "Kairo", "Zander", "Andre", "Cohen", "Crew", "Hendrix",
"Colin", "Chance", "Malakai", "Clayton", "Daxton", "Malcolm", "Lennox",
"Martin", "Jaden", "Kayson", "Bodhi", "Francisco", "Cody", "Erick",
"Kameron", "Atticus", "Dante", "Jensen", "Cruz", "Finley", "Brady",
"Joaquin", "Anderson", "Gunner", "Muhammad", "Zayn", "Derek", "Raymond",
"Kyle", "Angelo", "Reid", "Spencer", "Nico", "Jaylen", "Jake", "Prince",
"Manuel", "Ali", "Gideon", "Stephen", "Ellis", "Orion", "Rylan", "Eduardo",
"Mario", "Rory", "Cristian", "Odin", "Tanner", "Julius", "Callum", "Sean",
"Kane", "Ricardo", "Travis", "Wade", "Warren", "Fernando", "Titus",
"Leonel", "Edwin", "Cairo", "Corbin", "Dakota", "Ismael", "Colson",
"Killian", "Major", "Tate", "Gianni", "Elian", "Remy", "Lawson", "Niko",
"Nasir", "Kade", "Armani", "Ezequiel", "Marshall", "Hector", "Desmond",
"Kason", "Garrett", "Jared", "Cyrus", "Russell", "Cesar", "Tyson", "Malik",
"Donovan", "Jaxton", "Cade", "Romeo", "Nehemiah", "Sergio", "Iker",
"Caiden", "Jay", "Pablo", "Devin", "Jeffrey", "Otto", "Kamari", "Ronin",
"Johnny", "Clark", "Ari", "Marco", "Edgar", "Bowen", "Jaiden", "Grady",
"Zayne", "Sullivan", "Jayceon", "Sterling", "Andy", "Conor", "Raiden",
"Royal", "Royce", "Solomon", "Trevor", "Winston", "Emanuel", "Finnegan",
"Pedro", "Luciano", "Harvey", "Franklin", "Noel", "Troy", "Princeton",
"Johnathan", "Erik", "Fabian", "Oakley", "Rhys", "Porter", "Hugo", "Frank",
"Damon", "Kendrick", "Mathias", "Milan", "Peyton", "Wilder", "Callan",
"Gregory", "Seth", "Matthias", "Briggs", "Ibrahim", "Roberto", "Conner",
"Quinn", "Kashton", "Sage", "Santino", "Kolton", "Alijah", "Dominick",
"Zyaire", "Apollo", "Kylo", "Reed", "Philip", "Kian", "Shawn", "Kaison",
"Leonidas", "Ayaan", "Lucca", "Memphis", "Ford", "Baylor", "Kyson", "Uriel",
"Allen", "Collin", "Ruben", "Archie", "Dalton", "Esteban", "Adan",
"Forrest", "Alonzo", "Isaias", "Leland", "Jase", "Dax", "Kasen", "Gage",
"Kamden", "Marcos", "Jamison", "Francis", "Hank", "Alexis", "Tripp",
"Frederick", "Jonas", "Stetson", "Cassius", "Izaiah", "Eden", "Maximilian",
"Rocco", "Tatum", "Keegan", "Aziel", "Moses", "Bruce", "Lewis", "Braylen",
"Omari", "Mack", "Augustus", "Enrique", "Armando", "Pierce", "Moises",
"Asa", "Shane", "Emmitt", "Soren", "Dorian", "Keanu", "Zaiden", "Raphael",
"Deacon", "Abdiel", "Kieran", "Phillip", "Ryland", "Zachariah", "Casey",
"Zaire", "Albert", "Baker", "Corey", "Kylan", "Denver", "Gunnar", "Jayson",
"Drew", "Callen", "Jasiah", "Drake", "Kannon", "Braylon", "Sonny", "Bo",
"Moshe", "Huxley", "Quentin", "Rowen", "Santana", "Cannon", "Kenzo",
"Wells", "Julio", "Nikolai", "Conrad", "Jalen", "Makai", "Benson",
"Derrick", "Gerardo", "Davis", "Abram", "Mohamed", "Ronald", "Raul",
"Arjun", "Dexter", "Kaysen", "Jaime", "Scott", "Lawrence", "Ariel",
"Skyler", "Danny", "Roland", "Chandler", "Yusuf", "Samson", "Case", "Zain",
"Roy", "Rodrigo", "Sutton", "Boone", "Saint", "Saul", "Jaziel", "Hezekiah",
"Alec", "Arturo", "Jamari", "Jaxtyn", "Julien", "Koa", "Reece", "Landen",
"Koda", "Darius", "Sylas", "Ares", "Kyree", "Boston", "Keith", "Taylor",
"Johan", "Edison", "Sincere", "Watson", "Jerry", "Nikolas", "Quincy",
"Shepherd", "Brycen", "Marvin", "Dariel", "Axton", "Donald", "Bodie",
"Finnley", "Onyx", "Rayan", "Raylan", "Brixton", "Colby", "Shiloh",
"Valentino", "Layton", "Trenton", "Landyn", "Alessandro", "Ahmad",
"Gustavo", "Ledger", "Ridge", "Ander", "Ahmed", "Kingsley", "Issac",
"Mauricio", "Tony", "Leonard", "Mohammed", "Uriah", "Duke", "Kareem",
"Lucian", "Marcelo", "Aarav", "Leandro", "Reign", "Clay", "Kohen", "Dennis",
"Samir", "Ermias", "Otis", "Emir", "Nixon", "Ty", "Sam", "Fletcher",
"Wilson", "Dustin", "Hamza", "Bryant", "Flynn", "Lionel", "Mohammad",
"Cason", "Jamir", "Aden", "Dakari", "Justice", "Dillon", "Layne", "Zaid",
"Alden", "Nelson", "Devon", "Titan", "Chris", "Khari", "Zeke", "Noe",
"Alberto", "Roger", "Brock", "Rex", "Quinton", "Alvin", "Cullen", "Azariah",
"Harlan", "Kellan", "Lennon", "Marcel", "Keaton", "Morgan", "Ricky", "Trey",
"Karsyn", "Langston", "Miller", "Chaim", "Salvador", "Amias", "Tadeo",
"Curtis", "Lachlan", "Amos", "Anakin", "Krew", "Tomas", "Jefferson",
"Yosef", "Bruno", "Korbin", "Augustine", "Cayson", "Mathew", "Vihaan",
"Jamie", "Clyde", "Brendan", "Jagger", "Carmelo", "Harry", "Nathanael",
"Mitchell", "Darren", "Ray", "Jedidiah", "Jimmy", "Lochlan", "Bellamy",
"Eddie", "Rayden", "Reese", "Stanley", "Joe", "Houston", "Douglas",
"Vincenzo", "Casen", "Emery", "Joziah", "Leighton", "Marcellus", "Atreus",
"Aron", "Hugh", "Musa", "Tommy", "Alfredo", "Junior", "Neil", "Westley",
"Banks", "Eliel", "Melvin", "Maximo", "Briar", "Colten", "Lance", "Nova",
"Trace", "Axl", "Ramon", "Vicente", "Brennan", "Caspian", "Remi", "Deandre",
"Legacy", "Lee", "Valentin", "Ben", "Louie", "Westin", "Wayne", "Benicio",
"Grey", "Zayd", "Gatlin", "Mekhi", "Orlando", "Bjorn", "Harley", "Alonso",
"Rio", "Aldo", "Byron", "Eliseo", "Ernesto", "Talon", "Thaddeus", "Brecken",
"Kace", "Kellen", "Enoch", "Kiaan", "Lian", "Creed", "Rohan", "Callahan",
"Jaxxon", "Ocean", "Crosby", "Dash", "Gary", "Mylo", "Ira", "Magnus",
"Salem", "Abdullah", "Kye", "Tru", "Forest", "Jon", "Misael", "Madden",
"Braden", "Carl", "Hassan", "Emory", "Kristian", "Alaric", "Ambrose",
"Dario", "Allan", "Bode", "Boden", "Juelz", "Kristopher", "Genesis",
"Idris", "Ameer", "Anders", "Darian", "Kase", "Aryan", "Dane", "Guillermo",
"Elisha", "Jakobe", "Thatcher", "Eugene", "Ishaan", "Larry", "Wesson",
"Yehuda", "Alvaro", "Bobby", "Bronson", "Dilan", "Kole", "Kyro", "Tristen",
"Blaze", "Brayan", "Jadiel", "Kamryn", "Demetrius", "Maurice", "Arian",
"Kabir", "Rocky", "Rudy", "Randy", "Rodney", "Yousef", "Felipe", "Robin",
"Aydin", "Dior", "Kaiser", "Van", "Brodie", "London", "Eithan", "Stefan",
"Ulises", "Camilo", "Branson", "Jakari", "Judson", "Yahir", "Zavier",
"Damari", "Jakob", "Jaxx", "Bentlee", "Cain", "Niklaus", "Rey", "Zahir",
"Aries", "Blaine", "Kyng", "Castiel", "Henrik", "Joey", "Khalid", "Bear",
"Graysen", "Jair", "Kylen", "Darwin", "Alfred", "Ayan", "Kenji", "Zakai",
"Avi", "Cory", "Fisher", "Jacoby", "Osiris", "Harlem", "Jamal", "Santos",
"Wallace", "Brett", "Fox", "Leif", "Maison", "Reuben", "Adler", "Zev",
"Calum", "Kelvin", "Zechariah", "Bridger", "Mccoy", "Seven", "Shepard",
"Azrael", "Leroy", "Terry", "Harold", "Mac", "Mordechai", "Ahmir", "Cal",
"Franco", "Trent", "Blaise", "Coen", "Dominik", "Marley", "Davion",
"Jeremias", "Riggs", "Jones", "Will", "Damir", "Dangelo", "Canaan", "Dion",
"Jabari", "Landry", "Salvatore", "Kody", "Hakeem", "Truett", "Gerald",
"Lyric", "Gordon", "Jovanni", "Kamdyn", "Alistair", "Cillian", "Foster",
"Terrance", "Murphy", "Zyair", "Cedric", "Rome", "Abner", "Colter",
"Dayton", "Jad", "Xzavier", "Rene", "Vance", "Duncan", "Frankie", "Bishop",
"Davian", "Everest", "Heath", "Jaxen", "Marlon", "Maxton", "Reginald",
"Harris", "Jericho", "Keenan", "Korbyn", "Wes", "Eliezer", "Jeffery",
"Kalel", "Kylian", "Turner", "Willie", "Rogelio", "Ephraim",
)
female_first_names = (
"Olivia", "Emma", "Ava", "Charlotte", "Sophia", "Amelia", "Isabella", "Mia",
"Evelyn", "Harper", "Camila", "Gianna", "Abigail", "Luna", "Ella",
"Elizabeth", "Sofia", "Emily", "Avery", "Mila", "Scarlett", "Eleanor",
"Madison", "Layla", "Penelope", "Aria", "Chloe", "Grace", "Ellie", "Nora",
"Hazel", "Zoey", "Riley", "Victoria", "Lily", "Aurora", "Violet", "Nova",
"Hannah", "Emilia", "Zoe", "Stella", "Everly", "Isla", "Leah", "Lillian",
"Addison", "Willow", "Lucy", "Paisley", "Natalie", "Naomi", "Eliana",
"Brooklyn", "Elena", "Aubrey", "Claire", "Ivy", "Kinsley", "Audrey", "Maya",
"Genesis", "Skylar", "Bella", "Aaliyah", "Madelyn", "Savannah", "Anna",
"Delilah", "Serenity", "Caroline", "Kennedy", "Valentina", "Ruby", "Sophie",
"Alice", "Gabriella", "Sadie", "Ariana", "Allison", "Hailey", "Autumn",
"Nevaeh", "Natalia", "Quinn", "Josephine", "Sarah", "Cora", "Emery",
"Samantha", "Piper", "Leilani", "Eva", "Everleigh", "Madeline", "Lydia",
"Jade", "Peyton", "Brielle", "Adeline", "Vivian", "Rylee", "Clara",
"Raelynn", "Melanie", "Melody", "Julia", "Athena", "Maria", "Liliana",
"Hadley", "Arya", "Rose", "Reagan", "Eliza", "Adalynn", "Kaylee", "Lyla",
"Mackenzie", "Alaia", "Isabelle", "Charlie", "Arianna", "Mary", "Remi",
"Margaret", "Iris", "Parker", "Ximena", "Eden", "Ayla", "Kylie", "Elliana",
"Josie", "Katherine", "Faith", "Alexandra", "Eloise", "Adalyn", "Amaya",
"Jasmine", "Amara", "Daisy", "Reese", "Valerie", "Brianna", "Cecilia",
"Andrea", "Summer", "Valeria", "Norah", "Ariella", "Esther", "Ashley",
"Emerson", "Aubree", "Isabel", "Anastasia", "Ryleigh", "Khloe", "Taylor",
"Londyn", "Lucia", "Emersyn", "Callie", "Sienna", "Blakely", "Kehlani",
"Genevieve", "Alina", "Bailey", "Juniper", "Maeve", "Molly", "Harmony",
"Georgia", "Magnolia", "Catalina", "Freya", "Juliette", "Sloane", "June",
"Sara", "Ada", "Kimberly", "River", "Ember", "Juliana", "Aliyah", "Millie",
"Brynlee", "Teagan", "Morgan", "Jordyn", "London", "Alaina", "Olive",
"Rosalie", "Alyssa", "Ariel", "Finley", "Arabella", "Journee", "Hope",
"Leila", "Alana", "Gemma", "Vanessa", "Gracie", "Noelle", "Marley", "Elise",
"Presley", "Kamila", "Zara", "Amy", "Kayla", "Payton", "Blake", "Ruth",
"Alani", "Annabelle", "Sage", "Aspen", "Laila", "Lila", "Rachel", "Trinity",
"Daniela", "Alexa", "Lilly", "Lauren", "Elsie", "Margot", "Adelyn", "Zuri",
"Brooke", "Sawyer", "Lilah", "Lola", "Selena", "Mya", "Sydney", "Diana",
"Ana", "Vera", "Alayna", "Nyla", "Elaina", "Rebecca", "Angela", "Kali",
"Alivia", "Raegan", "Rowan", "Phoebe", "Camilla", "Joanna", "Malia",
"Vivienne", "Dakota", "Brooklynn", "Evangeline", "Camille", "Jane",
"Nicole", "Catherine", "Jocelyn", "Julianna", "Lena", "Lucille", "Mckenna",
"Paige", "Adelaide", "Charlee", "Mariana", "Myla", "Mckenzie", "Tessa",
"Miriam", "Oakley", "Kailani", "Alayah", "Amira", "Adaline", "Phoenix",
"Milani", "Annie", "Lia", "Angelina", "Harley", "Cali", "Maggie", "Hayden",
"Leia", "Fiona", "Briella", "Journey", "Lennon", "Saylor", "Jayla", "Kaia",
"Thea", "Adriana", "Mariah", "Juliet", "Oaklynn", "Kiara", "Alexis",
"Haven", "Aniyah", "Delaney", "Gracelynn", "Kendall", "Winter", "Lilith",
"Logan", "Amiyah", "Evie", "Alexandria", "Gracelyn", "Gabriela", "Sutton",
"Harlow", "Madilyn", "Makayla", "Evelynn", "Gia", "Nina", "Amina",
"Giselle", "Brynn", "Blair", "Amari", "Octavia", "Michelle", "Talia",
"Demi", "Alaya", "Kaylani", "Izabella", "Fatima", "Tatum", "Makenzie",
"Lilliana", "Arielle", "Palmer", "Melissa", "Willa", "Samara", "Destiny",
"Dahlia", "Celeste", "Ainsley", "Rylie", "Reign", "Laura", "Adelynn",
"Gabrielle", "Remington", "Wren", "Brinley", "Amora", "Lainey", "Collins",
"Lexi", "Aitana", "Alessandra", "Kenzie", "Raelyn", "Elle", "Everlee",
"Haisley", "Hallie", "Wynter", "Daleyza", "Gwendolyn", "Paislee", "Ariyah",
"Veronica", "Heidi", "Anaya", "Cataleya", "Kira", "Avianna", "Felicity",
"Aylin", "Miracle", "Sabrina", "Lana", "Ophelia", "Elianna", "Royalty",
"Madeleine", "Esmeralda", "Joy", "Kalani", "Esme", "Jessica", "Leighton",
"Ariah", "Makenna", "Nylah", "Viviana", "Camryn", "Cassidy", "Dream",
"Luciana", "Maisie", "Stevie", "Kate", "Lyric", "Daniella", "Alicia",
"Daphne", "Frances", "Charli", "Raven", "Paris", "Nayeli", "Serena",
"Heaven", "Bianca", "Helen", "Hattie", "Averie", "Mabel", "Selah", "Allie",
"Marlee", "Kinley", "Regina", "Carmen", "Jennifer", "Jordan", "Alison",
"Stephanie", "Maren", "Kayleigh", "Angel", "Annalise", "Jacqueline",
"Braelynn", "Emory", "Rosemary", "Scarlet", "Amanda", "Danielle", "Emelia",
"Ryan", "Carolina", "Astrid", "Kensley", "Shiloh", "Maci", "Francesca",
"Rory", "Celine", "Kamryn", "Zariah", "Liana", "Poppy", "Maliyah", "Keira",
"Skyler", "Noa", "Skye", "Nadia", "Addilyn", "Rosie", "Eve", "Sarai",
"Edith", "Jolene", "Maddison", "Meadow", "Charleigh", "Matilda", "Elliott",
"Madelynn", "Holly", "Leona", "Azalea", "Katie", "Mira", "Ari", "Kaitlyn",
"Danna", "Cameron", "Kyla", "Bristol", "Kora", "Armani", "Nia", "Malani",
"Dylan", "Remy", "Maia", "Dior", "Legacy", "Alessia", "Shelby", "Maryam",
"Sylvia", "Yaretzi", "Lorelei", "Madilynn", "Abby", "Helena", "Jimena",
"Elisa", "Renata", "Amber", "Aviana", "Carter", "Emmy", "Haley", "Alondra",
"Elaine", "Erin", "April", "Emely", "Imani", "Kennedi", "Lorelai", "Hanna",
"Kelsey", "Aurelia", "Colette", "Jaliyah", "Kylee", "Macie", "Aisha",
"Dorothy", "Charley", "Kathryn", "Adelina", "Adley", "Monroe", "Sierra",
"Ailani", "Miranda", "Mikayla", "Alejandra", "Amirah", "Jada", "Jazlyn",
"Jenna", "Jayleen", "Beatrice", "Kendra", "Lyra", "Nola", "Emberly",
"Mckinley", "Myra", "Katalina", "Antonella", "Zelda", "Alanna", "Amaia",
"Priscilla", "Briar", "Kaliyah", "Itzel", "Oaklyn", "Alma", "Mallory",
"Novah", "Amalia", "Fernanda", "Alia", "Angelica", "Elliot", "Justice",
"Mae", "Cecelia", "Gloria", "Ariya", "Virginia", "Cheyenne", "Aleah",
"Jemma", "Henley", "Meredith", "Leyla", "Lennox", "Ensley", "Zahra",
"Reina", "Frankie", "Lylah", "Nalani", "Reyna", "Saige", "Ivanna", "Aleena",
"Emerie", "Ivory", "Leslie", "Alora", "Ashlyn", "Bethany", "Bonnie",
"Sasha", "Xiomara", "Salem", "Adrianna", "Dayana", "Clementine", "Karina",
"Karsyn", "Emmie", "Julie", "Julieta", "Briana", "Carly", "Macy", "Marie",
"Oaklee", "Christina", "Malaysia", "Ellis", "Irene", "Anne", "Anahi",
"Mara", "Rhea", "Davina", "Dallas", "Jayda", "Mariam", "Skyla", "Siena",
"Elora", "Marilyn", "Jazmin", "Megan", "Rosa", "Savanna", "Allyson",
"Milan", "Coraline", "Johanna", "Melany", "Chelsea", "Michaela", "Melina",
"Angie", "Cassandra", "Yara", "Kassidy", "Liberty", "Lilian", "Avah",
"Anya", "Laney", "Navy", "Opal", "Amani", "Zaylee", "Mina", "Sloan",
"Romina", "Ashlynn", "Aliza", "Liv", "Malaya", "Blaire", "Janelle", "Kara",
"Analia", "Hadassah", "Hayley", "Karla", "Chaya", "Cadence", "Kyra",
"Alena", "Ellianna", "Katelyn", "Kimber", "Laurel", "Lina", "Capri",
"Braelyn", "Faye", "Kamiyah", "Kenna", "Louise", "Calliope", "Kaydence",
"Nala", "Tiana", "Aileen", "Sunny", "Zariyah", "Milana", "Giuliana",
"Eileen", "Elodie", "Rayna", "Monica", "Galilea", "Journi", "Lara",
"Marina", "Aliana", "Harmoni", "Jamie", "Holland", "Emmalyn", "Lauryn",
"Chanel", "Tinsley", "Jessie", "Lacey", "Elyse", "Janiyah", "Jolie", "Ezra",
"Marleigh", "Roselyn", "Lillie", "Louisa", "Madisyn", "Penny", "Kinslee",
"Treasure", "Zaniyah", "Estella", "Jaylah", "Khaleesi", "Alexia", "Dulce",
"Indie", "Maxine", "Waverly", "Giovanna", "Miley", "Saoirse", "Estrella",
"Greta", "Rosalia", "Mylah", "Teresa", "Bridget", "Kelly", "Adalee",
"Aubrie", "Lea", "Harlee", "Anika", "Itzayana", "Hana", "Kaisley",
"Mikaela", "Naya", "Avalynn", "Margo", "Sevyn", "Florence", "Keilani",
"Lyanna", "Joelle", "Kataleya", "Royal", "Averi", "Kallie", "Winnie",
"Baylee", "Martha", "Pearl", "Alaiya", "Rayne", "Sylvie", "Brylee",
"Jazmine", "Ryann", "Kori", "Noemi", "Haylee", "Julissa", "Celia", "Laylah",
"Rebekah", "Rosalee", "Aya", "Bria", "Adele", "Aubrielle", "Tiffany",
"Addyson", "Kai", "Bellamy", "Leilany", "Princess", "Chana", "Estelle",
"Selene", "Sky", "Dani", "Thalia", "Ellen", "Rivka", "Amelie", "Andi",
"Kynlee", "Raina", "Vienna", "Alianna", "Livia", "Madalyn", "Mercy",
"Novalee", "Ramona", "Vada", "Berkley", "Gwen", "Persephone", "Milena",
"Paula", "Clare", "Kairi", "Linda", "Paulina", "Kamilah", "Amoura",
"Hunter", "Isabela", "Karen", "Marianna", "Sariyah", "Theodora", "Annika",
"Kyleigh", "Nellie", "Scarlette", "Keyla", "Kailey", "Mavis", "Lilianna",
"Rosalyn", "Sariah", "Tori", "Yareli", "Aubriella", "Bexley", "Bailee",
"Jianna", "Keily", "Annabella", "Azariah", "Denisse", "Promise", "August",
"Hadlee", "Halle", "Fallon", "Oakleigh", "Zaria", "Jaylin", "Paisleigh",
"Crystal", "Ila", "Aliya", "Cynthia", "Giana", "Maleah", "Rylan", "Aniya",
"Denise", "Emmeline", "Scout", "Simone", "Noah", "Zora", "Meghan", "Landry",
"Ainhoa", "Lilyana", "Noor", "Belen", "Brynleigh", "Cleo", "Meilani",
"Karter", "Amaris", "Frida", "Iliana", "Violeta", "Addisyn", "Nancy",
"Denver", "Leanna", "Braylee", "Kiana", "Wrenley", "Barbara", "Khalani",
"Aspyn", "Ellison", "Judith", "Robin", "Valery", "Aila", "Deborah", "Cara",
"Clarissa", "Iyla", "Lexie", "Anais", "Kaylie", "Nathalie", "Alisson",
"Della", "Addilynn", "Elsa", "Zoya", "Layne", "Marlowe", "Jovie", "Kenia",
"Samira", "Jaylee", "Jenesis", "Etta", "Shay", "Amayah", "Avayah", "Egypt",
"Flora", "Raquel", "Whitney", "Zola", "Giavanna", "Raya", "Veda", "Halo",
"Paloma", "Nataly", "Whitley", "Dalary", "Drew", "Guadalupe", "Kamari",
"Esperanza", "Loretta", "Malayah", "Natasha", "Stormi", "Ansley", "Carolyn",
"Corinne", "Paola", "Brittany", "Emerald", "Freyja", "Zainab", "Artemis",
"Jillian", "Kimora", "Zoie", "Aislinn", "Emmaline", "Ayleen", "Queen",
"Jaycee", "Murphy", "Nyomi", "Elina", "Hadleigh", "Marceline", "Marisol",
"Yasmin", "Zendaya", "Chandler", "Emani", "Jaelynn", "Kaiya", "Nathalia",
"Violette", "Joyce", "Paityn", "Elisabeth", "Emmalynn", "Luella",
"Yamileth", "Aarya", "Luisa", "Zhuri", "Araceli", "Harleigh", "Madalynn",
"Melani", "Laylani", "Magdalena", "Mazikeen", "Belle", "Kadence",
)
last_names = (
"Smith", "Johnson", "Williams", "Brown", "Jones", "Garcia", "Miller",
"Davis", "Rodriguez", "Martinez", "Hernandez", "Lopez", "Gonzales",
"Wilson", "Anderson", "Thomas", "Taylor", "Moore", "Jackson", "Martin",
"Lee", "Perez", "Thompson", "White", "Harris", "Sanchez", "Clark",
"Ramirez", "Lewis", "Robinson", "Walker", "Young", "Allen", "King",
"Wright", "Scott", "Torres", "Nguyen", "Hill", "Flores", "Green", "Adams",
"Nelson", "Baker", "Hall", "Rivera", "Campbell", "Mitchell", "Carter",
"Roberts", "Gomez", "Phillips", "Evans", "Turner", "Diaz", "Parker", "Cruz",
"Edwards", "Collins", "Reyes", "Stewart", "Morris", "Morales", "Murphy",
"Cook", "Rogers", "Gutierrez", "Ortiz", "Morgan", "Cooper", "Peterson",
"Bailey", "Reed", "Kelly", "Howard", "Ramos", "Kim", "Cox", "Ward",
"Richardson", "Watson", "Brooks", "Chavez", "Wood", "James", "Bennet",
"Gray", "Mendoza", "Ruiz", "Hughes", "Price", "Alvarez", "Castillo",
"Sanders", "Patel", "Myers", "Long", "Ross", "Foster", "Jimenez",
)
skill_levels = (
"Beginner", "Intermediate", "Advanced", "Expert",
)
subjects = (
"Web: HTML, CSS, JavaScript", "Data Science: Python",
"Android: Java", "iOS: Swift", "Career Development",
"General Programming",
)
resource_items = ("Laptop", "Books", "Scholarships",
"Mental Health Need", "Financial stipends")
disability = (True, False)
work_status = (True, False)
receiving_assistance = (True, False)
convictions = (
"Felony", "Misdemeanor", "Infraction",
)
feedbacks = (
"Not Recommended, Poor", "Conflicted, Fair", "Recommended, Good",
"Highly Recommended, Very Good", "Best, Excellent",
)
topics = (
"GCA Help", "Resume Help", "Job Search", "Progress Check"
)
def random_first_name(percent_male: int = 50):
if randint(1, 100) > percent_male:
return choice(female_first_names)
else:
return choice(male_first_names)
def percent_true(percent):
return 100 * random() < percent
def generate_uuid(n_len: int):
n1 = ceil(n_len / 2)
n2 = floor(n_len / 2)
prefix = choices(string.ascii_letters, k=n1)
    suffix = map(str, choices(range(0, 10), k=n2))
uuid_list = list(chain(prefix, suffix))
shuffle(uuid_list)
uuid = "".join(uuid_list)
return uuid
| true
| true
|
f719579272f6a91654381f1c2c1b0fc8aede760f
| 1,559
|
py
|
Python
|
channels/streamedit.py
|
leigh123linux/streamtuner2
|
43ded3a68bcf3d968a99c849d779fc8c3fb3d8d8
|
[
"MIT"
] | 1
|
2019-03-03T19:58:01.000Z
|
2019-03-03T19:58:01.000Z
|
channels/streamedit.py
|
leigh123linux/streamtuner2
|
43ded3a68bcf3d968a99c849d779fc8c3fb3d8d8
|
[
"MIT"
] | null | null | null |
channels/streamedit.py
|
leigh123linux/streamtuner2
|
43ded3a68bcf3d968a99c849d779fc8c3fb3d8d8
|
[
"MIT"
] | null | null | null |
# api: streamtuner2
# title: Stream entry editor
# description: Allows inspecting and modifying station/stream entries.
# version: 0.6
# type: feature
# category: ui
# config: -
# priority: core
#
# Editing dialog for stream entries. Available in
# the context and main menu. Most useful for
# changing bookmarks, or even creating new ones.
#
from uikit import *
import channels
from config import *
from copy import copy
# aux win: stream data editing dialog
class streamedit (AuxiliaryWindow):
fields = [
"favicon", "format", "genre", "homepage", "playing", "title", "url", "extra"
]
# show stream data editing dialog
def open(self, mw):
self.main.configwin.load_config(self.main.row(), "streamedit_")
self.win_streamedit.show_all()
# copy widget contents to stream
def save(self, w):
row = self.main.row()
for k in self.fields:
            if k not in row:
row[k] = ""
self.main.configwin.save_config(row, "streamedit_")
self.main.channel().save()
self.cancel(w)
# add a new list entry, update window
def new(self, w):
s = self.main.channel().stations()
s.append({"title":"new", "url":"", "format":"audio/mpeg", "genre":"", "listeners":1});
self.main.channel().switch() # update display
self.main.channel().gtk_list.get_selection().select_path(str(len(s)-1)); # set cursor to last row
self.open(w)
# hide window
def cancel(self, *w):
self.win_streamedit.hide()
return True
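The save() method above backfills any missing field with an empty string before persisting. A standalone sketch of that pattern (the station data is an illustrative assumption, not taken from streamtuner2):
fields = ["title", "url", "genre"]
row = {"title": "Jazz FM"}  # a partially filled station entry
for k in fields:
    if k not in row:
        row[k] = ""  # backfill absent fields so later writes never hit a KeyError
print(row)  # {'title': 'Jazz FM', 'url': '', 'genre': ''}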
| 27.839286
| 105
| 0.628608
|
from uikit import *
import channels
from config import *
from copy import copy
class streamedit (AuxiliaryWindow):
fields = [
"favicon", "format", "genre", "homepage", "playing", "title", "url", "extra"
]
def open(self, mw):
self.main.configwin.load_config(self.main.row(), "streamedit_")
self.win_streamedit.show_all()
def save(self, w):
row = self.main.row()
for k in self.fields:
            if k not in row:
row[k] = ""
self.main.configwin.save_config(row, "streamedit_")
self.main.channel().save()
self.cancel(w)
def new(self, w):
s = self.main.channel().stations()
s.append({"title":"new", "url":"", "format":"audio/mpeg", "genre":"", "listeners":1});
self.main.channel().switch()
self.main.channel().gtk_list.get_selection().select_path(str(len(s)-1));
self.open(w)
def cancel(self, *w):
self.win_streamedit.hide()
return True
| true
| true
|
f71957e4865285d50289aa6cd3aa2a4c44bdc813
| 260
|
py
|
Python
|
lucky-four.py
|
omar115/codechef
|
7634b085bb906e4ef29e6ae08bdbe82add2aa345
|
[
"MIT"
] | null | null | null |
lucky-four.py
|
omar115/codechef
|
7634b085bb906e4ef29e6ae08bdbe82add2aa345
|
[
"MIT"
] | null | null | null |
lucky-four.py
|
omar115/codechef
|
7634b085bb906e4ef29e6ae08bdbe82add2aa345
|
[
"MIT"
] | null | null | null |
t = int(input())
i=0
while i < t:
st = str(input())
length = len(st)
j = 0
cnt = 0
while j < length:
num = int(st[j])
#print(num)
if num == 4:
cnt = cnt + 1
j=j+1
print(cnt)
i=i+1
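An equivalent, more idiomatic sketch of the same digit-counting task (an editor's alternative, not the submitted solution):
for _ in range(int(input())):
    print(input().count("4"))  # str.count tallies how often the lucky digit '4' appears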
| 16.25
| 25
| 0.388462
|
t = int(input())
i=0
while i < t:
st = str(input())
length = len(st)
j = 0
cnt = 0
while j < length:
num = int(st[j])
if num == 4:
cnt = cnt + 1
j=j+1
print(cnt)
i=i+1
| true
| true
|
f71958930d922f66399bb7f49e57cab4ab83d335
| 12,028
|
py
|
Python
|
hfc/protos/peer/collection_pb2.py
|
roviso/hyberledger-py
|
908dd597e0822f99cf618f235dd517824ba44bc4
|
[
"Apache-2.0"
] | 389
|
2016-09-18T11:50:10.000Z
|
2022-03-29T21:45:40.000Z
|
hfc/protos/peer/collection_pb2.py
|
regrlomon/fabric-sdk-py
|
57ddc125cd0627c602d55b300d3e0ba50600ea9e
|
[
"Apache-2.0"
] | 112
|
2017-08-18T00:32:21.000Z
|
2022-02-25T18:55:57.000Z
|
hfc/protos/peer/collection_pb2.py
|
regrlomon/fabric-sdk-py
|
57ddc125cd0627c602d55b300d3e0ba50600ea9e
|
[
"Apache-2.0"
] | 268
|
2016-10-12T02:56:58.000Z
|
2022-03-30T09:50:54.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: hfc/protos/peer/collection.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from hfc.protos.common import policies_pb2 as hfc_dot_protos_dot_common_dot_policies__pb2
from hfc.protos.peer import policy_pb2 as hfc_dot_protos_dot_peer_dot_policy__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='hfc/protos/peer/collection.proto',
package='protos',
syntax='proto3',
serialized_options=b'\n\"org.hyperledger.fabric.protos.peerZ,github.com/hyperledger/fabric-protos-go/peer',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n hfc/protos/peer/collection.proto\x12\x06protos\x1a hfc/protos/common/policies.proto\x1a\x1chfc/protos/peer/policy.proto\"C\n\x17\x43ollectionConfigPackage\x12(\n\x06\x63onfig\x18\x01 \x03(\x0b\x32\x18.protos.CollectionConfig\"a\n\x10\x43ollectionConfig\x12\x42\n\x18static_collection_config\x18\x01 \x01(\x0b\x32\x1e.protos.StaticCollectionConfigH\x00\x42\t\n\x07payload\"\x9e\x02\n\x16StaticCollectionConfig\x12\x0c\n\x04name\x18\x01 \x01(\t\x12:\n\x12member_orgs_policy\x18\x02 \x01(\x0b\x32\x1e.protos.CollectionPolicyConfig\x12\x1b\n\x13required_peer_count\x18\x03 \x01(\x05\x12\x1a\n\x12maximum_peer_count\x18\x04 \x01(\x05\x12\x15\n\rblock_to_live\x18\x05 \x01(\x04\x12\x18\n\x10member_only_read\x18\x06 \x01(\x08\x12\x19\n\x11member_only_write\x18\x07 \x01(\x08\x12\x35\n\x12\x65ndorsement_policy\x18\x08 \x01(\x0b\x32\x19.protos.ApplicationPolicy\"`\n\x16\x43ollectionPolicyConfig\x12;\n\x10signature_policy\x18\x01 \x01(\x0b\x32\x1f.common.SignaturePolicyEnvelopeH\x00\x42\t\n\x07payloadBR\n\"org.hyperledger.fabric.protos.peerZ,github.com/hyperledger/fabric-protos-go/peerb\x06proto3'
,
dependencies=[hfc_dot_protos_dot_common_dot_policies__pb2.DESCRIPTOR,hfc_dot_protos_dot_peer_dot_policy__pb2.DESCRIPTOR,])
_COLLECTIONCONFIGPACKAGE = _descriptor.Descriptor(
name='CollectionConfigPackage',
full_name='protos.CollectionConfigPackage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='config', full_name='protos.CollectionConfigPackage.config', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=108,
serialized_end=175,
)
_COLLECTIONCONFIG = _descriptor.Descriptor(
name='CollectionConfig',
full_name='protos.CollectionConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='static_collection_config', full_name='protos.CollectionConfig.static_collection_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='payload', full_name='protos.CollectionConfig.payload',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=177,
serialized_end=274,
)
_STATICCOLLECTIONCONFIG = _descriptor.Descriptor(
name='StaticCollectionConfig',
full_name='protos.StaticCollectionConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='protos.StaticCollectionConfig.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='member_orgs_policy', full_name='protos.StaticCollectionConfig.member_orgs_policy', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='required_peer_count', full_name='protos.StaticCollectionConfig.required_peer_count', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='maximum_peer_count', full_name='protos.StaticCollectionConfig.maximum_peer_count', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='block_to_live', full_name='protos.StaticCollectionConfig.block_to_live', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='member_only_read', full_name='protos.StaticCollectionConfig.member_only_read', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='member_only_write', full_name='protos.StaticCollectionConfig.member_only_write', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='endorsement_policy', full_name='protos.StaticCollectionConfig.endorsement_policy', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=277,
serialized_end=563,
)
_COLLECTIONPOLICYCONFIG = _descriptor.Descriptor(
name='CollectionPolicyConfig',
full_name='protos.CollectionPolicyConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='signature_policy', full_name='protos.CollectionPolicyConfig.signature_policy', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='payload', full_name='protos.CollectionPolicyConfig.payload',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=565,
serialized_end=661,
)
_COLLECTIONCONFIGPACKAGE.fields_by_name['config'].message_type = _COLLECTIONCONFIG
_COLLECTIONCONFIG.fields_by_name['static_collection_config'].message_type = _STATICCOLLECTIONCONFIG
_COLLECTIONCONFIG.oneofs_by_name['payload'].fields.append(
_COLLECTIONCONFIG.fields_by_name['static_collection_config'])
_COLLECTIONCONFIG.fields_by_name['static_collection_config'].containing_oneof = _COLLECTIONCONFIG.oneofs_by_name['payload']
_STATICCOLLECTIONCONFIG.fields_by_name['member_orgs_policy'].message_type = _COLLECTIONPOLICYCONFIG
_STATICCOLLECTIONCONFIG.fields_by_name['endorsement_policy'].message_type = hfc_dot_protos_dot_peer_dot_policy__pb2._APPLICATIONPOLICY
_COLLECTIONPOLICYCONFIG.fields_by_name['signature_policy'].message_type = hfc_dot_protos_dot_common_dot_policies__pb2._SIGNATUREPOLICYENVELOPE
_COLLECTIONPOLICYCONFIG.oneofs_by_name['payload'].fields.append(
_COLLECTIONPOLICYCONFIG.fields_by_name['signature_policy'])
_COLLECTIONPOLICYCONFIG.fields_by_name['signature_policy'].containing_oneof = _COLLECTIONPOLICYCONFIG.oneofs_by_name['payload']
DESCRIPTOR.message_types_by_name['CollectionConfigPackage'] = _COLLECTIONCONFIGPACKAGE
DESCRIPTOR.message_types_by_name['CollectionConfig'] = _COLLECTIONCONFIG
DESCRIPTOR.message_types_by_name['StaticCollectionConfig'] = _STATICCOLLECTIONCONFIG
DESCRIPTOR.message_types_by_name['CollectionPolicyConfig'] = _COLLECTIONPOLICYCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CollectionConfigPackage = _reflection.GeneratedProtocolMessageType('CollectionConfigPackage', (_message.Message,), {
'DESCRIPTOR' : _COLLECTIONCONFIGPACKAGE,
'__module__' : 'hfc.protos.peer.collection_pb2'
# @@protoc_insertion_point(class_scope:protos.CollectionConfigPackage)
})
_sym_db.RegisterMessage(CollectionConfigPackage)
CollectionConfig = _reflection.GeneratedProtocolMessageType('CollectionConfig', (_message.Message,), {
'DESCRIPTOR' : _COLLECTIONCONFIG,
'__module__' : 'hfc.protos.peer.collection_pb2'
# @@protoc_insertion_point(class_scope:protos.CollectionConfig)
})
_sym_db.RegisterMessage(CollectionConfig)
StaticCollectionConfig = _reflection.GeneratedProtocolMessageType('StaticCollectionConfig', (_message.Message,), {
'DESCRIPTOR' : _STATICCOLLECTIONCONFIG,
'__module__' : 'hfc.protos.peer.collection_pb2'
# @@protoc_insertion_point(class_scope:protos.StaticCollectionConfig)
})
_sym_db.RegisterMessage(StaticCollectionConfig)
CollectionPolicyConfig = _reflection.GeneratedProtocolMessageType('CollectionPolicyConfig', (_message.Message,), {
'DESCRIPTOR' : _COLLECTIONPOLICYCONFIG,
'__module__' : 'hfc.protos.peer.collection_pb2'
# @@protoc_insertion_point(class_scope:protos.CollectionPolicyConfig)
})
_sym_db.RegisterMessage(CollectionPolicyConfig)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
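A hedged usage sketch for the generated messages (an editor's addition; field values are illustrative assumptions, and it presumes the hfc package and its protobuf dependencies import cleanly):
from hfc.protos.peer import collection_pb2
cfg = collection_pb2.StaticCollectionConfig(
    name="collectionMarbles",  # illustrative private-data collection name
    required_peer_count=1,
    maximum_peer_count=3,
    block_to_live=1000000,
    member_only_read=True,
)
pkg = collection_pb2.CollectionConfigPackage(
    config=[collection_pb2.CollectionConfig(static_collection_config=cfg)],
)
raw = pkg.SerializeToString()  # protobuf wire-format bytes
back = collection_pb2.CollectionConfigPackage.FromString(raw)
assert back.config[0].static_collection_config.name == "collectionMarbles"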
| 45.388679
| 1,115
| 0.786415
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from hfc.protos.common import policies_pb2 as hfc_dot_protos_dot_common_dot_policies__pb2
from hfc.protos.peer import policy_pb2 as hfc_dot_protos_dot_peer_dot_policy__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='hfc/protos/peer/collection.proto',
package='protos',
syntax='proto3',
serialized_options=b'\n\"org.hyperledger.fabric.protos.peerZ,github.com/hyperledger/fabric-protos-go/peer',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n hfc/protos/peer/collection.proto\x12\x06protos\x1a hfc/protos/common/policies.proto\x1a\x1chfc/protos/peer/policy.proto\"C\n\x17\x43ollectionConfigPackage\x12(\n\x06\x63onfig\x18\x01 \x03(\x0b\x32\x18.protos.CollectionConfig\"a\n\x10\x43ollectionConfig\x12\x42\n\x18static_collection_config\x18\x01 \x01(\x0b\x32\x1e.protos.StaticCollectionConfigH\x00\x42\t\n\x07payload\"\x9e\x02\n\x16StaticCollectionConfig\x12\x0c\n\x04name\x18\x01 \x01(\t\x12:\n\x12member_orgs_policy\x18\x02 \x01(\x0b\x32\x1e.protos.CollectionPolicyConfig\x12\x1b\n\x13required_peer_count\x18\x03 \x01(\x05\x12\x1a\n\x12maximum_peer_count\x18\x04 \x01(\x05\x12\x15\n\rblock_to_live\x18\x05 \x01(\x04\x12\x18\n\x10member_only_read\x18\x06 \x01(\x08\x12\x19\n\x11member_only_write\x18\x07 \x01(\x08\x12\x35\n\x12\x65ndorsement_policy\x18\x08 \x01(\x0b\x32\x19.protos.ApplicationPolicy\"`\n\x16\x43ollectionPolicyConfig\x12;\n\x10signature_policy\x18\x01 \x01(\x0b\x32\x1f.common.SignaturePolicyEnvelopeH\x00\x42\t\n\x07payloadBR\n\"org.hyperledger.fabric.protos.peerZ,github.com/hyperledger/fabric-protos-go/peerb\x06proto3'
,
dependencies=[hfc_dot_protos_dot_common_dot_policies__pb2.DESCRIPTOR,hfc_dot_protos_dot_peer_dot_policy__pb2.DESCRIPTOR,])
_COLLECTIONCONFIGPACKAGE = _descriptor.Descriptor(
name='CollectionConfigPackage',
full_name='protos.CollectionConfigPackage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='config', full_name='protos.CollectionConfigPackage.config', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=108,
serialized_end=175,
)
_COLLECTIONCONFIG = _descriptor.Descriptor(
name='CollectionConfig',
full_name='protos.CollectionConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='static_collection_config', full_name='protos.CollectionConfig.static_collection_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='payload', full_name='protos.CollectionConfig.payload',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=177,
serialized_end=274,
)
_STATICCOLLECTIONCONFIG = _descriptor.Descriptor(
name='StaticCollectionConfig',
full_name='protos.StaticCollectionConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='protos.StaticCollectionConfig.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='member_orgs_policy', full_name='protos.StaticCollectionConfig.member_orgs_policy', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='required_peer_count', full_name='protos.StaticCollectionConfig.required_peer_count', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='maximum_peer_count', full_name='protos.StaticCollectionConfig.maximum_peer_count', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='block_to_live', full_name='protos.StaticCollectionConfig.block_to_live', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='member_only_read', full_name='protos.StaticCollectionConfig.member_only_read', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='member_only_write', full_name='protos.StaticCollectionConfig.member_only_write', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='endorsement_policy', full_name='protos.StaticCollectionConfig.endorsement_policy', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=277,
serialized_end=563,
)
_COLLECTIONPOLICYCONFIG = _descriptor.Descriptor(
name='CollectionPolicyConfig',
full_name='protos.CollectionPolicyConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='signature_policy', full_name='protos.CollectionPolicyConfig.signature_policy', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='payload', full_name='protos.CollectionPolicyConfig.payload',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=565,
serialized_end=661,
)
_COLLECTIONCONFIGPACKAGE.fields_by_name['config'].message_type = _COLLECTIONCONFIG
_COLLECTIONCONFIG.fields_by_name['static_collection_config'].message_type = _STATICCOLLECTIONCONFIG
_COLLECTIONCONFIG.oneofs_by_name['payload'].fields.append(
_COLLECTIONCONFIG.fields_by_name['static_collection_config'])
_COLLECTIONCONFIG.fields_by_name['static_collection_config'].containing_oneof = _COLLECTIONCONFIG.oneofs_by_name['payload']
_STATICCOLLECTIONCONFIG.fields_by_name['member_orgs_policy'].message_type = _COLLECTIONPOLICYCONFIG
_STATICCOLLECTIONCONFIG.fields_by_name['endorsement_policy'].message_type = hfc_dot_protos_dot_peer_dot_policy__pb2._APPLICATIONPOLICY
_COLLECTIONPOLICYCONFIG.fields_by_name['signature_policy'].message_type = hfc_dot_protos_dot_common_dot_policies__pb2._SIGNATUREPOLICYENVELOPE
_COLLECTIONPOLICYCONFIG.oneofs_by_name['payload'].fields.append(
_COLLECTIONPOLICYCONFIG.fields_by_name['signature_policy'])
_COLLECTIONPOLICYCONFIG.fields_by_name['signature_policy'].containing_oneof = _COLLECTIONPOLICYCONFIG.oneofs_by_name['payload']
DESCRIPTOR.message_types_by_name['CollectionConfigPackage'] = _COLLECTIONCONFIGPACKAGE
DESCRIPTOR.message_types_by_name['CollectionConfig'] = _COLLECTIONCONFIG
DESCRIPTOR.message_types_by_name['StaticCollectionConfig'] = _STATICCOLLECTIONCONFIG
DESCRIPTOR.message_types_by_name['CollectionPolicyConfig'] = _COLLECTIONPOLICYCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CollectionConfigPackage = _reflection.GeneratedProtocolMessageType('CollectionConfigPackage', (_message.Message,), {
'DESCRIPTOR' : _COLLECTIONCONFIGPACKAGE,
'__module__' : 'hfc.protos.peer.collection_pb2'
})
_sym_db.RegisterMessage(CollectionConfigPackage)
CollectionConfig = _reflection.GeneratedProtocolMessageType('CollectionConfig', (_message.Message,), {
'DESCRIPTOR' : _COLLECTIONCONFIG,
'__module__' : 'hfc.protos.peer.collection_pb2'
})
_sym_db.RegisterMessage(CollectionConfig)
StaticCollectionConfig = _reflection.GeneratedProtocolMessageType('StaticCollectionConfig', (_message.Message,), {
'DESCRIPTOR' : _STATICCOLLECTIONCONFIG,
'__module__' : 'hfc.protos.peer.collection_pb2'
})
_sym_db.RegisterMessage(StaticCollectionConfig)
CollectionPolicyConfig = _reflection.GeneratedProtocolMessageType('CollectionPolicyConfig', (_message.Message,), {
'DESCRIPTOR' : _COLLECTIONPOLICYCONFIG,
'__module__' : 'hfc.protos.peer.collection_pb2'
})
_sym_db.RegisterMessage(CollectionPolicyConfig)
DESCRIPTOR._options = None
| true
| true
|
f7195995fd51c5d254684ca38dffb1faa4bb8fd0
| 594
|
py
|
Python
|
tests/__init__.py
|
techthiyanes/ml_things
|
ddeeb16c55cf1d55cf80963217a8d1bffd0913cc
|
[
"Apache-2.0"
] | 153
|
2020-10-10T05:12:16.000Z
|
2022-03-17T07:48:42.000Z
|
tests/__init__.py
|
techthiyanes/ml_things
|
ddeeb16c55cf1d55cf80963217a8d1bffd0913cc
|
[
"Apache-2.0"
] | 21
|
2020-09-15T22:52:43.000Z
|
2022-02-21T15:27:16.000Z
|
tests/__init__.py
|
techthiyanes/ml_things
|
ddeeb16c55cf1d55cf80963217a8d1bffd0913cc
|
[
"Apache-2.0"
] | 42
|
2020-10-11T07:33:32.000Z
|
2022-03-11T01:43:54.000Z
|
# coding=utf-8
# Copyright 2020 George Mihaila.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 39.6
| 74
| 0.760943
| true
| true
|
|
f7195a417bd65f657d54400929963ce73b5b19d7
| 14,584
|
py
|
Python
|
qiskit/quantum_info/operators/measures.py
|
KOLANICH/qiskit-terra
|
3947f258ddb31a2b8dd17aff5d2d041d29d74601
|
[
"Apache-2.0"
] | 1
|
2021-04-28T14:37:16.000Z
|
2021-04-28T14:37:16.000Z
|
qiskit/quantum_info/operators/measures.py
|
timgates42/qiskit-terra
|
3947f258ddb31a2b8dd17aff5d2d041d29d74601
|
[
"Apache-2.0"
] | 6
|
2021-01-17T17:56:08.000Z
|
2021-04-01T12:40:21.000Z
|
qiskit/quantum_info/operators/measures.py
|
timgates42/qiskit-terra
|
3947f258ddb31a2b8dd17aff5d2d041d29d74601
|
[
"Apache-2.0"
] | 2
|
2021-03-07T07:58:54.000Z
|
2021-04-28T03:40:49.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""
A collection of useful quantum information functions for operators.
"""
import warnings
import numpy as np
from scipy import sparse
from qiskit.exceptions import QiskitError
from qiskit.circuit.gate import Gate
from qiskit.quantum_info.operators.base_operator import BaseOperator
from qiskit.quantum_info.operators.operator import Operator
from qiskit.quantum_info.operators.channel.quantum_channel import QuantumChannel
from qiskit.quantum_info.operators.channel import Choi, SuperOp
from qiskit.quantum_info.states.densitymatrix import DensityMatrix
from qiskit.quantum_info.states.measures import state_fidelity
try:
import cvxpy
_HAS_CVX = True
except ImportError:
_HAS_CVX = False
def process_fidelity(channel,
target=None,
require_cp=True,
require_tp=False):
r"""Return the process fidelity of a noisy quantum channel.
The process fidelity :math:`F_{\text{pro}}(\mathcal{E}, \mathcal{F})`
between two quantum channels :math:`\mathcal{E}, \mathcal{F}` is given by
.. math::
F_{\text{pro}}(\mathcal{E}, \mathcal{F})
= F(\rho_{\mathcal{E}}, \rho_{\mathcal{F}})
where :math:`F` is the :func:`~qiskit.quantum_info.state_fidelity`,
:math:`\rho_{\mathcal{E}} = \Lambda_{\mathcal{E}} / d` is the
normalized :class:`~qiskit.quantum_info.Choi` matrix for the channel
:math:`\mathcal{E}`, and :math:`d` is the input dimension of
:math:`\mathcal{E}`.
When the target channel is unitary this is equivalent to
.. math::
F_{\text{pro}}(\mathcal{E}, U)
= \frac{Tr[S_U^\dagger S_{\mathcal{E}}]}{d^2}
where :math:`S_{\mathcal{E}}, S_{U}` are the
:class:`~qiskit.quantum_info.SuperOp` matrices for the *input* quantum
channel :math:`\mathcal{E}` and *target* unitary :math:`U` respectively,
and :math:`d` is the input dimension of the channel.
Args:
channel (Operator or QuantumChannel): input quantum channel.
target (Operator or QuantumChannel or None): target quantum channel.
            If `None`, the target is the identity operator [Default: None].
require_cp (bool): require channel to be completely-positive
[Default: True].
require_tp (bool): require channel to be trace-preserving
[Default: False].
Returns:
float: The process fidelity :math:`F_{\text{pro}}`.
Raises:
QiskitError: if the channel and target do not have the same dimensions.
QiskitError: if the channel and target are not completely-positive
(with ``require_cp=True``) or not trace-preserving
(with ``require_tp=True``).
"""
# Format inputs
channel = _input_formatter(
channel, SuperOp, 'process_fidelity', 'channel')
target = _input_formatter(
target, Operator, 'process_fidelity', 'target')
if target:
# Validate dimensions
if channel.dim != target.dim:
raise QiskitError(
'Input quantum channel and target unitary must have the same '
'dimensions ({} != {}).'.format(channel.dim, target.dim))
# Validate complete-positivity and trace-preserving
for label, chan in [('Input', channel), ('Target', target)]:
if isinstance(chan, Operator) and (require_cp or require_tp):
is_unitary = chan.is_unitary()
# Validate as unitary
if require_cp and not is_unitary:
raise QiskitError('{} channel is not completely-positive'.format(label))
if require_tp and not is_unitary:
raise QiskitError('{} channel is not trace-preserving'.format(label))
elif chan is not None:
# Validate as QuantumChannel
if require_cp and not chan.is_cp():
raise QiskitError('{} channel is not completely-positive'.format(label))
if require_tp and not chan.is_tp():
raise QiskitError('{} channel is not trace-preserving'.format(label))
if isinstance(target, Operator):
# Compute fidelity with unitary target by applying the inverse
# to channel and computing fidelity with the identity
channel = channel @ target.adjoint()
target = None
input_dim, _ = channel.dim
if target is None:
# Compute process fidelity with identity channel
if isinstance(channel, Operator):
# |Tr[U]/dim| ** 2
fid = np.abs(np.trace(channel.data) / input_dim)**2
else:
# Tr[S] / (dim ** 2)
fid = np.trace(SuperOp(channel).data) / (input_dim**2)
return float(np.real(fid))
# For comparing two non-unitary channels we compute the state fidelity of
# the normalized Choi-matrices. This is equivalent to the previous definition
# when the target is a unitary channel.
state1 = DensityMatrix(Choi(channel).data / input_dim)
state2 = DensityMatrix(Choi(target).data / input_dim)
return state_fidelity(state1, state2, validate=False)
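# A minimal usage sketch for the function above (assumes qiskit's circuit
# library is importable; illustrative only). For a unitary compared against
# itself, the unitary-target formula gives exactly 1:
#
#     >>> from qiskit.circuit.library import XGate
#     >>> op = Operator(XGate())
#     >>> round(process_fidelity(op, target=op), 6)
#     1.0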
def average_gate_fidelity(channel,
target=None,
require_cp=True,
require_tp=False):
r"""Return the average gate fidelity of a noisy quantum channel.
The average gate fidelity :math:`F_{\text{ave}}` is given by
.. math::
F_{\text{ave}}(\mathcal{E}, U)
&= \int d\psi \langle\psi|U^\dagger
\mathcal{E}(|\psi\rangle\!\langle\psi|)U|\psi\rangle \\
&= \frac{d F_{\text{pro}}(\mathcal{E}, U) + 1}{d + 1}
where :math:`F_{\text{pro}}(\mathcal{E}, U)` is the
:meth:`~qiskit.quantum_info.process_fidelity` of the input quantum
*channel* :math:`\mathcal{E}` with a *target* unitary :math:`U`, and
:math:`d` is the dimension of the *channel*.
Args:
channel (QuantumChannel or Operator): noisy quantum channel.
target (Operator or None): target unitary operator.
            If `None`, the target is the identity operator [Default: None].
require_cp (bool): require channel to be completely-positive
[Default: True].
require_tp (bool): require channel to be trace-preserving
[Default: False].
Returns:
float: The average gate fidelity :math:`F_{\text{ave}}`.
Raises:
QiskitError: if the channel and target do not have the same dimensions,
or have different input and output dimensions.
        QiskitError: if the channel or target are not completely-positive
(with ``require_cp=True``) or not trace-preserving
(with ``require_tp=True``).
"""
# Format inputs
channel = _input_formatter(
channel, SuperOp, 'average_gate_fidelity', 'channel')
target = _input_formatter(
target, Operator, 'average_gate_fidelity', 'target')
if target is not None:
try:
target = Operator(target)
except QiskitError:
raise QiskitError(
'Target channel is not a unitary channel. To compare '
'two non-unitary channels use the '
'`qiskit.quantum_info.process_fidelity` function instead.')
dim, _ = channel.dim
f_pro = process_fidelity(channel,
target=target,
require_cp=require_cp,
require_tp=require_tp)
return (dim * f_pro + 1) / (dim + 1)
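# Worked example of the formula above (values chosen for illustration):
# for a single-qubit channel (d = 2) with process fidelity F_pro = 0.97,
#
#     F_ave = (d * F_pro + 1) / (d + 1) = (2 * 0.97 + 1) / 3 = 0.98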
def gate_error(channel, target=None, require_cp=True, require_tp=False):
r"""Return the gate error of a noisy quantum channel.
The gate error :math:`E` is given by the average gate infidelity
.. math::
E(\mathcal{E}, U) = 1 - F_{\text{ave}}(\mathcal{E}, U)
where :math:`F_{\text{ave}}(\mathcal{E}, U)` is the
:meth:`~qiskit.quantum_info.average_gate_fidelity` of the input
quantum *channel* :math:`\mathcal{E}` with a *target* unitary
:math:`U`.
Args:
channel (QuantumChannel): noisy quantum channel.
target (Operator or None): target unitary operator.
            If `None`, the target is the identity operator [Default: None].
require_cp (bool): require channel to be completely-positive
[Default: True].
require_tp (bool): require channel to be trace-preserving
[Default: False].
Returns:
float: The average gate error :math:`E`.
Raises:
QiskitError: if the channel and target do not have the same dimensions,
or have different input and output dimensions.
        QiskitError: if the channel or target are not completely-positive
(with ``require_cp=True``) or not trace-preserving
(with ``require_tp=True``).
"""
# Format inputs
channel = _input_formatter(
channel, SuperOp, 'gate_error', 'channel')
target = _input_formatter(
target, Operator, 'gate_error', 'target')
return 1 - average_gate_fidelity(
channel, target=target, require_cp=require_cp, require_tp=require_tp)
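# Continuing the worked example above: the same d = 2 channel with
# F_ave = 0.98 has gate error E = 1 - F_ave = 0.02.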
def diamond_norm(choi, **kwargs):
r"""Return the diamond norm of the input quantum channel object.
This function computes the completely-bounded trace-norm (often
referred to as the diamond-norm) of the input quantum channel object
using the semidefinite-program from reference [1].
Args:
        choi (Choi or QuantumChannel): a quantum channel object or
Choi-matrix array.
kwargs: optional arguments to pass to CVXPY solver.
Returns:
float: The completely-bounded trace norm
:math:`\|\mathcal{E}\|_{\diamond}`.
Raises:
QiskitError: if CVXPY package cannot be found.
Additional Information:
The input to this function is typically *not* a CPTP quantum
channel, but rather the *difference* between two quantum channels
:math:`\|\Delta\mathcal{E}\|_\diamond` where
:math:`\Delta\mathcal{E} = \mathcal{E}_1 - \mathcal{E}_2`.
Reference:
J. Watrous. "Simpler semidefinite programs for completely bounded
norms", arXiv:1207.5726 [quant-ph] (2012).
.. note::
This function requires the optional CVXPY package to be installed.
        Any additional kwargs will be passed to the ``cvxpy.Problem.solve``
        method. See the CVXPY documentation for information on available
SDP solvers.
"""
_cvxpy_check('`diamond_norm`') # Check CVXPY is installed
choi = Choi(_input_formatter(choi, Choi, 'diamond_norm', 'choi'))
def cvx_bmat(mat_r, mat_i):
"""Block matrix for embedding complex matrix in reals"""
return cvxpy.bmat([[mat_r, -mat_i], [mat_i, mat_r]])
# Dimension of input and output spaces
dim_in = choi._input_dim
dim_out = choi._output_dim
size = dim_in * dim_out
# SDP Variables to convert to real valued problem
r0_r = cvxpy.Variable((dim_in, dim_in))
r0_i = cvxpy.Variable((dim_in, dim_in))
r0 = cvx_bmat(r0_r, r0_i)
r1_r = cvxpy.Variable((dim_in, dim_in))
r1_i = cvxpy.Variable((dim_in, dim_in))
r1 = cvx_bmat(r1_r, r1_i)
x_r = cvxpy.Variable((size, size))
x_i = cvxpy.Variable((size, size))
iden = sparse.eye(dim_out)
# Watrous uses row-vec convention for his Choi matrix while we use
    # col-vec. It turns out row-vec convention is required for CVXPY too
# since the cvxpy.kron function must have a constant as its first argument.
c_r = cvxpy.bmat([[cvxpy.kron(iden, r0_r), x_r], [x_r.T, cvxpy.kron(iden, r1_r)]])
c_i = cvxpy.bmat([[cvxpy.kron(iden, r0_i), x_i], [-x_i.T, cvxpy.kron(iden, r1_i)]])
c = cvx_bmat(c_r, c_i)
# Convert col-vec convention Choi-matrix to row-vec convention and
# then take Transpose: Choi_C -> Choi_R.T
choi_rt = np.transpose(
np.reshape(choi.data, (dim_in, dim_out, dim_in, dim_out)),
(3, 2, 1, 0)).reshape(choi.data.shape)
choi_rt_r = choi_rt.real
choi_rt_i = choi_rt.imag
# Constraints
cons = [
r0 >> 0, r0_r == r0_r.T, r0_i == - r0_i.T, cvxpy.trace(r0_r) == 1,
r1 >> 0, r1_r == r1_r.T, r1_i == - r1_i.T, cvxpy.trace(r1_r) == 1,
c >> 0
]
# Objective function
obj = cvxpy.Maximize(cvxpy.trace(choi_rt_r @ x_r) + cvxpy.trace(choi_rt_i @ x_i))
prob = cvxpy.Problem(obj, cons)
sol = prob.solve(**kwargs)
return sol
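# A minimal usage sketch (assumes the optional CVXPY package is installed;
# the identity channel below is illustrative only). The diamond norm of any
# CPTP map is 1, so:
#
#     >>> import numpy as np
#     >>> round(diamond_norm(Choi(Operator(np.eye(2)))), 4)  # doctest: +SKIP
#     1.0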
def _cvxpy_check(name):
"""Check that a supported CVXPY version is installed"""
# Check if CVXPY package is installed
if not _HAS_CVX:
raise QiskitError(
            'CVXPY package is required for {}. Install'
' with `pip install cvxpy` to use.'.format(name))
# Check CVXPY version
version = cvxpy.__version__
if version[0] != '1':
raise ImportError(
'Incompatible CVXPY version {} found.'
' Install version >=1.0.'.format(version))
# pylint: disable=too-many-return-statements
def _input_formatter(obj, fallback_class, func_name, arg_name):
"""Formatting function for input conversion"""
# Empty input
if obj is None:
return obj
# Channel-like input
if isinstance(obj, QuantumChannel):
return obj
if hasattr(obj, 'to_quantumchannel'):
return obj.to_quantumchannel()
if hasattr(obj, 'to_channel'):
return obj.to_channel()
# Unitary-like input
if isinstance(obj, (Gate, BaseOperator)):
return Operator(obj)
if hasattr(obj, 'to_operator'):
return obj.to_operator()
warnings.warn(
'Passing in a list or Numpy array to `{}` `{}` argument is '
'deprecated as of 0.17.0 since the matrix representation cannot be inferred '
        'unambiguously. Use a Gate or BaseOperator subclass (e.g. Operator, '
'SuperOp, Choi) object instead.'.format(func_name, arg_name),
DeprecationWarning)
warnings.warn(
'Treating array input as a {} object'.format(fallback_class.__name__))
return fallback_class(obj)
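# Dispatch summary for `_input_formatter` (an illustrative sketch):
# `Gate`/`BaseOperator` inputs become `Operator`s, channel-like inputs pass
# through unchanged, and raw arrays fall back to `fallback_class` with a
# deprecation warning.
#
#     >>> from qiskit.circuit.library import HGate
#     >>> type(_input_formatter(HGate(), SuperOp, 'f', 'channel')).__name__
#     'Operator'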
| 38.582011
| 88
| 0.637959
|
import warnings
import numpy as np
from scipy import sparse
from qiskit.exceptions import QiskitError
from qiskit.circuit.gate import Gate
from qiskit.quantum_info.operators.base_operator import BaseOperator
from qiskit.quantum_info.operators.operator import Operator
from qiskit.quantum_info.operators.channel.quantum_channel import QuantumChannel
from qiskit.quantum_info.operators.channel import Choi, SuperOp
from qiskit.quantum_info.states.densitymatrix import DensityMatrix
from qiskit.quantum_info.states.measures import state_fidelity
try:
import cvxpy
_HAS_CVX = True
except ImportError:
_HAS_CVX = False
def process_fidelity(channel,
target=None,
require_cp=True,
require_tp=False):
channel = _input_formatter(
channel, SuperOp, 'process_fidelity', 'channel')
target = _input_formatter(
target, Operator, 'process_fidelity', 'target')
if target:
if channel.dim != target.dim:
raise QiskitError(
'Input quantum channel and target unitary must have the same '
'dimensions ({} != {}).'.format(channel.dim, target.dim))
for label, chan in [('Input', channel), ('Target', target)]:
if isinstance(chan, Operator) and (require_cp or require_tp):
is_unitary = chan.is_unitary()
if require_cp and not is_unitary:
raise QiskitError('{} channel is not completely-positive'.format(label))
if require_tp and not is_unitary:
raise QiskitError('{} channel is not trace-preserving'.format(label))
elif chan is not None:
if require_cp and not chan.is_cp():
raise QiskitError('{} channel is not completely-positive'.format(label))
if require_tp and not chan.is_tp():
raise QiskitError('{} channel is not trace-preserving'.format(label))
if isinstance(target, Operator):
channel = channel @ target.adjoint()
target = None
input_dim, _ = channel.dim
if target is None:
if isinstance(channel, Operator):
fid = np.abs(np.trace(channel.data) / input_dim)**2
else:
fid = np.trace(SuperOp(channel).data) / (input_dim**2)
return float(np.real(fid))
state1 = DensityMatrix(Choi(channel).data / input_dim)
state2 = DensityMatrix(Choi(target).data / input_dim)
return state_fidelity(state1, state2, validate=False)
def average_gate_fidelity(channel,
target=None,
require_cp=True,
require_tp=False):
channel = _input_formatter(
channel, SuperOp, 'average_gate_fidelity', 'channel')
target = _input_formatter(
target, Operator, 'average_gate_fidelity', 'target')
if target is not None:
try:
target = Operator(target)
except QiskitError:
raise QiskitError(
'Target channel is not a unitary channel. To compare '
'two non-unitary channels use the '
'`qiskit.quantum_info.process_fidelity` function instead.')
dim, _ = channel.dim
f_pro = process_fidelity(channel,
target=target,
require_cp=require_cp,
require_tp=require_tp)
return (dim * f_pro + 1) / (dim + 1)
def gate_error(channel, target=None, require_cp=True, require_tp=False):
channel = _input_formatter(
channel, SuperOp, 'gate_error', 'channel')
target = _input_formatter(
target, Operator, 'gate_error', 'target')
return 1 - average_gate_fidelity(
channel, target=target, require_cp=require_cp, require_tp=require_tp)
def diamond_norm(choi, **kwargs):
_cvxpy_check('`diamond_norm`')
choi = Choi(_input_formatter(choi, Choi, 'diamond_norm', 'choi'))
def cvx_bmat(mat_r, mat_i):
return cvxpy.bmat([[mat_r, -mat_i], [mat_i, mat_r]])
dim_in = choi._input_dim
dim_out = choi._output_dim
size = dim_in * dim_out
r0_r = cvxpy.Variable((dim_in, dim_in))
r0_i = cvxpy.Variable((dim_in, dim_in))
r0 = cvx_bmat(r0_r, r0_i)
r1_r = cvxpy.Variable((dim_in, dim_in))
r1_i = cvxpy.Variable((dim_in, dim_in))
r1 = cvx_bmat(r1_r, r1_i)
x_r = cvxpy.Variable((size, size))
x_i = cvxpy.Variable((size, size))
iden = sparse.eye(dim_out)
c_r = cvxpy.bmat([[cvxpy.kron(iden, r0_r), x_r], [x_r.T, cvxpy.kron(iden, r1_r)]])
c_i = cvxpy.bmat([[cvxpy.kron(iden, r0_i), x_i], [-x_i.T, cvxpy.kron(iden, r1_i)]])
c = cvx_bmat(c_r, c_i)
choi_rt = np.transpose(
np.reshape(choi.data, (dim_in, dim_out, dim_in, dim_out)),
(3, 2, 1, 0)).reshape(choi.data.shape)
choi_rt_r = choi_rt.real
choi_rt_i = choi_rt.imag
cons = [
r0 >> 0, r0_r == r0_r.T, r0_i == - r0_i.T, cvxpy.trace(r0_r) == 1,
r1 >> 0, r1_r == r1_r.T, r1_i == - r1_i.T, cvxpy.trace(r1_r) == 1,
c >> 0
]
obj = cvxpy.Maximize(cvxpy.trace(choi_rt_r @ x_r) + cvxpy.trace(choi_rt_i @ x_i))
prob = cvxpy.Problem(obj, cons)
sol = prob.solve(**kwargs)
return sol
def _cvxpy_check(name):
if not _HAS_CVX:
raise QiskitError(
            'CVXPY package is required for {}. Install'
' with `pip install cvxpy` to use.'.format(name))
version = cvxpy.__version__
if version[0] != '1':
raise ImportError(
'Incompatible CVXPY version {} found.'
' Install version >=1.0.'.format(version))
def _input_formatter(obj, fallback_class, func_name, arg_name):
if obj is None:
return obj
if isinstance(obj, QuantumChannel):
return obj
if hasattr(obj, 'to_quantumchannel'):
return obj.to_quantumchannel()
if hasattr(obj, 'to_channel'):
return obj.to_channel()
if isinstance(obj, (Gate, BaseOperator)):
return Operator(obj)
if hasattr(obj, 'to_operator'):
return obj.to_operator()
warnings.warn(
'Passing in a list or Numpy array to `{}` `{}` argument is '
'deprecated as of 0.17.0 since the matrix representation cannot be inferred '
        'unambiguously. Use a Gate or BaseOperator subclass (e.g. Operator, '
'SuperOp, Choi) object instead.'.format(func_name, arg_name),
DeprecationWarning)
warnings.warn(
'Treating array input as a {} object'.format(fallback_class.__name__))
return fallback_class(obj)
| true
| true
|
f7195bbf84421b2fed723996ef1806b0e3e52004
| 2,465
|
py
|
Python
|
backend/fleet_management/tests/test_crypto.py
|
OtisRed/pah-fm
|
68a306fce5593a6f79711fa473a91bc8163b01df
|
[
"MIT"
] | 8
|
2019-08-09T11:06:16.000Z
|
2021-10-05T14:56:31.000Z
|
backend/fleet_management/tests/test_crypto.py
|
OtisRed/pah-fm
|
68a306fce5593a6f79711fa473a91bc8163b01df
|
[
"MIT"
] | 382
|
2018-10-17T19:05:30.000Z
|
2022-02-10T07:09:45.000Z
|
backend/fleet_management/tests/test_crypto.py
|
OtisRed/pah-fm
|
68a306fce5593a6f79711fa473a91bc8163b01df
|
[
"MIT"
] | 45
|
2018-10-17T17:04:04.000Z
|
2021-10-05T14:30:35.000Z
|
from secrets import randbits
from django.conf import settings
from rest_framework.test import APISimpleTestCase
from fleet_management.crypto import (
sign,
verify,
inverse_of,
is_prime,
find_prime,
find_p_q_phi,
find_pair_of_keys,
hash_dict,
)
class CryptoTest(APISimpleTestCase):
def setUp(self) -> None:
self.n_tests = 1000
def test_sign_and_verify(self):
        for _ in range(self.n_tests):
message = randbits(settings.RSA_BIT_LENGTH)
pub, priv = find_pair_of_keys()
signature = sign(message, priv)
self.assertTrue(verify(message, signature, pub))
self.assertFalse(verify(message + 1, signature, pub))
def test_inverse_of(self):
self.assertEqual(inverse_of(2, 3), 2)
self.assertEqual(inverse_of(53, 120), 77)
self.assertEqual(inverse_of(1123, 18712), 17379)
self.assertEqual(inverse_of(98751, 123719989), 68419280)
self.assertEqual(
inverse_of(65537, 1034776851837418226012406113933120080),
568411228254986589811047501435713,
)
def test_is_prime(self):
self.assertTrue(is_prime(2))
self.assertTrue(is_prime(5))
self.assertTrue(is_prime(41))
self.assertTrue(is_prime(97571))
self.assertTrue(is_prime(56790763))
self.assertTrue(is_prime(967901315627))
self.assertFalse(is_prime(1))
self.assertFalse(is_prime(12))
self.assertFalse(is_prime(42))
self.assertFalse(is_prime(2737075))
self.assertFalse(is_prime(273707521121))
def test_find_prime(self):
for _ in range(self.n_tests):
prime = find_prime(settings.RSA_BIT_LENGTH)
self.assertTrue(is_prime(prime))
def test_find_p_q_phi(self):
for _ in range(self.n_tests):
p, q, phi = find_p_q_phi()
my_phi = (p - 1) * (q - 1)
self.assertTrue(is_prime(p))
self.assertTrue(is_prime(q))
self.assertEqual(phi, my_phi)
def test_hash_dict(self):
self.assertEqual(hash_dict({}), 17022)
self.assertEqual(hash_dict({1: 1}), 361627)
self.assertEqual(hash_dict({1: 1, "asd": "asd"}), 319826)
self.assertEqual(hash_dict({1: 1, "asd": "asd", 9: [1, 2, 3]}), 319976)
self.assertEqual(hash_dict({1: {2: {3: {4: {5: {}}}}}}), 17022)
self.assertEqual(hash_dict({1: {2: {3: {4: {5: "x"}}}}}), 288678)
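# The sign/verify round-trip exercised above relies on the textbook-RSA
# identity (a sketch of the assumed `fleet_management.crypto` internals,
# not a verified copy of them): with key pair (e, d) and modulus n where
# e*d == 1 (mod phi(n)),
#
#     signature = pow(message, d, n)           # sign with the private key
#     pow(signature, e, n) == message % n      # verification succeeds
#
# which is why verify(message + 1, signature, pub) above must fail.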
| 34.236111
| 79
| 0.627586
|
from secrets import randbits
from django.conf import settings
from rest_framework.test import APISimpleTestCase
from fleet_management.crypto import (
sign,
verify,
inverse_of,
is_prime,
find_prime,
find_p_q_phi,
find_pair_of_keys,
hash_dict,
)
class CryptoTest(APISimpleTestCase):
def setUp(self) -> None:
self.n_tests = 1000
def test_sign_and_verify(self):
        for _ in range(self.n_tests):
message = randbits(settings.RSA_BIT_LENGTH)
pub, priv = find_pair_of_keys()
signature = sign(message, priv)
self.assertTrue(verify(message, signature, pub))
self.assertFalse(verify(message + 1, signature, pub))
def test_inverse_of(self):
self.assertEqual(inverse_of(2, 3), 2)
self.assertEqual(inverse_of(53, 120), 77)
self.assertEqual(inverse_of(1123, 18712), 17379)
self.assertEqual(inverse_of(98751, 123719989), 68419280)
self.assertEqual(
inverse_of(65537, 1034776851837418226012406113933120080),
568411228254986589811047501435713,
)
def test_is_prime(self):
self.assertTrue(is_prime(2))
self.assertTrue(is_prime(5))
self.assertTrue(is_prime(41))
self.assertTrue(is_prime(97571))
self.assertTrue(is_prime(56790763))
self.assertTrue(is_prime(967901315627))
self.assertFalse(is_prime(1))
self.assertFalse(is_prime(12))
self.assertFalse(is_prime(42))
self.assertFalse(is_prime(2737075))
self.assertFalse(is_prime(273707521121))
def test_find_prime(self):
for _ in range(self.n_tests):
prime = find_prime(settings.RSA_BIT_LENGTH)
self.assertTrue(is_prime(prime))
def test_find_p_q_phi(self):
for _ in range(self.n_tests):
p, q, phi = find_p_q_phi()
my_phi = (p - 1) * (q - 1)
self.assertTrue(is_prime(p))
self.assertTrue(is_prime(q))
self.assertEqual(phi, my_phi)
def test_hash_dict(self):
self.assertEqual(hash_dict({}), 17022)
self.assertEqual(hash_dict({1: 1}), 361627)
self.assertEqual(hash_dict({1: 1, "asd": "asd"}), 319826)
self.assertEqual(hash_dict({1: 1, "asd": "asd", 9: [1, 2, 3]}), 319976)
self.assertEqual(hash_dict({1: {2: {3: {4: {5: {}}}}}}), 17022)
self.assertEqual(hash_dict({1: {2: {3: {4: {5: "x"}}}}}), 288678)
| true
| true
|
f7195c0c364fe7bce695c427067417d2cf71be24
| 3,496
|
py
|
Python
|
bindings/python/ensmallen/datasets/string/pontibacterchinhatensis.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5
|
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/string/pontibacterchinhatensis.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18
|
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/pontibacterchinhatensis.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3
|
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Pontibacter chinhatensis.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def PontibacterChinhatensis(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Pontibacter chinhatensis graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
    load_nodes: bool = True
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Pontibacter chinhatensis graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="PontibacterChinhatensis",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
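# A minimal usage sketch (assumes the `ensmallen` package is installed and an
# internet connection is available for the first retrieval; later calls hit
# the local cache):
#
#     >>> graph = PontibacterChinhatensis(version="links.v11.5")  # doctest: +SKIP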
| 33.295238
| 223
| 0.67992
|
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph
def PontibacterChinhatensis(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
return AutomaticallyRetrievedGraph(
graph_name="PontibacterChinhatensis",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true
| true
|
f7195c158eefd24c981c80cfe2493d7d0991e7c0
| 1,263
|
py
|
Python
|
jesse/indicators/supersmoother.py
|
noenfugler/jesse
|
217a3168620a755c1a9576d9deb27105db7dccf8
|
[
"MIT"
] | 1
|
2021-03-25T09:25:49.000Z
|
2021-03-25T09:25:49.000Z
|
jesse/indicators/supersmoother.py
|
noenfugler/jesse
|
217a3168620a755c1a9576d9deb27105db7dccf8
|
[
"MIT"
] | null | null | null |
jesse/indicators/supersmoother.py
|
noenfugler/jesse
|
217a3168620a755c1a9576d9deb27105db7dccf8
|
[
"MIT"
] | 1
|
2021-09-28T16:23:40.000Z
|
2021-09-28T16:23:40.000Z
|
from typing import Union
import numpy as np
from numba import njit
from jesse.helpers import get_candle_source, slice_candles
def supersmoother(candles: np.ndarray, period: int = 14, source_type: str = "close", sequential: bool = False) -> Union[
float, np.ndarray]:
"""
    Super Smoother Filter (2-pole Butterworth).
    This indicator was described by John F. Ehlers.
:param candles: np.ndarray
:param period: int - default=14
:param source_type: str - default: "close"
:param sequential: bool - default=False
:return: float | np.ndarray
"""
candles = slice_candles(candles, sequential)
# Accept normal array too.
if len(candles.shape) == 1:
source = candles
else:
source = get_candle_source(candles, source_type=source_type)
res = supersmoother_fast(source, period)
return res if sequential else res[-1]
@njit
def supersmoother_fast(source, period):
a = np.exp(-1.414 * np.pi / period)
b = 2 * a * np.cos(1.414 * np.pi / period)
newseries = np.copy(source)
for i in range(2, source.shape[0]):
newseries[i] = (1 + a ** 2 - b) / 2 * (source[i] + source[i - 1]) \
+ b * newseries[i - 1] - a ** 2 * newseries[i - 2]
return newseries
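# The recursion above implements Ehlers' 2-pole super smoother
#
#     y[i] = c1 * (x[i] + x[i-1]) / 2 + c2 * y[i-1] + c3 * y[i-2]
#
# with a = exp(-sqrt(2) * pi / period) (1.414 approximates sqrt(2)),
# c1 = 1 + a**2 - b, c2 = b = 2 * a * cos(sqrt(2) * pi / period) and
# c3 = -a**2. A usage sketch, assuming candles in jesse's format:
#
#     >>> ss = supersmoother(candles, period=14, sequential=True)  # doctest: +SKIP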
| 28.066667
| 120
| 0.638163
|
from typing import Union
import numpy as np
from numba import njit
from jesse.helpers import get_candle_source, slice_candles
def supersmoother(candles: np.ndarray, period: int = 14, source_type: str = "close", sequential: bool = False) -> Union[
float, np.ndarray]:
candles = slice_candles(candles, sequential)
if len(candles.shape) == 1:
source = candles
else:
source = get_candle_source(candles, source_type=source_type)
res = supersmoother_fast(source, period)
return res if sequential else res[-1]
@njit
def supersmoother_fast(source, period):
a = np.exp(-1.414 * np.pi / period)
b = 2 * a * np.cos(1.414 * np.pi / period)
newseries = np.copy(source)
for i in range(2, source.shape[0]):
newseries[i] = (1 + a ** 2 - b) / 2 * (source[i] + source[i - 1]) \
+ b * newseries[i - 1] - a ** 2 * newseries[i - 2]
return newseries
| true
| true
|
f7195cbfea553513473b2df5d5bf67f1cff230ba
| 12,078
|
py
|
Python
|
cellrank/pl/_circular_projection.py
|
WeilerP/cellrank
|
c8c2b9f6bd2448861fb414435aee7620ca5a0bad
|
[
"BSD-3-Clause"
] | 172
|
2020-03-19T19:50:53.000Z
|
2022-03-28T09:36:04.000Z
|
cellrank/pl/_circular_projection.py
|
WeilerP/cellrank
|
c8c2b9f6bd2448861fb414435aee7620ca5a0bad
|
[
"BSD-3-Clause"
] | 702
|
2020-03-19T08:09:04.000Z
|
2022-03-30T09:55:14.000Z
|
cellrank/pl/_circular_projection.py
|
WeilerP/cellrank
|
c8c2b9f6bd2448861fb414435aee7620ca5a0bad
|
[
"BSD-3-Clause"
] | 17
|
2020-04-07T03:11:02.000Z
|
2022-02-02T20:39:16.000Z
|
from typing import Any, Tuple, Union, Mapping, Callable, Optional, Sequence
from typing_extensions import Literal
from enum import auto
from types import MappingProxyType
from pathlib import Path
import scvelo as scv
from anndata import AnnData
from cellrank import logging as logg
from cellrank.tl import Lineage
from cellrank._key import Key
from scanpy._utils import deprecated_arg_names
from cellrank.tl._enum import ModeEnum
from cellrank.ul._docs import d
from cellrank.pl._utils import _held_karp
from cellrank.tl._utils import save_fig, _unique_order_preserving
from cellrank.ul._utils import _check_collection
from cellrank.tl._lineage import PrimingDegree
import numpy as np
import pandas as pd
from sklearn.metrics import pairwise_distances
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm, LinearSegmentedColormap
from matplotlib.collections import LineCollection
class LineageOrder(ModeEnum): # noqa: D101
DEFAULT = auto()
OPTIMAL = auto()
class LabelRot(ModeEnum): # noqa: D101
DEFAULT = auto()
BEST = auto()
Metric_T = Union[str, Callable, np.ndarray, pd.DataFrame]
_N = 200
def _get_distances(data: Union[np.ndarray, Lineage], metric: Metric_T) -> np.ndarray:
if isinstance(data, Lineage):
data = data.X
if isinstance(metric, str) or callable(metric):
metric = pairwise_distances(data.T, metric=metric)
elif isinstance(metric, (pd.DataFrame, np.ndarray)):
shape = (data.shape[1], data.shape[1])
if metric.shape != shape:
raise ValueError(
f"Expected an `numpy.array` or `pandas.DataFrame` of shape `{shape}`, found `{metric.shape}`."
)
else:
raise TypeError(
f"Expected either metric defined by `str`, `callable` or a pairwise distance matrix of type"
f" `numpy.ndarray` or `pandas.DataFrame`, found `{type(metric).__name__}`."
)
return np.asarray(metric, dtype=np.float64)
def _get_optimal_order(data: Lineage, metric: Metric_T) -> Tuple[float, np.ndarray]:
"""Solve the TSP using dynamic programming."""
return _held_karp(_get_distances(data, metric))
@d.dedent
@deprecated_arg_names({"labeldistance": "label_distance", "labelrot": "label_rot"})
def circular_projection(
adata: AnnData,
keys: Union[str, Sequence[str]],
backward: bool = False,
lineages: Optional[Union[str, Sequence[str]]] = None,
early_cells: Optional[Union[Mapping[str, Sequence[str]], Sequence[str]]] = None,
lineage_order: Optional[Literal["default", "optimal"]] = None,
metric: Union[str, Callable, np.ndarray, pd.DataFrame] = "correlation",
normalize_by_mean: bool = True,
ncols: int = 4,
space: float = 0.25,
use_raw: bool = False,
text_kwargs: Mapping[str, Any] = MappingProxyType({}),
label_distance: float = 1.25,
label_rot: Union[Literal["default", "best"], float] = "best",
show_edges: bool = True,
key_added: Optional[str] = None,
figsize: Optional[Tuple[float, float]] = None,
dpi: Optional[int] = None,
save: Optional[Union[str, Path]] = None,
**kwargs: Any,
):
r"""
Plot absorption probabilities on a circular embedding as in :cite:`velten:17`.
Parameters
----------
%(adata)s
keys
Keys in :attr:`anndata.AnnData.obs` or :attr:`anndata.AnnData.var_names`. Additional keys are:
- `'kl_divergence'` - as in :cite:`velten:17`, computes KL-divergence between the fate probabilities
of a cell and the average fate probabilities. See ``early_cells`` for more information.
        - `'entropy'` - as in :cite:`setty:19`, computes entropy over a cell's fate probabilities.
%(backward)s
lineages
Lineages to plot. If `None`, plot all lineages.
early_cells
Cell ids or a mask marking early cells used to define the average fate probabilities. If `None`, use all cells.
Only used when `'kl_divergence'` is in ``keys``. If a :class:`dict`, key specifies a cluster key in
:attr:`anndata.AnnData.obs` and the values specify cluster labels containing early cells.
lineage_order
Can be one of the following:
        - `None` - it will be determined automatically, based on the number of lineages.
- `'optimal'` - order lineages optimally by solving the Travelling salesman problem (TSP).
Recommended for <= `20` lineages.
- `'default'` - use the order as specified by ``lineages``.
metric
Metric to use when constructing pairwise distance matrix when ``lineage_order = 'optimal'``. For available
options, see :func:`sklearn.metrics.pairwise_distances`.
normalize_by_mean
If `True`, normalize each lineage by its mean probability, as done in :cite:`velten:17`.
ncols
Number of columns when plotting multiple ``keys``.
space
        Horizontal and vertical space between panels, passed to :func:`matplotlib.pyplot.subplots_adjust`.
use_raw
Whether to access :attr:`anndata.AnnData.raw` when there are ``keys`` in :attr:`anndata.AnnData.var_names`.
text_kwargs
Keyword arguments for :func:`matplotlib.pyplot.text`.
label_distance
Distance at which the lineage labels will be drawn.
label_rot
How to rotate the labels. Valid options are:
- `'best'` - rotate labels so that they are easily readable.
- `'default'` - use :mod:`matplotlib`'s default.
- `None` - same as `'default'`.
If a :class:`float`, all labels will be rotated by this many degrees.
show_edges
Whether to show the edges surrounding the simplex.
key_added
Key in :attr:`anndata.AnnData.obsm` where to add the circular embedding. If `None`, it will be set to
`'X_fate_simplex_{fwd,bwd}'`, based on ``backward``.
%(plotting)s
kwargs
Keyword arguments for :func:`scvelo.pl.scatter`.
Returns
-------
%(just_plots)s
Also updates ``adata`` with the following fields:
- :attr:`anndata.AnnData.obsm` ``['{key_added}']`` - the circular projection.
- :attr:`anndata.AnnData.obs` ``['to_{initial,terminal}_states_{method}']`` - the priming degree,
if a method is present in ``keys``.
"""
if label_distance is not None and label_distance < 0:
raise ValueError(
f"Expected `label_distance` to be positive, found `{label_distance}`."
)
if label_rot is None:
label_rot = LabelRot.DEFAULT
label_rot = LabelRot(label_rot)
suffix = "bwd" if backward else "fwd"
if key_added is None:
key_added = "X_fate_simplex_" + suffix
if isinstance(keys, str):
keys = (keys,)
keys = _unique_order_preserving(keys)
keys_ = _check_collection(
adata, keys, "obs", key_name="Observation", raise_exc=False
) + _check_collection(
adata, keys, "var_names", key_name="Gene", raise_exc=False, use_raw=use_raw
)
haystack = set(PrimingDegree)
keys = keys_ + [k for k in keys if k in haystack]
keys = _unique_order_preserving(keys)
if not len(keys):
raise ValueError("No valid keys have been selected.")
lineage_key = Key.obsm.abs_probs(backward)
if lineage_key not in adata.obsm:
raise KeyError(f"Lineages key `{lineage_key!r}` not found in `adata.obsm`.")
probs: Lineage = adata.obsm[lineage_key]
if isinstance(lineages, str):
lineages = (lineages,)
elif lineages is None:
lineages = probs.names
probs = adata.obsm[lineage_key][lineages]
n_lin = probs.shape[1]
if n_lin < 3:
raise ValueError(f"Expected at least `3` lineages, found `{n_lin}`.")
X = probs.X.copy()
if normalize_by_mean:
X /= np.mean(X, axis=0)[None, :]
X /= X.sum(1)[:, None]
    # this happens when a cell's probabilities for the selected lineages sum to 0 (or when the lineage average is 0, which is unlikely)
X = np.nan_to_num(X, nan=1.0 / n_lin, copy=False)
if lineage_order is None:
lineage_order = (
LineageOrder.OPTIMAL if 3 < n_lin <= 20 else LineageOrder.DEFAULT
)
logg.debug(f"Set ordering to `{lineage_order}`")
lineage_order = LineageOrder(lineage_order)
if lineage_order == LineageOrder.OPTIMAL:
logg.info(f"Solving TSP for `{n_lin}` states")
_, order = _get_optimal_order(X, metric=metric)
else:
order = np.arange(n_lin)
probs = probs[:, order]
X = X[:, order]
angle_vec = np.linspace(0, 2 * np.pi, n_lin, endpoint=False)
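    # NOTE: the suffixes below are swapped relative to the trig functions used:
    # `angle_vec_sin` stores cos(angle) (x components) and `angle_vec_cos`
    # stores sin(angle) (y components). The projection uses them consistently,
    # so the embedding itself is unaffected.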
angle_vec_sin = np.cos(angle_vec)
angle_vec_cos = np.sin(angle_vec)
x = np.sum(X * angle_vec_sin, axis=1)
y = np.sum(X * angle_vec_cos, axis=1)
adata.obsm[key_added] = np.c_[x, y]
nrows = int(np.ceil(len(keys) / ncols))
fig, ax = plt.subplots(
nrows=nrows,
ncols=ncols,
figsize=(ncols * 5, nrows * 5) if figsize is None else figsize,
dpi=dpi,
)
fig.subplots_adjust(wspace=space, hspace=space)
axes = np.ravel([ax])
text_kwargs = dict(text_kwargs)
text_kwargs["ha"] = "center"
text_kwargs["va"] = "center"
_i = 0
for _i, (k, ax) in enumerate(zip(keys, axes)):
set_lognorm, colorbar = False, kwargs.pop("colorbar", True)
try:
_ = PrimingDegree(k)
logg.debug(f"Calculating priming degree using `method={k}`")
val = probs.priming_degree(method=k, early_cells=early_cells)
k = f"{lineage_key}_{k}"
adata.obs[k] = val
except ValueError:
pass
scv.pl.scatter(
adata,
basis=key_added,
color=k,
show=False,
ax=ax,
use_raw=use_raw,
norm=LogNorm() if set_lognorm else None,
colorbar=colorbar,
**kwargs,
)
if colorbar and set_lognorm:
cbar = ax.collections[0].colorbar
cax = cbar.locator.axis
ticks = cax.minor.locator.tick_values(cbar.vmin, cbar.vmax)
ticks = [ticks[0], ticks[len(ticks) // 2 + 1], ticks[-1]]
cbar.set_ticks(ticks)
cbar.set_ticklabels([f"{t:.2f}" for t in ticks])
cbar.update_ticks()
patches, texts = ax.pie(
np.ones_like(angle_vec),
labeldistance=label_distance,
rotatelabels=True,
labels=probs.names[::-1],
startangle=-360 / len(angle_vec) / 2,
counterclock=False,
textprops=text_kwargs,
)
for patch in patches:
patch.set_visible(False)
# clockwise
for color, text in zip(probs.colors[::-1], texts):
if isinstance(label_rot, (int, float)):
text.set_rotation(label_rot)
elif label_rot == LabelRot.BEST:
rot = text.get_rotation()
text.set_rotation(rot + 90 + (1 - rot // 180) * 180)
elif label_rot != LabelRot.DEFAULT:
raise NotImplementedError(
f"Label rotation `{label_rot}` is not yet implemented."
)
text.set_color(color)
if not show_edges:
continue
for i, color in enumerate(probs.colors):
next = (i + 1) % n_lin
x = 1.04 * np.linspace(angle_vec_sin[i], angle_vec_sin[next], _N)
y = 1.04 * np.linspace(angle_vec_cos[i], angle_vec_cos[next], _N)
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
cmap = LinearSegmentedColormap.from_list(
"abs_prob_cmap", [color, probs.colors[next]], N=_N
)
lc = LineCollection(segments, cmap=cmap, zorder=-1)
lc.set_array(np.linspace(0, 1, _N))
lc.set_linewidth(2)
ax.add_collection(lc)
for j in range(_i + 1, len(axes)):
axes[j].remove()
if save is not None:
save_fig(fig, save)
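# Geometry of the embedding computed above (an illustrative summary): each of
# the n selected lineages sits at angle theta_j = 2 * pi * j / n on the unit
# circle, and a cell with normalized fate probabilities p is placed at
#
#     x = sum_j p_j * cos(theta_j),    y = sum_j p_j * sin(theta_j)
#
# so fully committed cells land on the circle at their lineage's position and
# maximally uncommitted cells collapse toward the origin.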
| 35.946429
| 119
| 0.626925
|
from typing import Any, Tuple, Union, Mapping, Callable, Optional, Sequence
from typing_extensions import Literal
from enum import auto
from types import MappingProxyType
from pathlib import Path
import scvelo as scv
from anndata import AnnData
from cellrank import logging as logg
from cellrank.tl import Lineage
from cellrank._key import Key
from scanpy._utils import deprecated_arg_names
from cellrank.tl._enum import ModeEnum
from cellrank.ul._docs import d
from cellrank.pl._utils import _held_karp
from cellrank.tl._utils import save_fig, _unique_order_preserving
from cellrank.ul._utils import _check_collection
from cellrank.tl._lineage import PrimingDegree
import numpy as np
import pandas as pd
from sklearn.metrics import pairwise_distances
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm, LinearSegmentedColormap
from matplotlib.collections import LineCollection
class LineageOrder(ModeEnum):
DEFAULT = auto()
OPTIMAL = auto()
class LabelRot(ModeEnum):
DEFAULT = auto()
BEST = auto()
Metric_T = Union[str, Callable, np.ndarray, pd.DataFrame]
_N = 200
def _get_distances(data: Union[np.ndarray, Lineage], metric: Metric_T) -> np.ndarray:
if isinstance(data, Lineage):
data = data.X
if isinstance(metric, str) or callable(metric):
metric = pairwise_distances(data.T, metric=metric)
elif isinstance(metric, (pd.DataFrame, np.ndarray)):
shape = (data.shape[1], data.shape[1])
if metric.shape != shape:
raise ValueError(
f"Expected an `numpy.array` or `pandas.DataFrame` of shape `{shape}`, found `{metric.shape}`."
)
else:
raise TypeError(
f"Expected either metric defined by `str`, `callable` or a pairwise distance matrix of type"
f" `numpy.ndarray` or `pandas.DataFrame`, found `{type(metric).__name__}`."
)
return np.asarray(metric, dtype=np.float64)
def _get_optimal_order(data: Lineage, metric: Metric_T) -> Tuple[float, np.ndarray]:
return _held_karp(_get_distances(data, metric))
@d.dedent
@deprecated_arg_names({"labeldistance": "label_distance", "labelrot": "label_rot"})
def circular_projection(
adata: AnnData,
keys: Union[str, Sequence[str]],
backward: bool = False,
lineages: Optional[Union[str, Sequence[str]]] = None,
early_cells: Optional[Union[Mapping[str, Sequence[str]], Sequence[str]]] = None,
lineage_order: Optional[Literal["default", "optimal"]] = None,
metric: Union[str, Callable, np.ndarray, pd.DataFrame] = "correlation",
normalize_by_mean: bool = True,
ncols: int = 4,
space: float = 0.25,
use_raw: bool = False,
text_kwargs: Mapping[str, Any] = MappingProxyType({}),
label_distance: float = 1.25,
label_rot: Union[Literal["default", "best"], float] = "best",
show_edges: bool = True,
key_added: Optional[str] = None,
figsize: Optional[Tuple[float, float]] = None,
dpi: Optional[int] = None,
save: Optional[Union[str, Path]] = None,
**kwargs: Any,
):
if label_distance is not None and label_distance < 0:
raise ValueError(
f"Expected `label_distance` to be positive, found `{label_distance}`."
)
if label_rot is None:
label_rot = LabelRot.DEFAULT
label_rot = LabelRot(label_rot)
suffix = "bwd" if backward else "fwd"
if key_added is None:
key_added = "X_fate_simplex_" + suffix
if isinstance(keys, str):
keys = (keys,)
keys = _unique_order_preserving(keys)
keys_ = _check_collection(
adata, keys, "obs", key_name="Observation", raise_exc=False
) + _check_collection(
adata, keys, "var_names", key_name="Gene", raise_exc=False, use_raw=use_raw
)
haystack = set(PrimingDegree)
keys = keys_ + [k for k in keys if k in haystack]
keys = _unique_order_preserving(keys)
if not len(keys):
raise ValueError("No valid keys have been selected.")
lineage_key = Key.obsm.abs_probs(backward)
if lineage_key not in adata.obsm:
raise KeyError(f"Lineages key `{lineage_key!r}` not found in `adata.obsm`.")
probs: Lineage = adata.obsm[lineage_key]
if isinstance(lineages, str):
lineages = (lineages,)
elif lineages is None:
lineages = probs.names
probs = adata.obsm[lineage_key][lineages]
n_lin = probs.shape[1]
if n_lin < 3:
raise ValueError(f"Expected at least `3` lineages, found `{n_lin}`.")
X = probs.X.copy()
if normalize_by_mean:
X /= np.mean(X, axis=0)[None, :]
X /= X.sum(1)[:, None]
X = np.nan_to_num(X, nan=1.0 / n_lin, copy=False)
if lineage_order is None:
lineage_order = (
LineageOrder.OPTIMAL if 3 < n_lin <= 20 else LineageOrder.DEFAULT
)
logg.debug(f"Set ordering to `{lineage_order}`")
lineage_order = LineageOrder(lineage_order)
if lineage_order == LineageOrder.OPTIMAL:
logg.info(f"Solving TSP for `{n_lin}` states")
_, order = _get_optimal_order(X, metric=metric)
else:
order = np.arange(n_lin)
probs = probs[:, order]
X = X[:, order]
angle_vec = np.linspace(0, 2 * np.pi, n_lin, endpoint=False)
angle_vec_sin = np.cos(angle_vec)
angle_vec_cos = np.sin(angle_vec)
x = np.sum(X * angle_vec_sin, axis=1)
y = np.sum(X * angle_vec_cos, axis=1)
adata.obsm[key_added] = np.c_[x, y]
nrows = int(np.ceil(len(keys) / ncols))
fig, ax = plt.subplots(
nrows=nrows,
ncols=ncols,
figsize=(ncols * 5, nrows * 5) if figsize is None else figsize,
dpi=dpi,
)
fig.subplots_adjust(wspace=space, hspace=space)
axes = np.ravel([ax])
text_kwargs = dict(text_kwargs)
text_kwargs["ha"] = "center"
text_kwargs["va"] = "center"
_i = 0
for _i, (k, ax) in enumerate(zip(keys, axes)):
set_lognorm, colorbar = False, kwargs.pop("colorbar", True)
try:
_ = PrimingDegree(k)
logg.debug(f"Calculating priming degree using `method={k}`")
val = probs.priming_degree(method=k, early_cells=early_cells)
k = f"{lineage_key}_{k}"
adata.obs[k] = val
except ValueError:
pass
scv.pl.scatter(
adata,
basis=key_added,
color=k,
show=False,
ax=ax,
use_raw=use_raw,
norm=LogNorm() if set_lognorm else None,
colorbar=colorbar,
**kwargs,
)
if colorbar and set_lognorm:
cbar = ax.collections[0].colorbar
cax = cbar.locator.axis
ticks = cax.minor.locator.tick_values(cbar.vmin, cbar.vmax)
ticks = [ticks[0], ticks[len(ticks) // 2 + 1], ticks[-1]]
cbar.set_ticks(ticks)
cbar.set_ticklabels([f"{t:.2f}" for t in ticks])
cbar.update_ticks()
patches, texts = ax.pie(
np.ones_like(angle_vec),
labeldistance=label_distance,
rotatelabels=True,
labels=probs.names[::-1],
startangle=-360 / len(angle_vec) / 2,
counterclock=False,
textprops=text_kwargs,
)
for patch in patches:
patch.set_visible(False)
for color, text in zip(probs.colors[::-1], texts):
if isinstance(label_rot, (int, float)):
text.set_rotation(label_rot)
elif label_rot == LabelRot.BEST:
rot = text.get_rotation()
text.set_rotation(rot + 90 + (1 - rot // 180) * 180)
elif label_rot != LabelRot.DEFAULT:
raise NotImplementedError(
f"Label rotation `{label_rot}` is not yet implemented."
)
text.set_color(color)
if not show_edges:
continue
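        # Draw a colour-gradient edge between each pair of adjacent lineage vertices,
        # interpolating from one lineage's colour to the next.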
        for i, color in enumerate(probs.colors):
            nxt = (i + 1) % n_lin
            x = 1.04 * np.linspace(angle_vec_cos[i], angle_vec_cos[nxt], _N)
            y = 1.04 * np.linspace(angle_vec_sin[i], angle_vec_sin[nxt], _N)
            points = np.array([x, y]).T.reshape(-1, 1, 2)
            segments = np.concatenate([points[:-1], points[1:]], axis=1)
            cmap = LinearSegmentedColormap.from_list(
                "abs_prob_cmap", [color, probs.colors[nxt]], N=_N
            )
lc = LineCollection(segments, cmap=cmap, zorder=-1)
lc.set_array(np.linspace(0, 1, _N))
lc.set_linewidth(2)
ax.add_collection(lc)
for j in range(_i + 1, len(axes)):
axes[j].remove()
if save is not None:
save_fig(fig, save)
| true
| true
|
f7195d706b8209cba3d1242687affeb52b7f4d89
| 10,085
|
py
|
Python
|
app/recipe/tests/test_recipe_api.py
|
ajayhb/recipe-app
|
226b1f4cce34412833a943e92d77f1f85775a2fc
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_recipe_api.py
|
ajayhb/recipe-app
|
226b1f4cce34412833a943e92d77f1f85775a2fc
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_recipe_api.py
|
ajayhb/recipe-app
|
226b1f4cce34412833a943e92d77f1f85775a2fc
|
[
"MIT"
] | null | null | null |
import tempfile
import os
from PIL import Image
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from decimal import Decimal
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPES_URL = reverse('recipe:recipe-list')
def image_upload_url(recipe_id):
"""Return url of recipe image upload"""
return reverse('recipe:recipe-upload-image', args=[recipe_id])
def detail_url(recipe_id):
    '''Return recipe detail url'''
return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name='Main Course'):
"""Create and Return a sample Tag"""
return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Cinnamon'):
"""Create and Return a sample ingredient"""
return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
'''Create and return a sample recipe'''
defaults = {
'title': 'Sample recipe',
'time_minutes': 3,
'price': 30.00,
}
defaults.update(params)
return Recipe.objects.create(user=user, **defaults)
class PublicRecipeApiTests(TestCase):
'''Test unauthenticated recipe api access'''
def setUp(self):
self.client = APIClient()
def test_auth_reqd(self):
'''Test that authentication is required'''
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
    '''Test authenticated recipe api access'''
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'ajay.b@servify.in',
'testpassword'
)
self.client.force_authenticate(self.user)
def test_retrieve_recipe(self):
'''Test retrieving the list of recipes'''
sample_recipe(user=self.user)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipe_limited_to_user(self):
'''Test retrieving recipes for user'''
user2 = get_user_model().objects.create_user(
'ajay1234@gmail.com',
'password213'
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
self.assertEqual(len(res.data), 1)
def test_view_recipe_detail(self):
'''Test viewing a recipe detail'''
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
self.assertEqual(res.data, serializer.data)
def test_create_basic_recipe(self):
'''Test creating recipe'''
payload = {
'title': 'Chocolate Cheesecake',
'time_minutes': 30,
'price': 53.34
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
        for key in payload.keys():
            # recipe.<key> can't be accessed dynamically, so getattr() is used;
            # price is stored as a Decimal, so compare against its exact value
            if key == 'price':
                self.assertEqual(getattr(recipe, key), Decimal('53.34'))
            else:
                self.assertEqual(payload[key], getattr(recipe, key))
def test_create_recipe_with_tags(self):
'''Test creating a recipe with tags'''
tag1 = sample_tag(user=self.user, name='Vegan')
tag2 = sample_tag(user=self.user, name='Dessert')
payload = {
'title': 'Avocado lime Cheesecake',
'tags': [tag1.id, tag2.id],
'time_minutes': 60,
'price': 353.34
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
tags = recipe.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_create_recipe_for_ingredients(self):
'''Test creating recipe with ingredients'''
ingredient1 = sample_ingredient(user=self.user, name='Noodles')
ingredient2 = sample_ingredient(user=self.user, name='Manchurian')
payload = {
'title': 'Manchurian Noodles',
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 15,
'price': 133.34
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
def test_partial_update_recipe(self):
'''Test updating a recipe with patch'''
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
new_tag = sample_tag(user=self.user, name='Curry')
payload = {
'title': 'Manchurian Paneer Noodles',
'tags': [new_tag.id]
}
url = detail_url(recipe.id)
self.client.patch(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
def test_full_update_recipe(self):
'''Test updating a recipe with put'''
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
payload = {
'title': 'Manchurian Paneer Spaghetti',
'time_minutes': 15,
'price': 133.00
}
url = detail_url(recipe.id)
self.client.put(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
self.assertEqual(recipe.time_minutes, payload['time_minutes'])
self.assertEqual(recipe.price, payload['price'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
class RecipeUploadImageTests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'user@123.com',
'testpass'
)
self.client.force_authenticate(self.user)
self.recipe = sample_recipe(user=self.user)
def tearDown(self):
'''Remove the temporary files after test runs'''
self.recipe.image.delete()
def test_upload_image_to_recipe(self):
'''Test for uploading an image to recipe'''
url = image_upload_url(self.recipe.id)
with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
img = Image.new('RGB', (10, 10))
img.save(ntf, format='JPEG')
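            # rewind to the start of the file so the POST body contains the saved bytes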
ntf.seek(0)
# print(url, ntf)
res = self.client.post(url, {'image': ntf}, format='multipart')
self.recipe.refresh_from_db()
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertIn('image', res.data)
self.assertTrue(os.path.exists(self.recipe.image.path))
def test_upload_image_bad_request(self):
'''test uploading an invalid image'''
url = image_upload_url(self.recipe.id)
res = self.client.post(url, {'image': 'notimage'}, format='multipart')
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_filter_recipes_by_tags(self):
'''Test returning recipes with specific tags'''
recipe1 = sample_recipe(user=self.user, title='Thai Curry')
recipe2 = sample_recipe(user=self.user, title='Dal chawal')
tag1 = sample_tag(user=self.user, name='Veggie')
tag2 = sample_tag(user=self.user, name='Jain')
recipe1.tags.add(tag1)
recipe2.tags.add(tag2)
recipe3 = sample_recipe(user=self.user, title='Machie')
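        # the API filters by a comma-separated list of tag ids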
res = self.client.get(
RECIPES_URL,
{'tags': f'{tag1.id},{tag2.id}'}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
def test_filter_recipes_by_ingredients(self):
'''Test returning recipes with specific ingredients'''
recipe1 = sample_recipe(user=self.user, title='Thai Curry')
recipe2 = sample_recipe(user=self.user, title='Dal chawal')
recipe3 = sample_recipe(user=self.user, title='Machie')
ingredient1 = sample_ingredient(user=self.user, name='Salt')
ingredient2 = sample_ingredient(user=self.user, name='Pakoda')
recipe1.ingredients.add(ingredient1)
recipe2.ingredients.add(ingredient2)
res = self.client.get(
RECIPES_URL,
{'ingredients': f'{ingredient1.id},{ingredient2.id}'}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
| 34.186441
| 78
| 0.64115
|
import tempfile
import os
from PIL import Image
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from decimal import Decimal
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPES_URL = reverse('recipe:recipe-list')
def image_upload_url(recipe_id):
return reverse('recipe:recipe-upload-image', args=[recipe_id])
def detail_url(recipe_id):
return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name='Main Course'):
return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Cinnamon'):
return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
defaults = {
'title': 'Sample recipe',
'time_minutes': 3,
'price': 30.00,
}
defaults.update(params)
return Recipe.objects.create(user=user, **defaults)
class PublicRecipeApiTests(TestCase):
def setUp(self):
self.client = APIClient()
def test_auth_reqd(self):
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'ajay.b@servify.in',
'testpassword'
)
self.client.force_authenticate(self.user)
def test_retrieve_recipe(self):
sample_recipe(user=self.user)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipe_limited_to_user(self):
user2 = get_user_model().objects.create_user(
'ajay1234@gmail.com',
'password213'
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
self.assertEqual(len(res.data), 1)
def test_view_recipe_detail(self):
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
self.assertEqual(res.data, serializer.data)
def test_create_basic_recipe(self):
payload = {
'title': 'Chocolate Cheesecake',
'time_minutes': 30,
'price': 53.34
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
        for key in payload.keys():
            if key == 'price':
                self.assertEqual(getattr(recipe, key), Decimal('53.34'))
            else:
                self.assertEqual(payload[key], getattr(recipe, key))
def test_create_recipe_with_tags(self):
tag1 = sample_tag(user=self.user, name='Vegan')
tag2 = sample_tag(user=self.user, name='Dessert')
payload = {
'title': 'Avocado lime Cheesecake',
'tags': [tag1.id, tag2.id],
'time_minutes': 60,
'price': 353.34
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
tags = recipe.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_create_recipe_for_ingredients(self):
ingredient1 = sample_ingredient(user=self.user, name='Noodles')
ingredient2 = sample_ingredient(user=self.user, name='Manchurian')
payload = {
'title': 'Manchurian Noodles',
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 15,
'price': 133.34
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
def test_partial_update_recipe(self):
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
new_tag = sample_tag(user=self.user, name='Curry')
payload = {
'title': 'Manchurian Paneer Noodles',
'tags': [new_tag.id]
}
url = detail_url(recipe.id)
self.client.patch(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
def test_full_update_recipe(self):
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
payload = {
'title': 'Manchurian Paneer Spaghetti',
'time_minutes': 15,
'price': 133.00
}
url = detail_url(recipe.id)
self.client.put(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
self.assertEqual(recipe.time_minutes, payload['time_minutes'])
self.assertEqual(recipe.price, payload['price'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
class RecipeUploadImageTests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'user@123.com',
'testpass'
)
self.client.force_authenticate(self.user)
self.recipe = sample_recipe(user=self.user)
def tearDown(self):
self.recipe.image.delete()
def test_upload_image_to_recipe(self):
url = image_upload_url(self.recipe.id)
with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
img = Image.new('RGB', (10, 10))
img.save(ntf, format='JPEG')
ntf.seek(0)
# print(url, ntf)
res = self.client.post(url, {'image': ntf}, format='multipart')
self.recipe.refresh_from_db()
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertIn('image', res.data)
self.assertTrue(os.path.exists(self.recipe.image.path))
def test_upload_image_bad_request(self):
url = image_upload_url(self.recipe.id)
res = self.client.post(url, {'image': 'notimage'}, format='multipart')
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_filter_recipes_by_tags(self):
recipe1 = sample_recipe(user=self.user, title='Thai Curry')
recipe2 = sample_recipe(user=self.user, title='Dal chawal')
tag1 = sample_tag(user=self.user, name='Veggie')
tag2 = sample_tag(user=self.user, name='Jain')
recipe1.tags.add(tag1)
recipe2.tags.add(tag2)
recipe3 = sample_recipe(user=self.user, title='Machie')
res = self.client.get(
RECIPES_URL,
{'tags': f'{tag1.id},{tag2.id}'}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
def test_filter_recipes_by_ingredients(self):
recipe1 = sample_recipe(user=self.user, title='Thai Curry')
recipe2 = sample_recipe(user=self.user, title='Dal chawal')
recipe3 = sample_recipe(user=self.user, title='Machie')
ingredient1 = sample_ingredient(user=self.user, name='Salt')
ingredient2 = sample_ingredient(user=self.user, name='Pakoda')
recipe1.ingredients.add(ingredient1)
recipe2.ingredients.add(ingredient2)
res = self.client.get(
RECIPES_URL,
{'ingredients': f'{ingredient1.id},{ingredient2.id}'}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
| true
| true
|
f7195f13467831480cf8e17b1264da8851ad2e64
| 21,774
|
py
|
Python
|
quant/platform/kucoin.py
|
yfjelley/thenextquant
|
5a2c4324ea390b513632ed2cc64d53314624e4ba
|
[
"MIT"
] | 2
|
2021-09-22T08:41:55.000Z
|
2021-11-05T01:45:27.000Z
|
quant/platform/kucoin.py
|
mrganer/thenextquant
|
52fb22f5df20d43cb275a08adad81dc97f25a712
|
[
"MIT"
] | 1
|
2019-10-25T05:25:28.000Z
|
2019-10-25T05:25:28.000Z
|
quant/platform/kucoin.py
|
mrganer/thenextquant
|
52fb22f5df20d43cb275a08adad81dc97f25a712
|
[
"MIT"
] | 4
|
2019-11-29T03:12:34.000Z
|
2021-09-19T02:59:29.000Z
|
# -*- coding:utf-8 -*-
"""
Kucoin Trade module.
https://docs.kucoin.com
Author: HuangTao
Date: 2019/08/01
Email: huangtao@ifclover.com
"""
import json
import copy
import hmac
import base64
import hashlib
from urllib.parse import urljoin
from quant.error import Error
from quant.utils import tools
from quant.utils import logger
from quant.const import KUCOIN
from quant.order import Order
from quant.asset import Asset, AssetSubscribe
from quant.tasks import SingleTask, LoopRunTask
from quant.utils.http_client import AsyncHttpRequests
from quant.utils.decorator import async_method_locker
from quant.order import ORDER_TYPE_LIMIT, ORDER_TYPE_MARKET
from quant.order import ORDER_ACTION_BUY, ORDER_ACTION_SELL
from quant.order import ORDER_STATUS_SUBMITTED, ORDER_STATUS_PARTIAL_FILLED, ORDER_STATUS_FILLED, \
ORDER_STATUS_CANCELED, ORDER_STATUS_FAILED, ORDER_STATUS_NONE
__all__ = ("KucoinRestAPI", "KucoinTrade", )
class KucoinRestAPI:
""" Kucoin REST API client.
Attributes:
host: HTTP request host.
access_key: Account"s ACCESS KEY.
secret_key: Account"s SECRET KEY.
passphrase: API KEY passphrase.
"""
def __init__(self, host, access_key, secret_key, passphrase):
"""initialize REST API client."""
self._host = host
self._access_key = access_key
self._secret_key = secret_key
self._passphrase = passphrase
async def get_sub_users(self):
"""Get the user info of all sub-users via this interface.
Returns:
            success: Success results, otherwise it's None.
            error: Error information, otherwise it's None.
"""
uri = "/api/v1/sub/user"
success, error = await self.request("GET", uri, auth=True)
return success, error
async def get_accounts(self, account_type=None, currency=None):
"""Get a list of accounts.
Args:
account_type: Account type, main or trade.
currency: Currency name, e.g. BTC, ETH ...
Returns:
            success: Success results, otherwise it's None.
            error: Error information, otherwise it's None.
"""
uri = "/api/v1/accounts"
params = {}
if account_type:
params["type"] = account_type
if currency:
params["currency"] = currency
success, error = await self.request("GET", uri, params=params, auth=True)
return success, error
async def get_account(self, account_id):
"""Information for a single account.
Args:
account_id: Account id.
Returns:
            success: Success results, otherwise it's None.
            error: Error information, otherwise it's None.
"""
uri = "/api/v1/accounts/{}".format(account_id)
success, error = await self.request("GET", uri, auth=True)
return success, error
async def create_account(self, account_type, currency):
"""Create a account.
Args:
account_type: Account type, main or trade.
currency: Currency name, e.g. BTC, ETH ...
Returns:
            success: Success results, otherwise it's None.
            error: Error information, otherwise it's None.
"""
uri = "/api/v1/accounts"
body = {
"type": account_type,
"currency": currency
}
success, error = await self.request("POST", uri, body=body, auth=True)
return success, error
async def create_order(self, client_id, side, symbol, order_type, price, size):
""" Add standard order.
Args:
client_id: Unique order id selected by you to identify your order.
side: Trade side, buy or sell.
symbol: A valid trading symbol code. e.g. ETH-BTC.
order_type: Order type, limit or market (default is limit).
price: Price per base currency.
size: Amount of base currency to buy or sell.
Returns:
            success: Success results, otherwise it's None.
            error: Error information, otherwise it's None.
"""
uri = "/api/v1/orders"
body = {
"clientOid": client_id,
"side": side,
"symbol": symbol,
"type": order_type,
"price": price,
"size": size
}
success, error = await self.request("POST", uri, body=body, auth=True)
return success, error
async def revoke_order(self, order_id):
""" Cancel a previously placed order.
Args:
order_id: Order ID, unique identifier of an order.
Returns:
            success: Success results, otherwise it's None.
            error: Error information, otherwise it's None.
"""
uri = "/api/v1/orders/{}".format(order_id)
success, error = await self.request("DELETE", uri, auth=True)
return success, error
async def revoke_orders_all(self, symbol=None):
""" Attempt to cancel all open orders. The response is a list of ids of the canceled orders.
Args:
symbol: A valid trading symbol code. e.g. ETH-BTC.
Returns:
            success: Success results, otherwise it's None.
            error: Error information, otherwise it's None.
"""
uri = "/api/v1/orders"
params = {}
if symbol:
params["symbol"] = symbol
success, error = await self.request("DELETE", uri, params=params, auth=True)
return success, error
async def get_order_list(self, status="active", symbol=None, order_type=None, start=None, end=None):
""" Get order information list.
Args:
status: Only list orders with a specific status, `active` or `done`, default is `active`.
symbol: A valid trading symbol code. e.g. ETH-BTC.
order_type: Order type, limit, market, limit_stop or market_stop.
start: Start time. Unix timestamp calculated in milliseconds will return only items which were created
after the start time.
end: End time. Unix timestamp calculated in milliseconds will return only items which were created
before the end time.
Returns:
            success: Success results, otherwise it's None.
            error: Error information, otherwise it's None.
"""
uri = "/api/v1/orders"
params = {"status": status}
if symbol:
params["symbol"] = symbol
if order_type:
params["type"] = order_type
if start:
params["startAt"] = start
if end:
params["endAt"] = end
success, error = await self.request("GET", uri, params=params, auth=True)
return success, error
async def get_order_detail(self, order_id):
""" Get a single order by order ID.
Args:
order_id: Order ID, unique identifier of an order.
Returns:
            success: Success results, otherwise it's None.
            error: Error information, otherwise it's None.
"""
uri = "/api/v1/orders/{}".format(order_id)
success, error = await self.request("GET", uri, auth=True)
return success, error
async def get_websocket_token(self, private=False):
""" Get a Websocket token from server.
Args:
private: If a private token, default is False.
Returns:
            success: Success results, otherwise it's None.
            error: Error information, otherwise it's None.
"""
if private:
uri = "/api/v1/bullet-private"
success, error = await self.request("POST", uri, auth=True)
else:
uri = "/api/v1/bullet-public"
success, error = await self.request("POST", uri)
return success, error
async def get_orderbook(self, symbol, count=20):
""" Get orderbook information.
Args:
symbol: A valid trading symbol code. e.g. ETH-BTC.
count: Orderbook length, only support 20 or 100.
Returns:
            success: Success results, otherwise it's None.
            error: Error information, otherwise it's None.
"""
if count == 20:
uri = "/api/v1/market/orderbook/level2_20?symbol={}".format(symbol)
else:
uri = "/api/v2/market/orderbook/level2_100?symbol={}".format(symbol)
success, error = await self.request("GET", uri)
return success, error
async def request(self, method, uri, params=None, body=None, headers=None, auth=False):
""" Do HTTP request.
Args:
method: HTTP request method. GET, POST, DELETE, PUT.
uri: HTTP request uri.
params: HTTP query params.
body: HTTP request body.
headers: HTTP request headers.
auth: If this request requires authentication.
Returns:
            success: Success results, otherwise it's None.
            error: Error information, otherwise it's None.
"""
if params:
query = "&".join(["{}={}".format(k, params[k]) for k in sorted(params.keys())])
uri += "?" + query
url = urljoin(self._host, uri)
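        # Authenticated endpoints need four headers: the API key, an HMAC-SHA256
        # signature over "timestamp + method + uri + body", the timestamp itself,
        # and the API passphrase.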
if auth:
if not headers:
headers = {}
timestamp = str(tools.get_cur_timestamp_ms())
signature = self._generate_signature(timestamp, method, uri, body)
headers["KC-API-KEY"] = self._access_key
headers["KC-API-SIGN"] = signature
headers["KC-API-TIMESTAMP"] = timestamp
headers["KC-API-PASSPHRASE"] = self._passphrase
_, success, error = await AsyncHttpRequests.fetch(method, url, data=body, headers=headers, timeout=10)
if error:
return None, error
if success["code"] != "200000":
return None, success
return success["data"], error
def _generate_signature(self, nonce, method, path, data):
"""Generate the call signature."""
data = json.dumps(data) if data else ""
sig_str = "{}{}{}{}".format(nonce, method, path, data)
m = hmac.new(self._secret_key.encode("utf-8"), sig_str.encode("utf-8"), hashlib.sha256)
return base64.b64encode(m.digest()).decode("utf-8")
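    # Signing sketch (hypothetical values; real requests use tools.get_cur_timestamp_ms()):
    #   >>> import base64, hmac, hashlib
    #   >>> sig_str = "1577836800000" + "GET" + "/api/v1/accounts" + ""
    #   >>> m = hmac.new(b"my-secret-key", sig_str.encode("utf-8"), hashlib.sha256)
    #   >>> base64.b64encode(m.digest()).decode("utf-8")  # -> KC-API-SIGN header value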
class KucoinTrade:
""" Kucoin Trade module. You can initialize trade object with some attributes in kwargs.
Attributes:
account: Account name for this trade exchange.
        strategy: The name you want to give your strategy.
symbol: Symbol name for your trade.
host: HTTP request host. (default is "https://openapi-v2.kucoin.com")
access_key: Account's ACCESS KEY.
secret_key: Account's SECRET KEY.
passphrase: API KEY passphrase.
        asset_update_callback: You can use this param to specify an async callback function when initializing the
            Trade object. `asset_update_callback` is like `async def on_asset_update_callback(asset: Asset): pass` and
            this callback function will be executed asynchronously when an AssetEvent is received.
        order_update_callback: You can use this param to specify an async callback function when initializing the
            Trade object. `order_update_callback` is like `async def on_order_update_callback(order: Order): pass` and
            this callback function will be executed asynchronously when an order state is updated.
        init_success_callback: You can use this param to specify an async callback function when initializing the
            Trade object. `init_success_callback` is like `async def on_init_success_callback(success: bool, error: Error, **kwargs): pass`
            and this callback function will be executed asynchronously after the Trade module object is initialized successfully.
check_order_interval: The interval time(seconds) for loop run task to check order status. (default is 2 seconds)
"""
def __init__(self, **kwargs):
"""Initialize."""
e = None
if not kwargs.get("account"):
e = Error("param account miss")
if not kwargs.get("strategy"):
e = Error("param strategy miss")
if not kwargs.get("symbol"):
e = Error("param symbol miss")
if not kwargs.get("host"):
kwargs["host"] = "https://openapi-v2.kucoin.com"
if not kwargs.get("access_key"):
e = Error("param access_key miss")
if not kwargs.get("secret_key"):
e = Error("param secret_key miss")
if not kwargs.get("passphrase"):
e = Error("param passphrase miss")
if e:
logger.error(e, caller=self)
if kwargs.get("init_success_callback"):
SingleTask.run(kwargs["init_success_callback"], False, e)
return
self._account = kwargs["account"]
self._strategy = kwargs["strategy"]
self._platform = KUCOIN
self._symbol = kwargs["symbol"]
self._host = kwargs["host"]
self._access_key = kwargs["access_key"]
self._secret_key = kwargs["secret_key"]
self._passphrase = kwargs["passphrase"]
self._asset_update_callback = kwargs.get("asset_update_callback")
self._order_update_callback = kwargs.get("order_update_callback")
self._init_success_callback = kwargs.get("init_success_callback")
self._check_order_interval = kwargs.get("check_order_interval", 2)
self._raw_symbol = self._symbol.replace("/", "-") # Raw symbol name.
self._assets = {} # Asset information. e.g. {"BTC": {"free": "1.1", "locked": "2.2", "total": "3.3"}, ... }
self._orders = {} # Order details. e.g. {order_no: order-object, ... }
# Initialize our REST API client.
self._rest_api = KucoinRestAPI(self._host, self._access_key, self._secret_key, self._passphrase)
# Create a loop run task to check order status.
LoopRunTask.register(self._check_order_update, self._check_order_interval)
# Subscribe asset event.
if self._asset_update_callback:
AssetSubscribe(self._platform, self._account, self.on_event_asset_update)
SingleTask.run(self._initialize)
@property
def assets(self):
return copy.copy(self._assets)
@property
def orders(self):
return copy.copy(self._orders)
@property
def rest_api(self):
return self._rest_api
async def _initialize(self):
""" Initialize. fetch all open order information."""
result, error = await self._rest_api.get_order_list(symbol=self._raw_symbol)
if error:
e = Error("get open order nos failed: {}".format(error))
logger.error(e, caller=self)
if self._init_success_callback:
SingleTask.run(self._init_success_callback, False, e)
return
for item in result["items"]:
if item["symbol"] != self._raw_symbol:
continue
await self._update_order(item)
if self._init_success_callback:
SingleTask.run(self._init_success_callback, True, None)
async def create_order(self, action, price, quantity, order_type=ORDER_TYPE_LIMIT, **kwargs):
""" Create an order.
Args:
action: Trade direction, BUY or SELL.
price: Price of order.
quantity: The buying or selling quantity.
order_type: order type, MARKET or LIMIT.
Returns:
order_no: Order ID if created successfully, otherwise it's None.
error: Error information, otherwise it's None.
"""
if action == ORDER_ACTION_BUY:
action_type = "buy"
elif action == ORDER_ACTION_SELL:
action_type = "sell"
else:
return None, "action error"
if order_type == ORDER_TYPE_MARKET:
order_type_2 = "market"
elif order_type == ORDER_TYPE_LIMIT:
order_type_2 = "limit"
else:
return None, "order_type error"
client_id = tools.get_uuid1()
price = tools.float_to_str(price)
quantity = tools.float_to_str(quantity)
success, error = await self._rest_api.create_order(client_id, action_type, self._raw_symbol, order_type_2,
price, quantity)
if error:
return None, error
order_no = success["orderId"]
infos = {
"account": self._account,
"platform": self._platform,
"strategy": self._strategy,
"order_no": order_no,
"symbol": self._symbol,
"action": action,
"price": price,
"quantity": quantity,
"order_type": order_type
}
order = Order(**infos)
self._orders[order_no] = order
if self._order_update_callback:
SingleTask.run(self._order_update_callback, copy.copy(order))
return order_no, None
async def revoke_order(self, *order_nos):
""" Revoke (an) order(s).
Args:
            order_nos: Order id list; pass zero, one, or multiple ids. With no ids, all open orders for this
                symbol (initialized in the Trade object) are canceled. With one id, that single order is canceled.
                With multiple ids, each order is canceled in turn. Do not pass more than 100 ids.
        Returns:
            Success or error, see below.
"""
# If len(order_nos) == 0, you will cancel all orders for this symbol(initialized in Trade object).
if len(order_nos) == 0:
_, error = await self._rest_api.revoke_orders_all(self._raw_symbol)
if error:
return False, error
return True, None
# If len(order_nos) == 1, you will cancel an order.
if len(order_nos) == 1:
success, error = await self._rest_api.revoke_order(order_nos[0])
if error:
return order_nos[0], error
else:
return order_nos[0], None
# If len(order_nos) > 1, you will cancel multiple orders.
if len(order_nos) > 1:
s, e, = [], []
for order_no in order_nos:
success, error = await self._rest_api.revoke_order(order_no)
if error:
e.append(error)
else:
s.append(order_no)
return s, e
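        # Usage sketch (hypothetical ids, given an initialized `trade` object):
        #   await trade.revoke_order()              # cancel all open orders for this symbol
        #   await trade.revoke_order("order-id-1")  # cancel one order
        #   await trade.revoke_order("order-id-1", "order-id-2")  # cancel several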
async def get_open_order_nos(self):
""" Get open order id list.
Args:
None.
Returns:
order_nos: Open order id list, otherwise it's None.
error: Error information, otherwise it's None.
"""
result, error = await self._rest_api.get_order_list(symbol=self._raw_symbol)
if error:
return False, error
order_nos = []
for item in result["items"]:
if item["symbol"] != self._raw_symbol:
continue
order_nos.append(item["id"])
return order_nos, None
async def _check_order_update(self, *args, **kwargs):
""" Loop run task for check order status.
"""
order_nos = list(self._orders.keys())
if not order_nos:
return
for order_no in order_nos:
success, error = await self._rest_api.get_order_detail(order_no)
if error:
return
await self._update_order(success)
@async_method_locker("KucoinTrade.order.locker")
async def _update_order(self, order_info):
""" Update order object.
Args:
order_info: Order information.
"""
if not order_info:
return
order_no = order_info["id"]
size = float(order_info["size"])
deal_size = float(order_info["dealSize"])
order = self._orders.get(order_no)
if not order:
info = {
"platform": self._platform,
"account": self._account,
"strategy": self._strategy,
"order_no": order_no,
"action": ORDER_ACTION_BUY if order_info["side"] == "buy" else ORDER_ACTION_SELL,
"symbol": self._symbol,
"price": order_info["price"],
"quantity": order_info["size"],
"remain": order_info["size"],
"avg_price": order_info["price"]
}
order = Order(**info)
self._orders[order_no] = order
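        # Map KuCoin's fields to our status: while the order is active, no fills means
        # SUBMITTED and partial fills mean PARTIAL_FILLED; once inactive, a fully dealt
        # order is FILLED, anything else was CANCELED.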
if order_info["isActive"]:
            if deal_size == 0:
status = ORDER_STATUS_SUBMITTED
else:
status = ORDER_STATUS_PARTIAL_FILLED
else:
if size == deal_size:
status = ORDER_STATUS_FILLED
else:
status = ORDER_STATUS_CANCELED
if status != order.status:
order.status = status
order.remain = size - deal_size
order.ctime = order_info["createdAt"]
order.utime = tools.get_cur_timestamp_ms()
SingleTask.run(self._order_update_callback, copy.copy(order))
# Delete order that already completed.
if order.status in [ORDER_STATUS_FAILED, ORDER_STATUS_CANCELED, ORDER_STATUS_FILLED]:
self._orders.pop(order_no)
async def on_event_asset_update(self, asset: Asset):
""" Asset update callback.
Args:
asset: Asset object.
"""
self._assets = asset
SingleTask.run(self._asset_update_callback, asset)
| 37.412371
| 133
| 0.596445
|
import json
import copy
import hmac
import base64
import hashlib
from urllib.parse import urljoin
from quant.error import Error
from quant.utils import tools
from quant.utils import logger
from quant.const import KUCOIN
from quant.order import Order
from quant.asset import Asset, AssetSubscribe
from quant.tasks import SingleTask, LoopRunTask
from quant.utils.http_client import AsyncHttpRequests
from quant.utils.decorator import async_method_locker
from quant.order import ORDER_TYPE_LIMIT, ORDER_TYPE_MARKET
from quant.order import ORDER_ACTION_BUY, ORDER_ACTION_SELL
from quant.order import ORDER_STATUS_SUBMITTED, ORDER_STATUS_PARTIAL_FILLED, ORDER_STATUS_FILLED, \
ORDER_STATUS_CANCELED, ORDER_STATUS_FAILED, ORDER_STATUS_NONE
__all__ = ("KucoinRestAPI", "KucoinTrade", )
class KucoinRestAPI:
def __init__(self, host, access_key, secret_key, passphrase):
self._host = host
self._access_key = access_key
self._secret_key = secret_key
self._passphrase = passphrase
async def get_sub_users(self):
uri = "/api/v1/sub/user"
success, error = await self.request("GET", uri, auth=True)
return success, error
async def get_accounts(self, account_type=None, currency=None):
uri = "/api/v1/accounts"
params = {}
if account_type:
params["type"] = account_type
if currency:
params["currency"] = currency
success, error = await self.request("GET", uri, params=params, auth=True)
return success, error
async def get_account(self, account_id):
uri = "/api/v1/accounts/{}".format(account_id)
success, error = await self.request("GET", uri, auth=True)
return success, error
async def create_account(self, account_type, currency):
uri = "/api/v1/accounts"
body = {
"type": account_type,
"currency": currency
}
success, error = await self.request("POST", uri, body=body, auth=True)
return success, error
async def create_order(self, client_id, side, symbol, order_type, price, size):
uri = "/api/v1/orders"
body = {
"clientOid": client_id,
"side": side,
"symbol": symbol,
"type": order_type,
"price": price,
"size": size
}
success, error = await self.request("POST", uri, body=body, auth=True)
return success, error
async def revoke_order(self, order_id):
uri = "/api/v1/orders/{}".format(order_id)
success, error = await self.request("DELETE", uri, auth=True)
return success, error
async def revoke_orders_all(self, symbol=None):
uri = "/api/v1/orders"
params = {}
if symbol:
params["symbol"] = symbol
success, error = await self.request("DELETE", uri, params=params, auth=True)
return success, error
async def get_order_list(self, status="active", symbol=None, order_type=None, start=None, end=None):
uri = "/api/v1/orders"
params = {"status": status}
if symbol:
params["symbol"] = symbol
if order_type:
params["type"] = order_type
if start:
params["startAt"] = start
if end:
params["endAt"] = end
success, error = await self.request("GET", uri, params=params, auth=True)
return success, error
async def get_order_detail(self, order_id):
uri = "/api/v1/orders/{}".format(order_id)
success, error = await self.request("GET", uri, auth=True)
return success, error
async def get_websocket_token(self, private=False):
if private:
uri = "/api/v1/bullet-private"
success, error = await self.request("POST", uri, auth=True)
else:
uri = "/api/v1/bullet-public"
success, error = await self.request("POST", uri)
return success, error
async def get_orderbook(self, symbol, count=20):
if count == 20:
uri = "/api/v1/market/orderbook/level2_20?symbol={}".format(symbol)
else:
uri = "/api/v2/market/orderbook/level2_100?symbol={}".format(symbol)
success, error = await self.request("GET", uri)
return success, error
async def request(self, method, uri, params=None, body=None, headers=None, auth=False):
if params:
query = "&".join(["{}={}".format(k, params[k]) for k in sorted(params.keys())])
uri += "?" + query
url = urljoin(self._host, uri)
if auth:
if not headers:
headers = {}
timestamp = str(tools.get_cur_timestamp_ms())
signature = self._generate_signature(timestamp, method, uri, body)
headers["KC-API-KEY"] = self._access_key
headers["KC-API-SIGN"] = signature
headers["KC-API-TIMESTAMP"] = timestamp
headers["KC-API-PASSPHRASE"] = self._passphrase
_, success, error = await AsyncHttpRequests.fetch(method, url, data=body, headers=headers, timeout=10)
if error:
return None, error
if success["code"] != "200000":
return None, success
return success["data"], error
def _generate_signature(self, nonce, method, path, data):
data = json.dumps(data) if data else ""
sig_str = "{}{}{}{}".format(nonce, method, path, data)
m = hmac.new(self._secret_key.encode("utf-8"), sig_str.encode("utf-8"), hashlib.sha256)
return base64.b64encode(m.digest()).decode("utf-8")
class KucoinTrade:
def __init__(self, **kwargs):
e = None
if not kwargs.get("account"):
e = Error("param account miss")
if not kwargs.get("strategy"):
e = Error("param strategy miss")
if not kwargs.get("symbol"):
e = Error("param symbol miss")
if not kwargs.get("host"):
kwargs["host"] = "https://openapi-v2.kucoin.com"
if not kwargs.get("access_key"):
e = Error("param access_key miss")
if not kwargs.get("secret_key"):
e = Error("param secret_key miss")
if not kwargs.get("passphrase"):
e = Error("param passphrase miss")
if e:
logger.error(e, caller=self)
if kwargs.get("init_success_callback"):
SingleTask.run(kwargs["init_success_callback"], False, e)
return
self._account = kwargs["account"]
self._strategy = kwargs["strategy"]
self._platform = KUCOIN
self._symbol = kwargs["symbol"]
self._host = kwargs["host"]
self._access_key = kwargs["access_key"]
self._secret_key = kwargs["secret_key"]
self._passphrase = kwargs["passphrase"]
self._asset_update_callback = kwargs.get("asset_update_callback")
self._order_update_callback = kwargs.get("order_update_callback")
self._init_success_callback = kwargs.get("init_success_callback")
self._check_order_interval = kwargs.get("check_order_interval", 2)
self._raw_symbol = self._symbol.replace("/", "-")
self._assets = {}
self._orders = {}
self._rest_api = KucoinRestAPI(self._host, self._access_key, self._secret_key, self._passphrase)
LoopRunTask.register(self._check_order_update, self._check_order_interval)
if self._asset_update_callback:
AssetSubscribe(self._platform, self._account, self.on_event_asset_update)
SingleTask.run(self._initialize)
@property
def assets(self):
return copy.copy(self._assets)
@property
def orders(self):
return copy.copy(self._orders)
@property
def rest_api(self):
return self._rest_api
async def _initialize(self):
result, error = await self._rest_api.get_order_list(symbol=self._raw_symbol)
if error:
e = Error("get open order nos failed: {}".format(error))
logger.error(e, caller=self)
if self._init_success_callback:
SingleTask.run(self._init_success_callback, False, e)
return
for item in result["items"]:
if item["symbol"] != self._raw_symbol:
continue
await self._update_order(item)
if self._init_success_callback:
SingleTask.run(self._init_success_callback, True, None)
async def create_order(self, action, price, quantity, order_type=ORDER_TYPE_LIMIT, **kwargs):
if action == ORDER_ACTION_BUY:
action_type = "buy"
elif action == ORDER_ACTION_SELL:
action_type = "sell"
else:
return None, "action error"
if order_type == ORDER_TYPE_MARKET:
order_type_2 = "market"
elif order_type == ORDER_TYPE_LIMIT:
order_type_2 = "limit"
else:
return None, "order_type error"
client_id = tools.get_uuid1()
price = tools.float_to_str(price)
quantity = tools.float_to_str(quantity)
success, error = await self._rest_api.create_order(client_id, action_type, self._raw_symbol, order_type_2,
price, quantity)
if error:
return None, error
order_no = success["orderId"]
infos = {
"account": self._account,
"platform": self._platform,
"strategy": self._strategy,
"order_no": order_no,
"symbol": self._symbol,
"action": action,
"price": price,
"quantity": quantity,
"order_type": order_type
}
order = Order(**infos)
self._orders[order_no] = order
if self._order_update_callback:
SingleTask.run(self._order_update_callback, copy.copy(order))
return order_no, None
async def revoke_order(self, *order_nos):
if len(order_nos) == 0:
_, error = await self._rest_api.revoke_orders_all(self._raw_symbol)
if error:
return False, error
return True, None
if len(order_nos) == 1:
success, error = await self._rest_api.revoke_order(order_nos[0])
if error:
return order_nos[0], error
else:
return order_nos[0], None
if len(order_nos) > 1:
s, e, = [], []
for order_no in order_nos:
success, error = await self._rest_api.revoke_order(order_no)
if error:
e.append(error)
else:
s.append(order_no)
return s, e
async def get_open_order_nos(self):
result, error = await self._rest_api.get_order_list(symbol=self._raw_symbol)
if error:
return False, error
order_nos = []
for item in result["items"]:
if item["symbol"] != self._raw_symbol:
continue
order_nos.append(item["id"])
return order_nos, None
async def _check_order_update(self, *args, **kwargs):
order_nos = list(self._orders.keys())
if not order_nos:
return
for order_no in order_nos:
success, error = await self._rest_api.get_order_detail(order_no)
if error:
return
await self._update_order(success)
@async_method_locker("KucoinTrade.order.locker")
async def _update_order(self, order_info):
if not order_info:
return
order_no = order_info["id"]
size = float(order_info["size"])
deal_size = float(order_info["dealSize"])
order = self._orders.get(order_no)
if not order:
info = {
"platform": self._platform,
"account": self._account,
"strategy": self._strategy,
"order_no": order_no,
"action": ORDER_ACTION_BUY if order_info["side"] == "buy" else ORDER_ACTION_SELL,
"symbol": self._symbol,
"price": order_info["price"],
"quantity": order_info["size"],
"remain": order_info["size"],
"avg_price": order_info["price"]
}
order = Order(**info)
self._orders[order_no] = order
if order_info["isActive"]:
            if deal_size == 0:
status = ORDER_STATUS_SUBMITTED
else:
status = ORDER_STATUS_PARTIAL_FILLED
else:
if size == deal_size:
status = ORDER_STATUS_FILLED
else:
status = ORDER_STATUS_CANCELED
if status != order.status:
order.status = status
order.remain = size - deal_size
order.ctime = order_info["createdAt"]
order.utime = tools.get_cur_timestamp_ms()
SingleTask.run(self._order_update_callback, copy.copy(order))
if order.status in [ORDER_STATUS_FAILED, ORDER_STATUS_CANCELED, ORDER_STATUS_FILLED]:
self._orders.pop(order_no)
async def on_event_asset_update(self, asset: Asset):
self._assets = asset
SingleTask.run(self._asset_update_callback, asset)
| true
| true
|
f7195fe4de87239beab23f5be618730dc300a65f
| 14,645
|
py
|
Python
|
RAdam.py
|
blnm/RSE
|
6a3f0dd858ea4b6dafcfb1d97bb979e101d9911c
|
[
"MIT"
] | 40
|
2020-04-24T01:03:12.000Z
|
2022-03-20T18:19:30.000Z
|
RAdam.py
|
blnm/RSE
|
6a3f0dd858ea4b6dafcfb1d97bb979e101d9911c
|
[
"MIT"
] | 4
|
2021-09-09T13:26:09.000Z
|
2022-03-31T18:37:05.000Z
|
RAdam.py
|
blnm/RSE
|
6a3f0dd858ea4b6dafcfb1d97bb979e101d9911c
|
[
"MIT"
] | 7
|
2020-11-25T14:26:09.000Z
|
2022-01-29T10:18:40.000Z
|
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
__all__ = ['RAdamOptimizer']
class RAdamOptimizer(optimizer.Optimizer):
"""RAdam optimizer.
According to the paper
[On The Variance Of The Adaptive Learning Rate And Beyond](https://arxiv.org/pdf/1908.03265v1.pdf).
"""
def __init__(self,
learning_rate=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-7,
L2_decay=0.,
amsgrad=False,
total_steps=0,
warmup_proportion=0.1,
min_lr=0.,
use_locking=False,
name="RAdam",
decay_vars=None,
L1_decay=0.0,
clip_gradients=False, clip_multiplier=3.0, clip_epsilon=1e-2):
r"""Construct a new Adam optimizer.
Args:
learning_rate: A Tensor or a floating point value. The learning rate.
beta1: A float value or a constant float tensor. The exponential decay
rate for the 1st moment estimates.
beta2: A float value or a constant float tensor. The exponential decay
rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper.
L2_decay: A floating point value. Weight decay for each param.
amsgrad: boolean. Whether to apply AMSGrad variant of this algorithm from
the paper "On the Convergence of Adam and beyond".
total_steps: An integer. Total number of training steps.
Enable warmup by setting a positive value.
warmup_proportion: A floating point value. The proportion of increasing steps.
min_lr: A floating point value. Minimum learning rate after warmup.
name: Optional name for the operations created when applying gradients.
Defaults to "Adam". @compatibility(eager) When eager execution is
enabled, `learning_rate`, `beta_1`, `beta_2`, and `epsilon` can each be
a callable that takes no arguments and returns the actual value to use.
This can be useful for changing these values across different
invocations of optimizer functions. @end_compatibility
          use_locking: If True, use locks for update operations.
          decay_vars: Optional collection of variables to which L1/L2 weight decay is applied.
          L1_decay: A floating point value. L1 weight decay for each param in `decay_vars`.
          clip_gradients: boolean. Whether to clip gradients by an adaptive norm derived
            from the second-moment estimate.
          clip_multiplier: A floating point value. Multiplier for the adaptive clip value.
          clip_epsilon: A floating point value. Additive constant for the adaptive clip value.
"""
super(RAdamOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
self._weight_decay = L2_decay
self._L1_decay = L1_decay
self._amsgrad = amsgrad
self._total_steps = float(total_steps)
self._warmup_proportion = warmup_proportion
self._min_lr = min_lr
self._initial_weight_decay = L2_decay
self._initial_total_steps = total_steps
self.clip_multiplier = clip_multiplier
self.clip_epsilon = clip_epsilon
self.clip_gradients = clip_gradients
self.clip_multiplier_t = ops.convert_to_tensor(self.clip_multiplier, name="clip_multiplier")
self.clip_epsilon_t = ops.convert_to_tensor(self.clip_epsilon, name="clip_epsilon")
self._lr_t = None
self._step_t = None
self._beta1_t = None
self._beta2_t = None
self._epsilon_t = None
self._weight_decay_t = None
self._total_steps_t = None
self._warmup_proportion_t = None
self._min_lr_t = None
self.reg_vars = set(decay_vars) if decay_vars is not None else set()
def _get_beta_accumulators(self):
with ops.init_scope():
if context.executing_eagerly():
graph = None
else:
graph = ops.get_default_graph()
return (self._get_non_slot_variable("step", graph=graph),
self._get_non_slot_variable("beta1_power", graph=graph),
self._get_non_slot_variable("beta2_power", graph=graph))
def _create_slots_internal(self, var_list):
first_var = min(var_list, key=lambda x: x.name)
self._create_non_slot_variable(initial_value=1.0, name="step", colocate_with=first_var)
self._create_non_slot_variable(initial_value=self._beta1, name="beta1_power", colocate_with=first_var)
self._create_non_slot_variable(initial_value=self._beta2, name="beta2_power", colocate_with=first_var)
for v in var_list:
self._zeros_slot(v, "m", self._name)
self._zeros_slot(v, "v", self._name)
if self._amsgrad:
self._zeros_slot(v, "vhat", self._name)
def _prepare(self):
lr = self._call_if_callable(self._lr)
beta1 = self._call_if_callable(self._beta1)
beta2 = self._call_if_callable(self._beta2)
epsilon = self._call_if_callable(self._epsilon)
weight_decay = self._call_if_callable(self._weight_decay)
total_steps = self._call_if_callable(self._total_steps)
warmup_proportion = self._call_if_callable(self._warmup_proportion)
min_lr = self._call_if_callable(self._min_lr)
self._lr_t = ops.convert_to_tensor(lr, name="learning_rate")
self._beta1_t = ops.convert_to_tensor(beta1, name="beta1")
self._beta2_t = ops.convert_to_tensor(beta2, name="beta2")
self._epsilon_t = ops.convert_to_tensor(epsilon, name="epsilon")
self._weight_decay_t = ops.convert_to_tensor(weight_decay, name="weight_decay")
self._total_steps_t = ops.convert_to_tensor(total_steps, name="total_steps")
self._warmup_proportion_t = ops.convert_to_tensor(warmup_proportion, name="warmup_proportion")
self._min_lr_t = ops.convert_to_tensor(min_lr, name="min_lr")
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
tvars = list(zip(*grads_and_vars))[1]
self._create_slots_internal(tvars)
return super().apply_gradients(grads_and_vars, global_step, name)
def _apply_dense(self, grad, var):
return self._resource_apply_dense(grad, var)
def _resource_apply_dense(self, grad, var):
step, beta1_power, beta2_power = self._get_beta_accumulators()
beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
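        # Optional schedule: linearly warm the learning rate up over the first
        # `warmup_proportion` of `total_steps`, then decay it linearly towards `min_lr`.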
if self._initial_total_steps > 0:
total_steps = math_ops.cast(self._total_steps_t, var.dtype.base_dtype)
warmup_proportion = math_ops.cast(self._warmup_proportion_t, var.dtype.base_dtype)
min_lr = math_ops.cast(self._min_lr_t, var.dtype.base_dtype)
warmup_steps = total_steps * warmup_proportion
decay_steps = math_ops.maximum(total_steps - warmup_steps, 1)
decay_rate = (min_lr - lr_t) / decay_steps
lr_t = tf.where(
step <= warmup_steps,
lr_t * (step / warmup_steps),
lr_t + decay_rate * math_ops.minimum(step - warmup_steps, decay_steps),
)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
v = self.get_slot(var, "v")
if self.clip_gradients:
clipVal = math_ops.sqrt(
tf.reduce_sum(v) / (1.0 - beta2_power)) * self.clip_multiplier_t + self.clip_epsilon_t
grad = clip_ops.clip_by_norm(grad, clipVal)
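        # Length of the approximated simple moving average (SMA): sma_inf is its
        # maximum, sma_t its value at the current step (RAdam paper, Algorithm 2).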
sma_inf = 2.0 / (1.0 - beta2_t) - 1.0
sma_t = sma_inf - 2.0 * step * beta2_power / (1.0 - beta2_power)
m = self.get_slot(var, "m")
v_t = state_ops.assign(v, beta2_t * v + (1.0 - beta2_t) * math_ops.square(grad), use_locking=self._use_locking)
v_corr_t = math_ops.sqrt(v_t / (1.0 - beta2_power)) + epsilon_t
grad_corr = grad / v_corr_t
m_t = state_ops.assign(m, beta1_t * m + (1.0 - beta1_t) * grad_corr, use_locking=self._use_locking)
m_corr_t = m_t / (1.0 - beta1_power)
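        # Variance rectification term; when sma_t < 5 the variance is intractable and
        # the un-rectified momentum update is used instead (see tf.where below).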
r_t = math_ops.sqrt((sma_t - 4.0) / (sma_inf - 4.0) *
(sma_t - 2.0) / (sma_inf - 2.0) *
sma_inf / sma_t)
var_t = tf.where(sma_t >= 5.0, r_t * m_corr_t, m_corr_t)
if var in self.reg_vars:
if self._initial_weight_decay > 0.0:
var_t += math_ops.cast(self._weight_decay_t, var.dtype.base_dtype) * var
if self._L1_decay > 0.0:
var_t += math_ops.cast(self._L1_decay, var.dtype.base_dtype) * math_ops.sign(var)
with tf.control_dependencies([var_t]):
var_update = state_ops.assign_sub(var, lr_t * var_t, use_locking=self._use_locking)
updates = [var_update, m_t, v_t]
return control_flow_ops.group(*updates)
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
step, beta1_power, beta2_power = self._get_beta_accumulators()
beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
if self._initial_total_steps > 0:
total_steps = math_ops.cast(self._total_steps_t, var.dtype.base_dtype)
warmup_proportion = math_ops.cast(self._warmup_proportion_t, var.dtype.base_dtype)
min_lr = math_ops.cast(self._min_lr_t, var.dtype.base_dtype)
warmup_steps = total_steps * warmup_proportion
decay_steps = math_ops.maximum(total_steps - warmup_steps, 1)
decay_rate = (min_lr - lr_t) / decay_steps
lr_t = tf.where(
step <= warmup_steps,
lr_t * (step / warmup_steps),
lr_t + decay_rate * math_ops.minimum(step - warmup_steps, decay_steps),
)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
v = self.get_slot(var, "v")
if self.clip_gradients:
clipVal = math_ops.sqrt(
tf.reduce_sum(v) / (1.0 - beta2_power)) * self.clip_multiplier_t + self.clip_epsilon_t
grad = clip_ops.clip_by_norm(grad, clipVal)
sma_inf = 2.0 / (1.0 - beta2_t) - 1.0
sma_t = sma_inf - 2.0 * step * beta2_power / (1.0 - beta2_power)
m = self.get_slot(var, "m")
m_scaled_g_values = grad * (1 - beta1_t)
m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
with ops.control_dependencies([m_t]):
m_t = scatter_add(m, indices, m_scaled_g_values)
m_corr_t = m_t / (1.0 - beta1_power)
v_scaled_g_values = (grad * grad) * (1 - beta2_t)
v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
with ops.control_dependencies([v_t]):
v_t = scatter_add(v, indices, v_scaled_g_values)
if self._amsgrad:
vhat = self.get_slot(var, 'vhat')
vhat_t = state_ops.assign(vhat, math_ops.maximum(vhat, v_t), use_locking=self._use_locking)
v_corr_t = math_ops.sqrt(vhat_t / (1.0 - beta2_power)) + epsilon_t
else:
v_corr_t = math_ops.sqrt(v_t / (1.0 - beta2_power)) + epsilon_t
r_t = math_ops.sqrt((sma_t - 4.0) / (sma_inf - 4.0) *
(sma_t - 2.0) / (sma_inf - 2.0) *
sma_inf / sma_t)
var_t = tf.where(sma_t >= 5.0, r_t * m_corr_t / v_corr_t, m_corr_t)
if var in self.reg_vars:
if self._initial_weight_decay > 0.0:
var_t += math_ops.cast(self._weight_decay_t, var.dtype.base_dtype) * var
if self._L1_decay > 0.0:
var_t += math_ops.cast(self._L1_decay, var.dtype.base_dtype) * math_ops.sign(var)
var_update = state_ops.assign_sub(var, lr_t * var_t, use_locking=self._use_locking)
updates = [var_update, m_t, v_t]
if self._amsgrad:
updates.append(vhat_t)
return control_flow_ops.group(*updates)
def _apply_sparse(self, grad, var):
return self._apply_sparse_shared(
grad.values,
var,
grad.indices,
lambda x, i, v: state_ops.scatter_add(x, i, v, use_locking=self._use_locking))
def _resource_scatter_add(self, x, i, v):
with ops.control_dependencies([resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
return x.value()
def _resource_apply_sparse(self, grad, var, indices):
return self._apply_sparse_shared(grad, var, indices, self._resource_scatter_add)
def _finish(self, update_ops, name_scope):
with ops.control_dependencies(update_ops):
step, beta1_power, beta2_power = self._get_beta_accumulators()
with ops.colocate_with(beta1_power):
update_step = step.assign(step + 1.0, use_locking=self._use_locking)
update_beta1 = beta1_power.assign(beta1_power * self._beta1_t, use_locking=self._use_locking)
update_beta2 = beta2_power.assign(beta2_power * self._beta2_t, use_locking=self._use_locking)
return control_flow_ops.group(*update_ops + [update_step, update_beta1, update_beta2], name=name_scope)
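# Minimal usage sketch, added for illustration (not part of the original
# module): it wires RAdamOptimizer into a TF1-style graph-mode training loop.
# The toy variable and loss below are invented for the demo.
if __name__ == "__main__":
    tf.compat.v1.disable_eager_execution()
    w = tf.compat.v1.get_variable("w", shape=[2], initializer=tf.compat.v1.zeros_initializer())
    loss = tf.reduce_sum(tf.square(w - 1.0))  # minimized at w == [1., 1.]
    train_op = RAdamOptimizer(learning_rate=0.1).minimize(loss)
    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        for _ in range(200):
            sess.run(train_op)
        print(sess.run(w))  # expected to approach [1., 1.]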
49.476351 | 120 | 0.629157 | true | true
|
f7196003d3c4be36ba4db4ea82b6856d51483928 | 18496 | py | Python
btclib/ecc/ssa.py | dginst/btclib | 70932afe32167449e369d4e2911b1bf741c0f5d2 | ["MIT"] | 16 | 2019-01-04T22:21:17.000Z | 2020-02-01T10:41:28.000Z
btclib/ecc/ssa.py | dginst/BitcoinBlockchainTechnology | 70932afe32167449e369d4e2911b1bf741c0f5d2 | ["MIT"] | 20 | 2018-05-24T18:47:12.000Z | 2018-12-22T09:52:09.000Z
btclib/ecc/ssa.py | dginst/BitcoinBlockchainTechnology | 70932afe32167449e369d4e2911b1bf741c0f5d2 | ["MIT"] | 9 | 2018-05-16T09:53:32.000Z | 2019-01-03T13:49:37.000Z
|
#!/usr/bin/env python3
# Copyright (C) 2017-2022 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"""Elliptic Curve Schnorr Signature Algorithm (ECSSA).
This implementation is according to BIP340-Schnorr:
https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki
Differently from ECDSA, the BIP340-Schnorr scheme supports
messages of size hf_size only.
It also uses as public key the x-coordinate (field element)
of the curve point associated to the private key 0 < q < n.
Therefore, for sepcp256k1 the public key size is 32 bytes.
Arguably, the knowledge of q as the discrete logarithm of Q
also implies the knowledge of n-q as discrete logarithm of -Q.
As such, {q, n-q} can be considered a single private key and
{Q, -Q} the associated public key characterized by the shared x_Q.
Also, BIP340 advocates its own SHA256 modification as hash function:
TaggedHash(tag, x) = SHA256(SHA256(tag)||SHA256(tag)||x)
The rationale is to make BIP340 signatures invalid for anything else
but Bitcoin and vice versa.
TaggedHash is used for both the challenge (with tag 'BIPSchnorr')
and the deterministic nonce (with tag 'BIPSchnorrDerive').
To allow for secure batch verification of multiple signatures,
BIP340-Schnorr uses a challenge that prevents public key recovery
from signature: c = TaggedHash('BIPSchnorr', x_k||x_Q||msg).
A custom deterministic algorithm for the ephemeral key (nonce)
is used for signing, instead of the RFC6979 standard:
nonce = TaggedHash('BIPSchnorrDerive', q||msg)
Finally, BIP340-Schnorr adopts a robust [r][s] custom serialization
format, instead of the loosely specified ASN.1 DER standard.
The signature size is p-size*n-size, where p-size is the field element
(curve point coordinate) byte size and n-size is the scalar
(curve point multiplication coefficient) byte size.
For sepcp256k1 the resulting signature size is 64 bytes.
"""
import secrets
from dataclasses import InitVar, dataclass
from hashlib import sha256
from typing import List, Optional, Sequence, Tuple, Type, Union
from btclib.alias import BinaryData, HashF, Integer, JacPoint, Octets, Point
from btclib.bip32.bip32 import BIP32Key
from btclib.ecc.curve import Curve, secp256k1
from btclib.ecc.curve_group import _double_mult, _mult, _multi_mult
from btclib.ecc.number_theory import mod_inv
from btclib.exceptions import BTClibRuntimeError, BTClibTypeError, BTClibValueError
from btclib.hashes import reduce_to_hlen, tagged_hash
from btclib.to_prv_key import PrvKey, int_from_prv_key
from btclib.to_pub_key import point_from_pub_key
from btclib.utils import (
bytes_from_octets,
bytesio_from_binarydata,
hex_string,
int_from_bits,
)
@dataclass(frozen=True)
class Sig:
"""BIP340-Schnorr signature.
- r is an x-coordinate _field_element_, 0 <= r < ec.p
- s is a scalar, 0 <= s < ec.n (yes, for BIP340-Schnorr it can be zero)
(ec.p is the field prime, ec.n is the curve order)
"""
# 32 bytes x-coordinate field element
r: int
# 32 bytes scalar
s: int
ec: Curve = secp256k1
check_validity: InitVar[bool] = True
def __post_init__(self, check_validity: bool) -> None:
if check_validity:
self.assert_valid()
def assert_valid(self) -> None:
# r is a field element, fail if r is not a valid x-coordinate
self.ec.y(self.r)
# s is a scalar, fail if s is not in [0, n-1]
if not 0 <= self.s < self.ec.n:
err_msg = "scalar s not in 0..n-1: "
err_msg += f"'{hex_string(self.s)}'" if self.s > 0xFFFFFFFF else f"{self.s}"
raise BTClibValueError(err_msg)
def serialize(self, check_validity: bool = True) -> bytes:
if check_validity:
self.assert_valid()
out = self.r.to_bytes(self.ec.p_size, byteorder="big", signed=False)
out += self.s.to_bytes(self.ec.n_size, byteorder="big", signed=False)
return out
@classmethod
def parse(cls: Type["Sig"], data: BinaryData, check_validity: bool = True) -> "Sig":
stream = bytesio_from_binarydata(data)
ec = secp256k1
r = int.from_bytes(stream.read(ec.p_size), byteorder="big", signed=False)
s = int.from_bytes(stream.read(ec.n_size), byteorder="big", signed=False)
return cls(r, s, ec, check_validity)
# a BIP340 public key can be expressed as:
# - an int (or its hex-string / bytes representation)
# - SEC octets (33 or 65 bytes, or hex-string)
# - a BIP32 key (as dict or string)
# - a Point tuple
BIP340PubKey = Union[Integer, Octets, BIP32Key, Point]
def point_from_bip340pub_key(x_Q: BIP340PubKey, ec: Curve = secp256k1) -> Point:
"""Return a verified-as-valid BIP340 public key as Point tuple.
It supports:
- BIP32 extended keys (bytes, string, or BIP32KeyData)
- SEC Octets (bytes or hex-string, with 02, 03, or 04 prefix)
- BIP340 Octets (bytes or hex-string, p-size Point x-coordinate)
- native tuple
"""
# BIP 340 key as integer
if isinstance(x_Q, int):
return x_Q, ec.y_even(x_Q)
# (tuple) Point, (dict or str) BIP32Key, or 33/65 bytes
try:
x_Q = point_from_pub_key(x_Q, ec)[0]
return x_Q, ec.y_even(x_Q)
except BTClibValueError:
pass
# BIP 340 key as bytes or hex-string
if isinstance(x_Q, (str, bytes)):
Q = bytes_from_octets(x_Q, ec.p_size)
x_Q = int.from_bytes(Q, "big", signed=False)
return x_Q, ec.y_even(x_Q)
raise BTClibTypeError("not a BIP340 public key")
def gen_keys_(
prv_key: Optional[PrvKey] = None, ec: Curve = secp256k1
) -> Tuple[int, int, JacPoint]:
"Return a BIP340 private/public (int, JacPoint) key-pair."
if prv_key is None:
q = 1 + secrets.randbelow(ec.n - 1)
else:
q = int_from_prv_key(prv_key, ec)
QJ = _mult(q, ec.GJ, ec)
x_Q, y_Q = ec.aff_from_jac(QJ)
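    # BIP340 uses x-only public keys: normalize to the representative with
    # even y, negating both q and Q when y_Q is odd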
if y_Q % 2:
q = ec.n - q
QJ = ec.negate_jac(QJ)
return q, x_Q, QJ
def gen_keys(
prv_key: Optional[PrvKey] = None, ec: Curve = secp256k1
) -> Tuple[int, int]:
"Return a BIP340 private/public (int, int) key-pair."
q, x_Q, _ = gen_keys_(prv_key, ec)
return q, x_Q
def _det_nonce_(
msg_hash: bytes, q: int, Q: int, aux: bytes, ec: Curve, hf: HashF
) -> int:
# assume the random oracle model for the hash function,
# i.e. hash values can be considered uniformly random
# Note that in general, taking a uniformly random integer
# modulo the curve order n would produce a biased result.
# However, if the order n is sufficiently close to 2^hf_len,
# then the bias is not observable:
# e.g. for secp256k1 and sha256 1-n/2^256 it is about 1.27*2^-128
#
# the unbiased implementation is provided here,
# which works also for very-low-cardinality test curves
randomizer = tagged_hash("BIP0340/aux".encode(), aux, hf)
xor = q ^ int.from_bytes(randomizer, "big", signed=False)
max_len = max(ec.n_size, hf().digest_size)
t = b"".join(
[
xor.to_bytes(max_len, byteorder="big", signed=False),
Q.to_bytes(ec.p_size, byteorder="big", signed=False),
msg_hash,
]
)
nonce_tag = "BIP0340/nonce".encode()
while True:
t = tagged_hash(nonce_tag, t, hf)
        # the following lines would introduce a bias (see the note above):
        # nonce = int.from_bytes(t, 'big') % ec.n
        # nonce = int_from_bits(t, ec.nlen) % ec.n
nonce = int_from_bits(t, ec.nlen) # candidate nonce
if 0 < nonce < ec.n: # acceptable value for nonce
return nonce # successful candidate
def det_nonce_(
msg_hash: Octets,
prv_key: PrvKey,
aux: Optional[Octets] = None,
ec: Curve = secp256k1,
hf: HashF = sha256,
) -> int:
"Return a BIP340 deterministic ephemeral key (nonce)."
# the message msg_hash: a hf_len array
hf_len = hf().digest_size
msg_hash = bytes_from_octets(msg_hash, hf_len)
q, Q = gen_keys(prv_key, ec)
# the auxiliary random component
aux = secrets.token_bytes(hf_len) if aux is None else bytes_from_octets(aux)
return _det_nonce_(msg_hash, q, Q, aux, ec, hf)
def challenge_(msg_hash: Octets, x_Q: int, x_K: int, ec: Curve, hf: HashF) -> int:
# the message msg_hash: a hf_len array
hf_len = hf().digest_size
msg_hash = bytes_from_octets(msg_hash, hf_len)
t = b"".join(
[
x_K.to_bytes(ec.p_size, byteorder="big", signed=False),
x_Q.to_bytes(ec.p_size, byteorder="big", signed=False),
msg_hash,
]
)
t = tagged_hash("BIP0340/challenge".encode(), t, hf)
c = int_from_bits(t, ec.nlen) % ec.n
if c == 0:
raise BTClibRuntimeError("invalid zero challenge") # pragma: no cover
return c
def _sign_(c: int, q: int, nonce: int, r: int, ec: Curve) -> Sig:
    # Private function for testing purposes: it allows exploring all
    # possible values of the challenge c (for low-cardinality curves).
    # It assumes that c, q, and nonce are all in [1, n-1].
if c == 0: # c≠0 required as it multiplies the private key
raise BTClibRuntimeError("invalid zero challenge")
# s=0 is ok: in verification there is no inverse of s
s = (nonce + c * q) % ec.n
return Sig(r, s, ec)
def sign_(
msg_hash: Octets,
prv_key: PrvKey,
nonce: Optional[PrvKey] = None,
ec: Curve = secp256k1,
hf: HashF = sha256,
) -> Sig:
"""Sign a hf_len bytes message according to BIP340 signature algorithm.
If the deterministic nonce is not provided,
the BIP340 specification (not RFC6979) is used.
"""
# the message msg_hash: a hf_len array
hf_len = hf().digest_size
msg_hash = bytes_from_octets(msg_hash, hf_len)
# private and public keys
q, x_Q = gen_keys(prv_key, ec)
# nonce: an integer in the range 1..n-1.
if nonce is None:
nonce = _det_nonce_(msg_hash, q, x_Q, secrets.token_bytes(hf_len), ec, hf)
nonce, x_K = gen_keys(nonce, ec)
# the challenge
c = challenge_(msg_hash, x_Q, x_K, ec, hf)
return _sign_(c, q, nonce, x_K, ec)
def sign(
msg: Octets,
prv_key: PrvKey,
nonce: Optional[PrvKey] = None,
ec: Curve = secp256k1,
hf: HashF = sha256,
) -> Sig:
"""Sign message according to BIP340 signature algorithm.
The message msg is first processed by hf, yielding the value
msg_hash = hf(msg),
a sequence of bits of length *hf_len*.
Normally, hf is chosen such that its output length *hf_len* is
roughly equal to *nlen*, the bit-length of the group order *n*,
since the overall security of the signature scheme will depend on
the smallest of *hf_len* and *nlen*; however, ECSSA
supports all combinations of *hf_len* and *nlen*.
The BIP340 deterministic nonce (not RFC6979) is used.
"""
msg_hash = reduce_to_hlen(msg, hf)
return sign_(msg_hash, prv_key, nonce, ec, hf)
def _assert_as_valid_(c: int, QJ: JacPoint, r: int, s: int, ec: Curve) -> None:
# Private function for test/dev purposes
# It raises Errors, while verify should always return True or False
    # Let K = sG - cQ.
# in Jacobian coordinates
KJ = _double_mult(ec.n - c, QJ, s, ec.GJ, ec)
# Fail if infinite(KJ).
# Fail if y_K is odd.
if ec.y_aff_from_jac(KJ) % 2:
raise BTClibRuntimeError("y_K is odd")
# Fail if x_K ≠ r
if KJ[0] != KJ[2] * KJ[2] * r % ec.p:
raise BTClibRuntimeError("signature verification failed")
def assert_as_valid_(
msg_hash: Octets, Q: BIP340PubKey, sig: Union[Sig, Octets], hf: HashF = sha256
) -> None:
# Private function for test/dev purposes
# It raises Errors, while verify should always return True or False
if isinstance(sig, Sig):
sig.assert_valid()
else:
sig = Sig.parse(sig)
x_Q, y_Q = point_from_bip340pub_key(Q, sig.ec)
# Let c = int(hf(bytes(r) || bytes(Q) || msg_hash)) mod n.
c = challenge_(msg_hash, x_Q, sig.r, sig.ec, hf)
_assert_as_valid_(c, (x_Q, y_Q, 1), sig.r, sig.s, sig.ec)
def assert_as_valid(
msg: Octets, Q: BIP340PubKey, sig: Union[Sig, Octets], hf: HashF = sha256
) -> None:
msg_hash = reduce_to_hlen(msg, hf)
assert_as_valid_(msg_hash, Q, sig, hf)
def verify_(
msg_hash: Octets, Q: BIP340PubKey, sig: Union[Sig, Octets], hf: HashF = sha256
) -> bool:
"Verify the BIP340 signature of the provided message."
    # all kinds of exceptions are caught, because
    # verify must always return a bool
try:
assert_as_valid_(msg_hash, Q, sig, hf)
except Exception: # pylint: disable=broad-except
return False
else:
return True
def verify(
msg: Octets, Q: BIP340PubKey, sig: Union[Sig, Octets], hf: HashF = sha256
) -> bool:
"Verify the BIP340 signature of the provided message."
msg_hash = reduce_to_hlen(msg, hf)
return verify_(msg_hash, Q, sig, hf)
def _recover_pub_key_(c: int, r: int, s: int, ec: Curve) -> int:
# Private function provided for testing purposes only.
if c == 0:
raise BTClibRuntimeError("invalid zero challenge")
KJ = r, ec.y_even(r), 1
e1 = mod_inv(c, ec.n)
QJ = _double_mult(ec.n - e1, KJ, e1 * s, ec.GJ, ec)
# edge case that cannot be reproduced in the test suite
if QJ[2] == 0:
err_msg = "invalid (INF) key" # pragma: no cover
raise BTClibRuntimeError(err_msg) # pragma: no cover
return ec.x_aff_from_jac(QJ)
def crack_prv_key_(
msg_hash1: Octets,
sig1: Union[Sig, Octets],
msg_hash2: Octets,
sig2: Union[Sig, Octets],
Q: BIP340PubKey,
hf: HashF = sha256,
) -> Tuple[int, int]:
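    # nonce-reuse attack: two signatures sharing the same r (i.e. the same
    # nonce) on different messages satisfy s1 - s2 = (c1 - c2) * q (mod n),
    # so q follows by modular division; the q <-> n-q ambiguity is settled
    # by the final gen_keys normalization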
if isinstance(sig1, Sig):
sig1.assert_valid()
else:
sig1 = Sig.parse(sig1)
if isinstance(sig2, Sig):
sig2.assert_valid()
else:
sig2 = Sig.parse(sig2)
ec = sig2.ec
if sig1.ec != ec:
raise BTClibValueError("not the same curve in signatures")
if sig1.r != sig2.r:
raise BTClibValueError("not the same r in signatures")
if sig1.s == sig2.s:
raise BTClibValueError("identical signatures")
x_Q = point_from_bip340pub_key(Q, ec)[0]
c_1 = challenge_(msg_hash1, x_Q, sig1.r, ec, hf)
c_2 = challenge_(msg_hash2, x_Q, sig2.r, ec, hf)
q = (sig1.s - sig2.s) * mod_inv(c_2 - c_1, ec.n) % ec.n
nonce = (sig1.s + c_1 * q) % ec.n
q, _ = gen_keys(q)
nonce, _ = gen_keys(nonce)
return q, nonce
def crack_prv_key(
msg1: Octets,
sig1: Union[Sig, Octets],
msg2: Octets,
sig2: Union[Sig, Octets],
Q: BIP340PubKey,
hf: HashF = sha256,
) -> Tuple[int, int]:
msg_hash1 = reduce_to_hlen(msg1, hf)
msg_hash2 = reduce_to_hlen(msg2, hf)
return crack_prv_key_(msg_hash1, sig1, msg_hash2, sig2, Q, hf)
def assert_batch_as_valid_(
m_hashes: Sequence[Octets],
Qs: Sequence[BIP340PubKey],
sigs: Sequence[Sig],
hf: HashF = sha256,
) -> None:
batch_size = len(Qs)
if batch_size == 0:
raise BTClibValueError("no signatures provided")
if len(m_hashes) != batch_size:
err_msg = f"mismatch between number of pub_keys ({batch_size}) "
err_msg += f"and number of messages ({len(m_hashes)})"
raise BTClibValueError(err_msg)
if len(sigs) != batch_size:
err_msg = f"mismatch between number of pub_keys ({batch_size}) "
err_msg += f"and number of signatures ({len(sigs)})"
raise BTClibValueError(err_msg)
if batch_size == 1:
assert_as_valid_(m_hashes[0], Qs[0], sigs[0], hf)
return None
ec = sigs[0].ec
if any(sig.ec != ec for sig in sigs):
raise BTClibValueError("not the same curve for all signatures")
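    # batch check as a random linear combination (in Jacobian coordinates):
    # (sum_i rand_i * s_i) * G == sum_i (rand_i * K_i + (rand_i * c_i) * Q_i)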
t = 0
scalars: List[int] = []
points: List[JacPoint] = []
for i, (msg_hash, Q, sig) in enumerate(zip(m_hashes, Qs, sigs)):
msg_hash = bytes_from_octets(msg_hash, hf().digest_size)
KJ = sig.r, ec.y_even(sig.r), 1
x_Q, y_Q = point_from_bip340pub_key(Q, ec)
QJ = x_Q, y_Q, 1
c = challenge_(msg_hash, x_Q, sig.r, ec, hf)
# rand in [1, n-1]
# deterministically generated using a CSPRNG seeded by a
# cryptographic hash (e.g., SHA256) of all inputs of the
# algorithm, or randomly generated independently for each
# run of the batch verification algorithm
rand = 1 if i == 0 else 1 + secrets.randbelow(ec.n - 1)
scalars.append(rand)
points.append(KJ)
scalars.append(rand * c % ec.n)
points.append(QJ)
t += rand * sig.s
TJ = _mult(t, ec.GJ, ec)
RHSJ = _multi_mult(scalars, points, ec)
# return T == RHS, checked in Jacobian coordinates
RHSZ2 = RHSJ[2] * RHSJ[2]
TZ2 = TJ[2] * TJ[2]
if (TJ[0] * RHSZ2 % ec.p != RHSJ[0] * TZ2 % ec.p) or (
TJ[1] * RHSZ2 * RHSJ[2] % ec.p != RHSJ[1] * TZ2 * TJ[2] % ec.p
):
raise BTClibRuntimeError("signature verification failed")
return None
def assert_batch_as_valid(
ms: Sequence[Octets],
Qs: Sequence[BIP340PubKey],
sigs: Sequence[Sig],
hf: HashF = sha256,
) -> None:
m_hashes = [reduce_to_hlen(msg, hf) for msg in ms]
return assert_batch_as_valid_(m_hashes, Qs, sigs, hf)
def batch_verify_(
m_hashes: Sequence[Octets],
Qs: Sequence[BIP340PubKey],
sigs: Sequence[Sig],
hf: HashF = sha256,
) -> bool:
    # all kinds of exceptions are caught, because
    # verify must always return a bool
try:
assert_batch_as_valid_(m_hashes, Qs, sigs, hf)
except Exception: # pylint: disable=broad-except
return False
return True
def batch_verify(
ms: Sequence[Octets],
Qs: Sequence[BIP340PubKey],
sigs: Sequence[Sig],
hf: HashF = sha256,
) -> bool:
"Batch verification of BIP340 signatures."
m_hashes = [reduce_to_hlen(msg, hf) for msg in ms]
return batch_verify_(m_hashes, Qs, sigs, hf)
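# Minimal usage sketch, added for illustration (not part of the original
# module): a sign/verify round-trip, also exercising serialization; it
# assumes this file is importable with the btclib package installed.
if __name__ == "__main__":
    q, x_Q = gen_keys()  # fresh BIP340 key-pair
    msg = b"Hello, BIP340!"
    signature = sign(msg, q)
    assert verify(msg, x_Q, signature)
    assert verify(msg, x_Q, signature.serialize())  # serialized form too
    assert not verify(b"tampered message", x_Q, signature)
    print("BIP340 sign/verify round-trip OK")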
31.243243 | 88 | 0.655872 | true | true
|
f71960081cd60ffe81c41f006c1585cf0ab6b33d | 763 | py | Python
river/metrics/smape.py | brcharron/creme | 25290780f6bba0eb030215194e81b120d0219389 | ["BSD-3-Clause"] | 1 | 2020-12-04T18:56:19.000Z | 2020-12-04T18:56:19.000Z
river/metrics/smape.py | brcharron/creme | 25290780f6bba0eb030215194e81b120d0219389 | ["BSD-3-Clause"] | null | null | null
river/metrics/smape.py | brcharron/creme | 25290780f6bba0eb030215194e81b120d0219389 | ["BSD-3-Clause"] | null | null | null
|
from . import base
__all__ = ['SMAPE']
class SMAPE(base.MeanMetric, base.RegressionMetric):
"""Symmetric mean absolute percentage error.
Examples
--------
>>> from river import metrics
>>> y_true = [0, 0.07533, 0.07533, 0.07533, 0.07533, 0.07533, 0.07533, 0.0672, 0.0672]
>>> y_pred = [0, 0.102, 0.107, 0.047, 0.1, 0.032, 0.047, 0.108, 0.089]
>>> metric = metrics.SMAPE()
>>> for yt, yp in zip(y_true, y_pred):
... metric = metric.update(yt, yp)
>>> metric
SMAPE: 37.869392
"""
def _eval(self, y_true, y_pred):
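        # per-sample SMAPE term: 2 * |y_true - y_pred| / (|y_true| + |y_pred|),
        # with the 0/0 case (both values zero) defined as 0; get() averages
        # these terms and scales by 100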
den = abs(y_true) + abs(y_pred)
if den == 0:
return 0.
return 2. * abs(y_true - y_pred) / den
def get(self):
return 100 * super().get()
21.8 | 90 | 0.549148 | true | true
|
f7196073162dfb6d958cbb439ef46151f867a863 | 371245 | py | Python
pandas/core/generic.py | rhshadrach/pandas | 8f51c998e84feeac6cb760a9f12baf6948cd5922 | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"] | 1 | 2021-08-06T14:07:02.000Z | 2021-08-06T14:07:02.000Z
pandas/core/generic.py | jdsurya/pandas | 777c0f90c6067c636fcd76ce003a8fbfcc311d7b | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"] | null | null | null
pandas/core/generic.py | jdsurya/pandas | 777c0f90c6067c636fcd76ce003a8fbfcc311d7b | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"] | 1 | 2021-02-03T11:02:42.000Z | 2021-02-03T11:02:42.000Z
|
import collections
from datetime import timedelta
import functools
import gc
import json
import operator
import pickle
import re
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
FrozenSet,
Hashable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
)
import warnings
import weakref
import numpy as np
from pandas._config import config
from pandas._libs import Timestamp, iNaT, lib
from pandas._typing import (
Axis,
FilePathOrBuffer,
FrameOrSeries,
JSONSerializable,
Label,
Level,
Renamer,
)
from pandas.compat import set_function_name
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
Appender,
Substitution,
doc,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_bool_kwarg,
validate_fillna_kwargs,
validate_percentile,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_dict_like,
is_extension_array_dtype,
is_float,
is_integer,
is_list_like,
is_number,
is_numeric_dtype,
is_object_dtype,
is_re_compilable,
is_scalar,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
import pandas as pd
from pandas.core import missing, nanops
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.indexes.api import (
Index,
InvalidIndexError,
MultiIndex,
RangeIndex,
ensure_index,
)
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import Period, PeriodIndex
import pandas.core.indexing as indexing
from pandas.core.internals import BlockManager
from pandas.core.missing import find_valid_index
from pandas.core.ops import _align_method_FRAME
from pandas.io.formats import format as fmt
from pandas.io.formats.format import DataFrameFormatter, format_percentiles
from pandas.io.formats.printing import pprint_thing
from pandas.tseries.frequencies import to_offset
if TYPE_CHECKING:
from pandas.core.resample import Resampler
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs: Dict[str, str] = dict()
_shared_doc_kwargs = dict(
axes="keywords for axes",
klass="Series/DataFrame",
axes_single_arg="int or labels for object",
args_transpose="axes to permute (int or label for object)",
optional_by="""
by : str or list of str
Name or list of names to sort by""",
)
def _single_replace(self, to_replace, method, inplace, limit):
"""
Replaces values in a Series using the fill method specified when no
    replacement value is given in the replace method.
"""
if self.ndim != 1:
raise TypeError(
f"cannot replace {to_replace} with method {method} on a "
f"{type(self).__name__}"
)
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result
bool_t = bool # Need alias because NDFrame has def bool:
class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):
"""
    N-dimensional analogue of DataFrame. Stores multi-dimensional data in a
    size-mutable, labeled data structure.
Parameters
----------
data : BlockManager
axes : list
copy : bool, default False
"""
_internal_names: List[str] = [
"_data",
"_cacher",
"_item_cache",
"_cache",
"_is_copy",
"_subtyp",
"_name",
"_index",
"_default_kind",
"_default_fill_value",
"_metadata",
"__array_struct__",
"__array_interface__",
]
_internal_names_set: Set[str] = set(_internal_names)
_accessors: Set[str] = set()
_deprecations: FrozenSet[str] = frozenset(["get_values"])
_metadata: List[str] = []
_is_copy = None
_data: BlockManager
_attrs: Dict[Optional[Hashable], Any]
_typ: str
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data: BlockManager,
copy: bool = False,
attrs: Optional[Mapping[Optional[Hashable], Any]] = None,
):
# copy kwarg is retained for mypy compat, is not used
object.__setattr__(self, "_is_copy", None)
object.__setattr__(self, "_data", data)
object.__setattr__(self, "_item_cache", {})
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
object.__setattr__(self, "_attrs", attrs)
@classmethod
def _init_mgr(cls, mgr, axes=None, dtype=None, copy=False):
""" passed a manager and a axes dict """
for a, axe in axes.items():
if axe is not None:
mgr = mgr.reindex_axis(
axe, axis=cls._get_block_manager_axis(a), copy=False
)
# make a copy if explicitly requested
if copy:
mgr = mgr.copy()
if dtype is not None:
# avoid further copies if we can
if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
mgr = mgr.astype(dtype=dtype)
return mgr
# ----------------------------------------------------------------------
@property
def attrs(self) -> Dict[Optional[Hashable], Any]:
"""
Dictionary of global attributes on this object.
.. warning::
attrs is experimental and may change without warning.
"""
if self._attrs is None:
self._attrs = {}
return self._attrs
@attrs.setter
def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None:
self._attrs = dict(value)
@classmethod
def _validate_dtype(cls, dtype):
""" validate the passed dtype """
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == "V":
raise NotImplementedError(
"compound dtypes are not implemented "
f"in the {cls.__name__} constructor"
)
return dtype
# ----------------------------------------------------------------------
# Construction
@property
def _constructor(self: FrameOrSeries) -> Type[FrameOrSeries]:
"""
Used when a manipulation result has the same dimensions as the
original.
"""
raise AbstractMethodError(self)
@property
def _constructor_sliced(self):
"""
        Used when a manipulation result has one dimension less than the
        original, such as DataFrame single column slicing.
"""
raise AbstractMethodError(self)
@property
def _constructor_expanddim(self):
"""
        Used when a manipulation result has one dimension more than the
        original, such as Series.to_frame().
"""
raise NotImplementedError
# ----------------------------------------------------------------------
# Axis
_AXIS_ALIASES = {"rows": 0}
_AXIS_IALIASES = {0: "rows"}
_stat_axis_number = 0
_stat_axis_name = "index"
_ix = None
_AXIS_ORDERS: List[str]
_AXIS_NUMBERS: Dict[str, int]
_AXIS_NAMES: Dict[int, str]
_AXIS_REVERSED: bool
_info_axis_number: int
_info_axis_name: str
_AXIS_LEN: int
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
@classmethod
def _construct_axes_from_arguments(
cls, args, kwargs, require_all: bool = False, sentinel=None
):
"""
        Construct and return axes if supplied in args/kwargs.
        If require_all, raise if not all axis arguments are supplied.
        Return a tuple of (axes, kwargs).
sentinel specifies the default parameter when an axis is not
supplied; useful to distinguish when a user explicitly passes None
in scenarios where None has special meaning.
"""
# construct the args
args = list(args)
for a in cls._AXIS_ORDERS:
            # look for an argument by position
if a not in kwargs:
try:
kwargs[a] = args.pop(0)
except IndexError as err:
if require_all:
raise TypeError(
"not enough/duplicate arguments specified!"
) from err
axes = {a: kwargs.pop(a, sentinel) for a in cls._AXIS_ORDERS}
return axes, kwargs
@classmethod
def _get_axis_number(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if is_integer(axis):
if axis in cls._AXIS_NAMES:
return axis
else:
try:
return cls._AXIS_NUMBERS[axis]
except KeyError:
pass
raise ValueError(f"No axis named {axis} for object type {cls}")
@classmethod
def _get_axis_name(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, str):
if axis in cls._AXIS_NUMBERS:
return axis
else:
try:
return cls._AXIS_NAMES[axis]
except KeyError:
pass
raise ValueError(f"No axis named {axis} for object type {cls}")
def _get_axis(self, axis):
name = self._get_axis_name(axis)
return getattr(self, name)
@classmethod
def _get_block_manager_axis(cls, axis):
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis
def _get_axis_resolvers(self, axis: str) -> Dict[str, ABCSeries]:
# index or columns
axis_index = getattr(self, axis)
d = dict()
prefix = axis[0]
for i, name in enumerate(axis_index.names):
if name is not None:
key = level = name
else:
# prefix with 'i' or 'c' depending on the input axis
# e.g., you must do ilevel_0 for the 0th level of an unnamed
                # multiindex
key = f"{prefix}level_{i}"
level = i
level_values = axis_index.get_level_values(level)
s = level_values.to_series()
s.index = axis_index
d[key] = s
# put the index/columns itself in the dict
if isinstance(axis_index, MultiIndex):
dindex = axis_index
else:
dindex = axis_index.to_series()
d[axis] = dindex
return d
def _get_index_resolvers(self) -> Dict[str, ABCSeries]:
from pandas.core.computation.parsing import clean_column_name
d: Dict[str, ABCSeries] = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)}
def _get_cleaned_column_resolvers(self) -> Dict[str, ABCSeries]:
"""
Return the special character free column resolvers of a dataframe.
Column names with special characters are 'cleaned up' so that they can
be referred to by backtick quoting.
Used in :meth:`DataFrame.eval`.
"""
from pandas.core.computation.parsing import clean_column_name
if isinstance(self, ABCSeries):
return {clean_column_name(self.name): self}
return {
clean_column_name(k): v for k, v in self.items() if not isinstance(k, int)
}
@property
def _info_axis(self):
return getattr(self, self._info_axis_name)
@property
def _stat_axis(self):
return getattr(self, self._stat_axis_name)
@property
def shape(self) -> Tuple[int, ...]:
"""
Return a tuple of axis dimensions
"""
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
@property
def axes(self) -> List[Index]:
"""
Return index label(s) of the internal NDFrame
"""
# we do it this way because if we have reversed axes, then
        # the block manager shows them reversed
return [self._get_axis(a) for a in self._AXIS_ORDERS]
@property
def ndim(self) -> int:
"""
Return an int representing the number of axes / array dimensions.
Return 1 if Series. Otherwise return 2 if DataFrame.
See Also
--------
ndarray.ndim : Number of array dimensions.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.ndim
1
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.ndim
2
"""
return self._data.ndim
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
See Also
--------
ndarray.size : Number of elements in the array.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.size
3
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.size
4
"""
return np.prod(self.shape)
@property
def _selected_obj(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
@property
def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
"""
Assign desired index to given axis.
Indexes for%(extended_summary_sub)s row labels can be changed by assigning
a list-like or Index.
.. versionchanged:: 0.21.0
The signature is now `labels` and `axis`, consistent with
the rest of pandas API. Previously, the `axis` and `labels`
arguments were respectively the first and second positional
arguments.
Parameters
----------
labels : list-like, Index
The values for the new index.
axis : %(axes_single_arg)s, default 0
The axis to update. The value 0 identifies the rows%(axis_description_sub)s.
inplace : bool, default False
Whether to return a new %(klass)s instance.
Returns
-------
renamed : %(klass)s or None
An object of type %(klass)s if inplace=False, None otherwise.
See Also
--------
%(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.
"""
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
def _set_axis(self, axis: int, labels: Index) -> None:
labels = ensure_index(labels)
self._data.set_axis(axis, labels)
self._clear_item_cache()
def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:
"""
Interchange axes and swap values axes appropriately.
Returns
-------
y : same as input
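        Examples
        --------
        A minimal illustration (for a DataFrame, swapping the two axes is the
        transpose):
        >>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
        >>> df.swapaxes(0, 1)
           0  1
        a  1  2
        b  3  4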
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(self)
def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries:
"""
Return DataFrame with requested index / column level(s) removed.
.. versionadded:: 0.24.0
Parameters
----------
level : int, str, or list-like
If a string is given, must be the name of a level
If list-like, elements must be names or positional indexes
of levels.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the level(s) is removed:
            * 0 or 'index': remove level(s) from the row index.
            * 1 or 'columns': remove level(s) from the column index.
Returns
-------
DataFrame
DataFrame with requested index / column level(s) removed.
Examples
--------
>>> df = pd.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]
... ]).set_index([0, 1]).rename_axis(['a', 'b'])
>>> df.columns = pd.MultiIndex.from_tuples([
... ('c', 'e'), ('d', 'f')
... ], names=['level_1', 'level_2'])
>>> df
level_1 c d
level_2 e f
a b
1 2 3 4
5 6 7 8
9 10 11 12
>>> df.droplevel('a')
level_1 c d
level_2 e f
b
2 3 4
6 7 8
10 11 12
>>> df.droplevel('level_2', axis=1)
level_1 c d
a b
1 2 3 4
5 6 7 8
9 10 11 12
"""
labels = self._get_axis(axis)
new_labels = labels.droplevel(level)
result = self.set_axis(new_labels, axis=axis, inplace=False)
return result
def pop(self: FrameOrSeries, item) -> FrameOrSeries:
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result
def squeeze(self, axis=None):
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
>>> df_0a.squeeze('rows')
a 1
Name: 0, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
"""
axis = self._AXIS_NAMES if axis is None else (self._get_axis_number(axis),)
return self.iloc[
tuple(
0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes)
)
]
# ----------------------------------------------------------------------
# Rename
def rename(
self: FrameOrSeries,
mapper: Optional[Renamer] = None,
*,
index: Optional[Renamer] = None,
columns: Optional[Renamer] = None,
axis: Optional[Axis] = None,
copy: bool = True,
inplace: bool = False,
level: Optional[Level] = None,
errors: str = "ignore",
) -> Optional[FrameOrSeries]:
"""
Alter axes with input function or functions. Function / dict values must be
unique (1-to-1). Labels not contained in a dict / Series will be left
as-is. Extra labels listed don't throw an error. Alternatively, change
``Series.name`` with a scalar value (Series only).
Parameters
----------
%(axes)s : scalar, list-like, dict-like or function, optional
Scalar or list-like will alter the ``Series.name`` attribute,
and raise on DataFrame.
dict-like or functions are transformations to apply to
that axis' values
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new %(klass)s. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
renamed : %(klass)s (new object)
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
NDFrame.rename_axis
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
Since ``DataFrame`` doesn't have a ``.name`` attribute,
only mapping-type arguments are allowed.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(2)
Traceback (most recent call last):
...
TypeError: 'int' object is not callable
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
See the :ref:`user guide <basics.rename>` for more.
"""
if mapper is None and index is None and columns is None:
raise TypeError("must pass an index to rename")
if index is not None or columns is not None:
if axis is not None:
raise TypeError(
"Cannot specify both 'axis' and any of 'index' or 'columns'"
)
elif mapper is not None:
raise TypeError(
"Cannot specify both 'mapper' and any of 'index' or 'columns'"
)
else:
# use the mapper argument
if axis and self._get_axis_number(axis) == 1:
columns = mapper
else:
index = mapper
result = self if inplace else self.copy(deep=copy)
for axis_no, replacements in enumerate((index, columns)):
if replacements is None:
continue
ax = self._get_axis(axis_no)
baxis = self._get_block_manager_axis(axis_no)
f = com.get_rename_function(replacements)
if level is not None:
level = ax._get_level_number(level)
# GH 13473
if not callable(replacements):
indexer = ax.get_indexer_for(replacements)
if errors == "raise" and len(indexer[indexer == -1]):
missing_labels = [
label
for index, label in enumerate(replacements)
if indexer[index] == -1
]
raise KeyError(f"{missing_labels} not found in axis")
result._data = result._data.rename_axis(
f, axis=baxis, copy=copy, level=level
)
result._clear_item_cache()
if inplace:
self._update_inplace(result._data)
return None
else:
return result.__finalize__(self)
@rewrite_axis_style_signature("mapper", [("copy", True), ("inplace", False)])
def rename_axis(self, mapper=lib.no_default, **kwargs):
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or function transformation to
apply to that axis' values.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
.. versionchanged:: 0.24.0
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, DataFrame, or None
The same type as the caller or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
In this case, the parameter ``copy`` is ignored.
The second calling convention will modify the names of the
corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
**Series**
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
**DataFrame**
>>> df = pd.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal")
>>> df
num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns")
>>> df
limbs num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
**MultiIndex**
>>> df.index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df
limbs num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'})
limbs num_legs num_arms
class name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(columns=str.upper)
LIMBS num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
"""
axes, kwargs = self._construct_axes_from_arguments(
(), kwargs, sentinel=lib.no_default
)
copy = kwargs.pop("copy", True)
inplace = kwargs.pop("inplace", False)
axis = kwargs.pop("axis", 0)
if axis is not None:
axis = self._get_axis_number(axis)
if kwargs:
raise TypeError(
"rename_axis() got an unexpected keyword "
f'argument "{list(kwargs.keys())[0]}"'
)
inplace = validate_bool_kwarg(inplace, "inplace")
if mapper is not lib.no_default:
# Use v0.23 behavior if a scalar or list
non_mapper = is_scalar(mapper) or (
is_list_like(mapper) and not is_dict_like(mapper)
)
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
raise ValueError("Use `.rename` to alter labels with a mapper.")
else:
# Use new behavior. Means that index and/or columns
# is specified
result = self if inplace else self.copy(deep=copy)
for axis in range(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is lib.no_default:
continue
non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))
if non_mapper:
newnames = v
else:
f = com.get_rename_function(v)
curnames = self._get_axis(axis).names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis, inplace=True)
if not inplace:
return result
def _set_axis_name(self, name, axis=0, inplace=False):
"""
Set the name(s) of the axis.
Parameters
----------
name : str or list of str
Name(s) to set.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to set the label. The value 0 or 'index' specifies index,
and the value 1 or 'columns' specifies columns.
inplace : bool, default False
If `True`, do operation inplace and return None.
.. versionadded:: 0.21.0
Returns
-------
Series, DataFrame, or None
The same type as the caller or `None` if `inplace` is `True`.
See Also
--------
DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
Series.rename : Alter the index labels or set the index name
of :class:`Series`.
Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.
Examples
--------
>>> df = pd.DataFrame({"num_legs": [4, 4, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs
dog 4
cat 4
monkey 2
>>> df._set_axis_name("animal")
num_legs
animal
dog 4
cat 4
monkey 2
>>> df.index = pd.MultiIndex.from_product(
... [["mammal"], ['dog', 'cat', 'monkey']])
>>> df._set_axis_name(["type", "name"])
num_legs
type name
mammal dog 4
cat 4
monkey 2
"""
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
inplace = validate_bool_kwarg(inplace, "inplace")
renamed = self if inplace else self.copy()
renamed.set_axis(idx, axis=axis, inplace=True)
if not inplace:
return renamed
# ----------------------------------------------------------------------
# Comparison Methods
def _indexed_same(self, other) -> bool:
return all(
self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS
)
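# A minimal sketch of what ``_indexed_same`` checks (assuming ``pd`` is
# imported): two objects are "indexed same" when every axis matches,
# regardless of the values they hold.
#   >>> a = pd.DataFrame({"x": [1, 2]})
#   >>> b = pd.DataFrame({"x": [3, 4]})
#   >>> a._indexed_same(b)
#   True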
def equals(self, other):
"""
Test whether two objects contain the same elements.
This function allows two Series or DataFrames to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal. The column headers do not
need to have the same type, but the elements within the columns must
be the same dtype.
Parameters
----------
other : Series or DataFrame
The other Series or DataFrame to be compared with the first.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
See Also
--------
Series.eq : Compare two Series objects of the same length
and return a Series where each element is True if the element
in each Series is equal, False otherwise.
DataFrame.eq : Compare two DataFrame objects of the same shape and
return a DataFrame where each element is True if the respective
element in each DataFrame is equal, False otherwise.
testing.assert_series_equal : Raises an AssertionError if left and
right are not equal. Provides an easy interface to ignore
inequality in dtypes, indexes and precision among others.
testing.assert_frame_equal : Like assert_series_equal, but targets
DataFrames.
numpy.array_equal : Return True if two arrays have the same shape
and elements, False otherwise.
Notes
-----
This function requires that the elements have the same dtype as their
respective elements in the other Series or DataFrame. However, the
column labels do not need to have the same type, as long as they are
still considered equal.
Examples
--------
>>> df = pd.DataFrame({1: [10], 2: [20]})
>>> df
1 2
0 10 20
DataFrames df and exactly_equal have the same types and values for
their elements and column labels, which will return True.
>>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
1 2
0 10 20
>>> df.equals(exactly_equal)
True
DataFrames df and different_column_type have the same element
types and values, but have different types for the column labels,
which will still return True.
>>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
1.0 2.0
0 10 20
>>> df.equals(different_column_type)
True
DataFrames df and different_data_type have different types for the
same values for their elements, and will return False even though
their column labels are the same values and types.
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
>>> different_data_type
1 2
0 10.0 20.0
>>> df.equals(different_data_type)
False
"""
if not isinstance(other, self._constructor):
return False
return self._data.equals(other._data)
# -------------------------------------------------------------------------
# Unary Methods
def __neg__(self):
values = self._values
if is_bool_dtype(values):
arr = operator.inv(values)
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.neg(values)
else:
raise TypeError(f"Unary negative expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
def __pos__(self):
values = self._values
if is_bool_dtype(values):
arr = values
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.pos(values)
else:
raise TypeError(f"Unary plus expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
def __invert__(self):
if not self.size:
# inv fails with 0 len
return self
new_data = self._data.apply(operator.invert)
result = self._constructor(new_data).__finalize__(self)
return result
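# A quick sketch of the unary dunders above (assuming ``pd`` is imported):
# ``-`` negates numeric data while ``~`` inverts boolean data element-wise.
#   >>> -pd.Series([1, -2])
#   0   -1
#   1    2
#   dtype: int64
#   >>> ~pd.Series([True, False])
#   0    False
#   1     True
#   dtype: bool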
def __nonzero__(self):
raise ValueError(
f"The truth value of a {type(self).__name__} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
)
__bool__ = __nonzero__
def bool(self):
"""
Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or if
that element is not boolean.
Returns
-------
bool
Same single boolean value converted to bool type.
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError(
"bool cannot act on a non-boolean single element "
f"{type(self).__name__}"
)
self.__nonzero__()
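# Usage sketch for ``bool`` (assuming ``pd`` is imported): only a
# single-element boolean object converts cleanly.
#   >>> pd.Series([True]).bool()
#   True
#   >>> pd.Series([1]).bool()
#   Traceback (most recent call last):
#   ...
#   ValueError: bool cannot act on a non-boolean single element Series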
def __abs__(self: FrameOrSeries) -> FrameOrSeries:
return self.abs()
def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries:
return self.round(decimals)
# -------------------------------------------------------------------------
# Label or Level Combination Helpers
#
# A collection of helper methods for DataFrame/Series operations that
# accept a combination of column/index labels and levels. All such
# operations should utilize/extend these methods when possible so that we
# have consistent precedence and validation logic throughout the library.
def _is_level_reference(self, key, axis=0):
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
"""
axis = self._get_axis_number(axis)
return (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and not self._is_label_reference(key, axis=axis)
)
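# Illustrative sketch (assuming ``pd`` is imported): a key is a *level*
# reference only when it names an index level and is not also a column label.
#   >>> df = pd.DataFrame({"v": [1, 2]},
#   ...                   index=pd.Index(["a", "b"], name="key"))
#   >>> df._is_level_reference("key")
#   True
#   >>> df._is_level_reference("v")  # column label, not a level
#   False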
def _is_label_reference(self, key, axis=0) -> bool_t:
"""
Test whether a key is a label reference for a given axis.
To be considered a label reference, `key` must be a string that:
- (axis=0): Matches a column label
- (axis=1): Matches an index label
Parameters
----------
key: str
Potential label name
axis: int, default 0
Axis perpendicular to the axis that labels are associated with
(0 means search for column labels, 1 means search for index labels)
Returns
-------
is_label: bool
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
return (
key is not None
and is_hashable(key)
and any(key in self.axes[ax] for ax in other_axes)
)
def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:
"""
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key: str
Potential label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_label_or_level: bool
"""
return self._is_level_reference(key, axis=axis) or self._is_label_reference(
key, axis=axis
)
def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:
"""
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key: str or object
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns).
Raises
------
ValueError: `key` is ambiguous
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
if (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and any(key in self.axes[ax] for ax in other_axes)
):
# Build an informative and grammatical warning
level_article, level_type = (
("an", "index") if axis == 0 else ("a", "column")
)
label_article, label_type = (
("a", "column") if axis == 0 else ("an", "index")
)
msg = (
f"'{key}' is both {level_article} {level_type} level and "
f"{label_article} {label_type} label, which is ambiguous."
)
raise ValueError(msg)
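# Sketch of the ambiguity check (assuming ``pd`` is imported): a name that
# is both an index level and a column label raises.
#   >>> df = pd.DataFrame({"key": [1, 2]},
#   ...                   index=pd.Index(["a", "b"], name="key"))
#   >>> df._check_label_or_level_ambiguity("key")
#   Traceback (most recent call last):
#   ...
#   ValueError: 'key' is both an index level and a column label, which is ambiguous.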
def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
"""
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
Retrieval logic:
- (axis=0): Return column values if `key` matches a column label.
Otherwise return index level values if `key` matches an index
level.
- (axis=1): Return row values if `key` matches an index label.
Otherwise return column level values if 'key' matches a column
level
Parameters
----------
key: str
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
values: np.ndarray
Raises
------
KeyError
if `key` matches neither a label nor a level
ValueError
if `key` matches multiple labels
FutureWarning
if `key` is ambiguous. This will become an ambiguity error in a
future version
"""
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
if self._is_label_reference(key, axis=axis):
self._check_label_or_level_ambiguity(key, axis=axis)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
else:
raise KeyError(key)
# Check for duplicates
if values.ndim > 1:
if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):
multi_message = (
"\n"
"For a multi-index, the label must be a "
"tuple with elements corresponding to each level."
)
else:
multi_message = ""
label_axis_name = "column" if axis == 0 else "index"
raise ValueError(
(
f"The {label_axis_name} label '{key}' "
f"is not unique.{multi_message}"
)
)
return values
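# Retrieval sketch (assuming ``pd`` is imported): on axis=0 a column label
# takes precedence, otherwise an index level of the same name is used.
#   >>> df = pd.DataFrame({"v": [10, 20]},
#   ...                   index=pd.Index(["a", "b"], name="key"))
#   >>> df._get_label_or_level_values("v")
#   array([10, 20])
#   >>> df._get_label_or_level_values("key")
#   array(['a', 'b'], dtype=object)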
def _drop_labels_or_levels(self, keys, axis: int = 0):
"""
Drop labels and/or levels for the given `axis`.
For each key in `keys`:
- (axis=0): If key matches a column label then drop the column.
Otherwise if key matches an index level then drop the level.
- (axis=1): If key matches an index label then drop the row.
Otherwise if key matches a column level then drop the level.
Parameters
----------
keys: str or list of str
labels or levels to drop
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
dropped: DataFrame
Raises
------
ValueError
if any `keys` match neither a label nor a level
"""
axis = self._get_axis_number(axis)
# Validate keys
keys = com.maybe_make_list(keys)
invalid_keys = [
k for k in keys if not self._is_label_or_level_reference(k, axis=axis)
]
if invalid_keys:
raise ValueError(
(
"The following keys are not valid labels or "
f"levels for axis {axis}: {invalid_keys}"
)
)
# Compute levels and labels to drop
levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]
labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]
# Perform copy upfront and then use inplace operations below.
# This ensures that we always perform exactly one copy.
# ``copy`` and/or ``inplace`` options could be added in the future.
dropped = self.copy()
if axis == 0:
# Handle dropping index levels
if levels_to_drop:
dropped.reset_index(levels_to_drop, drop=True, inplace=True)
# Handle dropping columns labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=1, inplace=True)
else:
# Handle dropping column levels
if levels_to_drop:
if isinstance(dropped.columns, MultiIndex):
# Drop the specified levels from the MultiIndex
dropped.columns = dropped.columns.droplevel(levels_to_drop)
else:
# Drop the last level of Index by replacing with
# a RangeIndex
dropped.columns = RangeIndex(dropped.columns.size)
# Handle dropping index labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=0, inplace=True)
return dropped
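# Sketch (assuming ``pd`` is imported): keys matching column labels drop the
# column, keys matching index levels drop the level.
#   >>> df = pd.DataFrame({"v": [1, 2], "w": [3, 4]},
#   ...                   index=pd.Index(["a", "b"], name="key"))
#   >>> df._drop_labels_or_levels(["w", "key"])
#      v
#   0  1
#   1  2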
# ----------------------------------------------------------------------
# Iteration
def __hash__(self):
raise TypeError(
f"{repr(type(self).__name__)} objects are mutable, "
f"thus they cannot be hashed"
)
def __iter__(self):
"""
Iterate over info axis.
Returns
-------
iterator
Info axis as iterator.
"""
return iter(self._info_axis)
# can we get a better explanation of this?
def keys(self):
"""
Get the 'info axis' (see Indexing for more).
This is the index for Series, the columns for DataFrame.
Returns
-------
Index
Info axis.
"""
return self._info_axis
def items(self):
"""
Iterate over (label, values) pairs on the info axis.
This is the index for Series and the columns for DataFrame.
Returns
-------
Generator
"""
for h in self._info_axis:
yield h, self[h]
@Appender(items.__doc__)
def iteritems(self):
return self.items()
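# Iteration sketch (assuming ``pd`` is imported): for a DataFrame, ``items``
# yields ``(column label, column Series)`` pairs.
#   >>> df = pd.DataFrame({"a": [1], "b": [2]})
#   >>> [label for label, col in df.items()]
#   ['a', 'b']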
def __len__(self) -> int:
"""Returns length of info axis"""
return len(self._info_axis)
def __contains__(self, key) -> bool_t:
"""True if the key is in the info axis"""
return key in self._info_axis
@property
def empty(self) -> bool_t:
"""
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True, if not return False.
See Also
--------
Series.dropna : Return series without null values.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
# ----------------------------------------------------------------------
# Array Interface
# This is also set in IndexOpsMixin
# GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
__array_priority__ = 1000
def __array__(self, dtype=None) -> np.ndarray:
return np.asarray(self._values, dtype=dtype)
def __array_wrap__(self, result, context=None):
result = lib.item_from_zerodim(result)
if is_scalar(result):
# e.g. we get here with np.ptp(series)
# ptp also requires the item_from_zerodim
return result
d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
return self._constructor(result, **d).__finalize__(self)
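# Sketch of the wrapping behavior (assuming ``pd`` and ``np`` are imported):
# a NumPy ufunc applied to a Series comes back as a Series with the original
# index, while zero-dimensional results fall through as plain scalars.
#   >>> s = pd.Series([1.0, 4.0], index=["x", "y"])
#   >>> np.sqrt(s)
#   x    1.0
#   y    2.0
#   dtype: float64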
# ideally we would define this to avoid the getattr checks, but
# it is slower
# @property
# def __array_interface__(self):
# """ provide numpy array interface method """
# values = self.values
# return dict(typestr=values.dtype.str,shape=values.shape,data=values)
# ----------------------------------------------------------------------
# Picklability
def __getstate__(self) -> Dict[str, Any]:
meta = {k: getattr(self, k, None) for k in self._metadata}
return dict(
_data=self._data,
_typ=self._typ,
_metadata=self._metadata,
attrs=self.attrs,
**meta,
)
def __setstate__(self, state):
if isinstance(state, BlockManager):
self._data = state
elif isinstance(state, dict):
typ = state.get("_typ")
if typ is not None:
attrs = state.get("_attrs", {})
object.__setattr__(self, "_attrs", attrs)
# set in the order of internal names
# to avoid definitional recursion
# e.g. say fill_value needing _data to be
# defined
meta = set(self._internal_names + self._metadata)
for k in list(meta):
if k in state:
v = state[k]
object.__setattr__(self, k, v)
for k, v in state.items():
if k not in meta:
object.__setattr__(self, k, v)
else:
raise NotImplementedError("Pre-0.12 pickles are no longer supported")
elif len(state) == 2:
raise NotImplementedError("Pre-0.12 pickles are no longer supported")
self._item_cache = {}
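# Round-trip sketch (assuming ``pd`` is imported; ``pickle`` is imported at
# module level): ``__getstate__``/``__setstate__`` make objects picklable.
#   >>> df = pd.DataFrame({"a": [1, 2]})
#   >>> pickle.loads(pickle.dumps(df)).equals(df)
#   True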
# ----------------------------------------------------------------------
# Rendering Methods
def __repr__(self) -> str:
# string representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
prepr = f"[{','.join(map(pprint_thing, self))}]"
return f"{type(self).__name__}({prepr})"
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular object.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if config.get_option("display.latex.repr"):
return self.to_latex()
else:
return None
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
naming convention.
"""
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option("display.max_rows"))
payload = json.loads(
data.to_json(orient="table"), object_pairs_hook=collections.OrderedDict
)
return payload
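# Sketch (assuming ``pd`` is imported): the Table Schema payload is only
# produced when the corresponding display option is enabled.
#   >>> pd.set_option("display.html.table_schema", True)
#   >>> payload = pd.DataFrame({"a": [1]})._repr_data_resource_()
#   >>> sorted(payload)
#   ['data', 'schema']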
# ----------------------------------------------------------------------
# I/O Methods
_shared_docs[
"to_markdown"
] = """
Print %(klass)s in Markdown-friendly format.
.. versionadded:: 1.0.0
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
%(klass)s in Markdown-friendly format.
"""
_shared_docs[
"to_excel"
] = """
Write %(klass)s to an Excel sheet.
To write a single %(klass)s to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
ExcelWriter : Class for writing DataFrame objects into excel sheets.
read_excel : Read an Excel file into a pandas DataFrame.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Notes
-----
For compatibility with :meth:`~DataFrame.to_csv`,
to_excel serializes lists and dicts to strings before writing.
Once a workbook has been saved it is not possible to write further
data without rewriting the whole workbook.
Examples
--------
Create, write to and save a workbook:
>>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> df2 = df1.copy()
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
ExcelWriter can also be used to append to an existing Excel file:
>>> with pd.ExcelWriter('output.xlsx',
... mode='a') as writer: # doctest: +SKIP
... df.to_excel(writer, sheet_name='Sheet_name_3')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
@Appender(_shared_docs["to_excel"] % dict(klass="object"))
def to_excel(
self,
excel_writer,
sheet_name="Sheet1",
na_rep="",
float_format=None,
columns=None,
header=True,
index=True,
index_label=None,
startrow=0,
startcol=0,
engine=None,
merge_cells=True,
encoding=None,
inf_rep="inf",
verbose=True,
freeze_panes=None,
) -> None:
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(
df,
na_rep=na_rep,
cols=columns,
header=header,
float_format=float_format,
index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep,
)
formatter.write(
excel_writer,
sheet_name=sheet_name,
startrow=startrow,
startcol=startcol,
freeze_panes=freeze_panes,
engine=engine,
)
def to_json(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
orient: Optional[str] = None,
date_format: Optional[str] = None,
double_precision: int = 10,
force_ascii: bool_t = True,
date_unit: str = "ms",
default_handler: Optional[Callable[[Any], JSONSerializable]] = None,
lines: bool_t = False,
compression: Optional[str] = "infer",
index: bool_t = True,
indent: Optional[int] = None,
) -> Optional[str]:
"""
Convert the object to a JSON string.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path_or_buf : str or file handle, optional
File path or object. If not specified, the result is returned as
a string.
orient : str
Indication of expected JSON string format.
* Series:
- default is 'index'
- allowed values are: {'split','records','index','table'}.
* DataFrame:
- default is 'columns'
- allowed values are: {'split', 'records', 'index', 'columns',
'values', 'table'}.
* The format of the JSON string:
- 'split' : dict like {'index' -> [index], 'columns' -> [columns],
'data' -> [values]}
- 'records' : list like [{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
- 'columns' : dict like {column -> {index -> value}}
- 'values' : just the values array
- 'table' : dict like {'schema': {schema}, 'data': {data}}
Describing the data, where the data component is like ``orient='records'``.
.. versionchanged:: 0.20.0
date_format : {None, 'epoch', 'iso'}
Type of date conversion. 'epoch' = epoch milliseconds,
'iso' = ISO8601. The default depends on the `orient`. For
``orient='table'``, the default is 'iso'. For all other orients,
the default is 'epoch'.
double_precision : int, default 10
The number of decimal places to use when encoding
floating point values.
force_ascii : bool, default True
Force encoded string to be ASCII.
date_unit : str, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
lines : bool, default False
If 'orient' is 'records' write out line-delimited json format. Will
throw ValueError for any other 'orient', since the other orients are
not list-like.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
.. versionadded:: 0.21.0
.. versionchanged:: 0.24.0
'infer' option added and set to default
index : bool, default True
Whether to include the index values in the JSON string. Not
including the index (``index=False``) is only supported when
orient is 'split' or 'table'.
.. versionadded:: 0.23.0
indent : int, optional
Length of whitespace used to indent each record.
.. versionadded:: 1.0.0
Returns
-------
None or str
If path_or_buf is None, returns the resulting json format as a
string. Otherwise returns None.
See Also
--------
read_json : Convert a JSON string to pandas object.
Notes
-----
The behavior of ``indent=0`` varies from the stdlib, which does not
indent the output but does insert newlines. Currently, ``indent=0``
and the default ``indent=None`` are equivalent in pandas, though this
may change in a future release.
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:
>>> df.to_json(orient='columns')
'{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}'
Encoding/decoding a Dataframe using ``'values'`` formatted JSON:
>>> df.to_json(orient='values')
'[["a","b"],["c","d"]]'
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
from pandas.io import json
if date_format is None and orient == "table":
date_format = "iso"
elif date_format is None:
date_format = "epoch"
config.is_nonnegative_int(indent)
indent = indent or 0
return json.to_json(
path_or_buf=path_or_buf,
obj=self,
orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii,
date_unit=date_unit,
default_handler=default_handler,
lines=lines,
compression=compression,
index=index,
indent=indent,
)
def to_hdf(
self,
path_or_buf,
key: str,
mode: str = "a",
complevel: Optional[int] = None,
complib: Optional[str] = None,
append: bool_t = False,
format: Optional[str] = None,
index: bool_t = True,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
nan_rep=None,
dropna: Optional[bool_t] = None,
data_columns: Optional[List[str]] = None,
errors: str = "strict",
encoding: str = "UTF-8",
) -> None:
"""
Write the contained data to an HDF5 file using HDFStore.
Hierarchical Data Format (HDF) is self-describing, allowing an
application to interpret the structure and contents of a file with
no outside information. One HDF file can hold a mix of related objects
which can be accessed as a group or as individual objects.
In order to add another DataFrame or Series to an existing HDF file
please use append mode and a different key.
For more information see the :ref:`user guide <io.hdf5>`.
Parameters
----------
path_or_buf : str or pandas.HDFStore
File path or HDFStore object.
key : str
Identifier for the group in the store.
mode : {'a', 'w', 'r+'}, default 'a'
Mode to open file:
- 'w': write, a new file is created (an existing file with
the same name would be deleted).
- 'a': append, an existing file is opened for reading and
writing, and if the file does not exist it is created.
- 'r+': similar to 'a', but the file must already exist.
complevel : {0-9}, optional
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
append : bool, default False
For Table formats, append the input data to the existing.
format : {'fixed', 'table', None}, default 'fixed'
Possible values:
- 'fixed': Fixed format. Fast writing/reading. Not-appendable,
nor searchable.
- 'table': Table format. Write as a PyTables Table structure
which may perform worse but allow more flexible operations
like searching / selecting subsets of the data.
- If None, pd.get_option('io.hdf.default_format') is checked,
followed by fallback to "fixed"
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
encoding : str, default "UTF-8"
min_itemsize : dict or int, optional
Map column names to minimum string sizes for columns.
nan_rep : Any, optional
How to represent null values as str.
Not allowed with append=True.
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
Applicable only to format='table'.
See Also
--------
read_hdf : Read from an HDF file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_feather : Write out feather-format for DataFrames.
DataFrame.to_csv : Write out to a csv file.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
... index=['a', 'b', 'c'])
>>> df.to_hdf('data.h5', key='df', mode='w')
We can add another object to the same file:
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_hdf('data.h5', key='s')
Reading from HDF file:
>>> pd.read_hdf('data.h5', 'df')
A B
a 1 4
b 2 5
c 3 6
>>> pd.read_hdf('data.h5', 's')
0 1
1 2
2 3
3 4
dtype: int64
Deleting file with data:
>>> import os
>>> os.remove('data.h5')
"""
from pandas.io import pytables
pytables.to_hdf(
path_or_buf,
key,
self,
mode=mode,
complevel=complevel,
complib=complib,
append=append,
format=format,
index=index,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
dropna=dropna,
data_columns=data_columns,
errors=errors,
encoding=encoding,
)
def to_sql(
self,
name: str,
con,
schema=None,
if_exists: str = "fail",
index: bool_t = True,
index_label=None,
chunksize=None,
dtype=None,
method=None,
) -> None:
"""
Write records stored in a DataFrame to a SQL database.
Databases supported by SQLAlchemy [1]_ are supported. Tables can be
newly created, appended to, or overwritten.
Parameters
----------
name : str
Name of SQL table.
con : sqlalchemy.engine.Engine or sqlite3.Connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. Legacy support is provided for sqlite3.Connection objects. The user
is responsible for engine disposal and connection closure for the SQLAlchemy
connectable. See `here \
<https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
schema : str, optional
Specify the schema (if database flavor supports this). If None, use
default schema.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
How to behave if the table already exists.
* fail: Raise a ValueError.
* replace: Drop the table before inserting new values.
* append: Insert new values to the existing table.
index : bool, default True
Write DataFrame index as a column. Uses `index_label` as the column
name in the table.
index_label : str or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
Specify the number of rows in each batch to be written at a time.
By default, all rows will be written at once.
dtype : dict or scalar, optional
Specifying the datatype for columns. If a dictionary is used, the
keys should be the column names and the values should be the
SQLAlchemy types or strings for the sqlite3 legacy mode. If a
scalar is provided, it will be applied to all columns.
method : {None, 'multi', callable}, optional
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
Raises
------
ValueError
When the table already exists and `if_exists` is 'fail' (the
default).
See Also
--------
read_sql : Read a DataFrame from a table.
Notes
-----
Timezone aware datetime columns will be written as
``Timestamp with timezone`` type with SQLAlchemy if supported by the
database. Otherwise, the datetimes will be stored as timezone unaware
timestamps local to the original timezone.
.. versionadded:: 0.24.0
References
----------
.. [1] https://docs.sqlalchemy.org
.. [2] https://www.python.org/dev/peps/pep-0249/
Examples
--------
Create an in-memory SQLite database.
>>> from sqlalchemy import create_engine
>>> engine = create_engine('sqlite://', echo=False)
Create a table from scratch with 3 rows.
>>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
>>> df
name
0 User 1
1 User 2
2 User 3
>>> df.to_sql('users', con=engine)
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]
>>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
>>> df1.to_sql('users', con=engine, if_exists='append')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
(0, 'User 4'), (1, 'User 5')]
Overwrite the table with just ``df1``.
>>> df1.to_sql('users', con=engine, if_exists='replace',
... index_label='id')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 4'), (1, 'User 5')]
Specify the dtype (especially useful for integers with missing values).
Notice that while pandas is forced to store the data as floating point,
the database supports nullable integers. When fetching the data with
Python, we get back integer scalars.
>>> df = pd.DataFrame({"A": [1, None, 2]})
>>> df
A
0 1.0
1 NaN
2 2.0
>>> from sqlalchemy.types import Integer
>>> df.to_sql('integers', con=engine, index=False,
... dtype={"A": Integer()})
>>> engine.execute("SELECT * FROM integers").fetchall()
[(1,), (None,), (2,)]
"""
from pandas.io import sql
sql.to_sql(
self,
name,
con,
schema=schema,
if_exists=if_exists,
index=index,
index_label=index_label,
chunksize=chunksize,
dtype=dtype,
method=method,
)
def to_pickle(
self,
path,
compression: Optional[str] = "infer",
protocol: int = pickle.HIGHEST_PROTOCOL,
) -> None:
"""
Pickle (serialize) object to file.
Parameters
----------
path : str
File path where the pickled object will be stored.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \
default 'infer'
A string representing the compression to use in the output file. By
default, infers from the file extension in specified path.
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible
values are 0, 1, 2, 3, 4. A negative value for the protocol
parameter is equivalent to setting its value to HIGHEST_PROTOCOL.
.. [1] https://docs.python.org/3/library/pickle.html.
.. versionadded:: 0.21.0.
See Also
--------
read_pickle : Load pickled pandas object (or any object) from file.
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_sql : Write DataFrame to a SQL database.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Examples
--------
>>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
>>> original_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> original_df.to_pickle("./dummy.pkl")
>>> unpickled_df = pd.read_pickle("./dummy.pkl")
>>> unpickled_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> import os
>>> os.remove("./dummy.pkl")
"""
from pandas.io.pickle import to_pickle
to_pickle(self, path, compression=compression, protocol=protocol)
def to_clipboard(
self, excel: bool_t = True, sep: Optional[str] = None, **kwargs
) -> None:
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
Produce output in a csv format for easy pasting into excel.
- True, use the provided separator for csv pasting.
- False, write a string representation of the object to the clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_table.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',')
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False)
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
def to_xarray(self):
"""
Return an xarray object from the pandas object.
Returns
-------
xarray.DataArray or xarray.Dataset
Data in the pandas structure converted to Dataset if the object is
a DataFrame, or a DataArray if the object is a Series.
See Also
--------
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Notes
-----
See the `xarray docs <https://xarray.pydata.org/en/stable/>`__
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
... ('parrot', 'bird', 24.0, 2),
... ('lion', 'mammal', 80.5, 4),
... ('monkey', 'mammal', np.nan, 4)],
... columns=['name', 'class', 'max_speed',
... 'num_legs'])
>>> df
name class max_speed num_legs
0 falcon bird 389.0 2
1 parrot bird 24.0 2
2 lion mammal 80.5 4
3 monkey mammal NaN 4
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (index: 4)
Coordinates:
* index (index) int64 0 1 2 3
Data variables:
name (index) object 'falcon' 'parrot' 'lion' 'monkey'
class (index) object 'bird' 'bird' 'mammal' 'mammal'
max_speed (index) float64 389.0 24.0 80.5 nan
num_legs (index) int64 2 2 4 4
>>> df['max_speed'].to_xarray()
<xarray.DataArray 'max_speed' (index: 4)>
array([389. , 24. , 80.5, nan])
Coordinates:
* index (index) int64 0 1 2 3
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
... '2018-01-02', '2018-01-02'])
>>> df_multiindex = pd.DataFrame({'date': dates,
... 'animal': ['falcon', 'parrot',
... 'falcon', 'parrot'],
... 'speed': [350, 18, 361, 15]})
>>> df_multiindex = df_multiindex.set_index(['date', 'animal'])
>>> df_multiindex
speed
date animal
2018-01-01 falcon 350
parrot 18
2018-01-02 falcon 361
parrot 15
>>> df_multiindex.to_xarray()
<xarray.Dataset>
Dimensions: (animal: 2, date: 2)
Coordinates:
* date (date) datetime64[ns] 2018-01-01 2018-01-02
* animal (animal) object 'falcon' 'parrot'
Data variables:
speed (date, animal) int64 350 18 361 15
"""
xarray = import_optional_dependency("xarray")
if self.ndim == 1:
return xarray.DataArray.from_series(self)
else:
return xarray.Dataset.from_dataframe(self)
@Substitution(returns=fmt.return_docstring)
def to_latex(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
bold_rows=False,
column_format=None,
longtable=None,
escape=None,
encoding=None,
decimal=".",
multicolumn=None,
multicolumn_format=None,
multirow=None,
caption=None,
label=None,
):
r"""
Render object to a LaTeX tabular, longtable, or nested table/tabular.
Requires ``\usepackage{booktabs}``. The output can be copy/pasted
into a main LaTeX document or read from an external file
with ``\input{table.tex}``.
.. versionchanged:: 0.20.2
Added to Series.
.. versionchanged:: 1.0.0
Added caption and label arguments.
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given,
it is assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns' elements by position or
name. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function or str, optional, default None
Formatter for floating point numbers. For example
``float_format="%%.2f"`` and ``float_format="{:0.2f}".format`` will
both result in 0.1234 being formatted as 0.12.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row. By default, the value will be
read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns. By default, 'l' will be used for all columns except
columns of numbers, which default to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config
module. Use a longtable environment instead of tabular. Requires
adding a \usepackage{longtable} to your LaTeX preamble.
escape : bool, optional
By default, the value will be read from the pandas config
module. When set to False, prevents escaping LaTeX special
characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
multicolumn : bool, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module.
multirow : bool, default False
Use \multirow to enhance MultiIndex rows. Requires adding a
\usepackage{multirow} to your LaTeX preamble. Will print
centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read
from the pandas config module.
caption : str, optional
The LaTeX caption to be placed inside ``\caption{}`` in the output.
.. versionadded:: 1.0.0
label : str, optional
The LaTeX label to be placed inside ``\label{}`` in the output.
This is used with ``\ref{}`` in the main ``.tex`` file.
.. versionadded:: 1.0.0
%(returns)s
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE
\begin{tabular}{lll}
\toprule
name & mask & weapon \\
\midrule
Raphael & red & sai \\
Donatello & purple & bo staff \\
\bottomrule
\end{tabular}
"""
# Get defaults from the pandas config
if self.ndim == 1:
self = self.to_frame()
if longtable is None:
longtable = config.get_option("display.latex.longtable")
if escape is None:
escape = config.get_option("display.latex.escape")
if multicolumn is None:
multicolumn = config.get_option("display.latex.multicolumn")
if multicolumn_format is None:
multicolumn_format = config.get_option("display.latex.multicolumn_format")
if multirow is None:
multirow = config.get_option("display.latex.multirow")
formatter = DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
header=header,
index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape,
decimal=decimal,
)
return formatter.to_latex(
buf=buf,
column_format=column_format,
longtable=longtable,
encoding=encoding,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
caption=caption,
label=label,
)
def to_csv(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
sep: str = ",",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Sequence[Label]] = None,
header: Union[bool_t, List[str]] = True,
index: bool_t = True,
index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None,
mode: str = "w",
encoding: Optional[str] = None,
compression: Optional[Union[str, Mapping[str, str]]] = "infer",
quoting: Optional[int] = None,
quotechar: str = '"',
line_terminator: Optional[str] = None,
chunksize: Optional[int] = None,
date_format: Optional[str] = None,
doublequote: bool_t = True,
escapechar: Optional[str] = None,
decimal: Optional[str] = ".",
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. versionchanged:: 0.24.0
The order of arguments for Series was changed.
Parameters
----------
path_or_buf : str or file handle, default None
File path or object, if None is provided the result is returned as
a string. If a file object is passed it should be opened with
`newline=''`, disabling universal newlines.
.. versionchanged:: 0.24.0
Was previously named "path" for Series.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
float_format : str, default None
Format string for floating point numbers.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
.. versionchanged:: 0.24.0
Previously defaulted to False for Series.
index : bool, default True
Write row names (index).
index_label : str or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the object uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R.
mode : str
Python write mode, default 'w'.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
compression : str or dict, default 'infer'
If str, represents compression mode. If dict, value at 'method' is
the compression mode. Compression mode may be any of the following
possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If
compression mode is 'infer' and `path_or_buf` is path-like, then
detect compression mode from the following extensions: '.gz',
'.bz2', '.zip' or '.xz' (otherwise no compression). If dict given
and mode is 'zip' or inferred as 'zip', other entries passed as
additional compression options.
.. versionchanged:: 1.0.0
May now be a dict with key 'method' as compression mode
and other entries as additional compression options if
compression mode is 'zip'.
quoting : optional constant from csv module
Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
line_terminator : str, optional
The newline character or character sequence to use in the output
file. Defaults to `os.linesep`, which depends on the OS in which
this method is called (e.g. '\n' for Linux, '\r\n' for Windows).
.. versionchanged:: 0.24.0
chunksize : int or None
Rows to write at a time.
date_format : str, default None
Format string for datetime objects.
doublequote : bool, default True
Control quoting of `quotechar` inside a field.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
decimal : str, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data.
Returns
-------
None or str
If path_or_buf is None, returns the resulting csv format as a
string. Otherwise returns None.
See Also
--------
read_csv : Load a CSV file into a DataFrame.
to_excel : Write DataFrame to an Excel file.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> df.to_csv(index=False)
'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'
Create 'out.zip' containing 'out.csv'
>>> compression_opts = dict(method='zip',
... archive_name='out.csv') # doctest: +SKIP
>>> df.to_csv('out.zip', index=False,
... compression=compression_opts) # doctest: +SKIP
"""
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.csvs import CSVFormatter
formatter = CSVFormatter(
df,
path_or_buf,
line_terminator=line_terminator,
sep=sep,
encoding=encoding,
compression=compression,
quoting=quoting,
na_rep=na_rep,
float_format=float_format,
cols=columns,
header=header,
index=index,
index_label=index_label,
mode=mode,
chunksize=chunksize,
quotechar=quotechar,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar,
decimal=decimal,
)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
return None
# ----------------------------------------------------------------------
# Lookup Caching
def _set_as_cached(self, item, cacher) -> None:
"""
Set the _cacher attribute on the calling object with a weakref to
cacher.
"""
self._cacher = (item, weakref.ref(cacher))
def _reset_cacher(self) -> None:
"""
Reset the cacher.
"""
if hasattr(self, "_cacher"):
del self._cacher
def _maybe_cache_changed(self, item, value) -> None:
"""
The object has called back to us saying maybe it has changed.
"""
self._data.set(item, value)
@property
def _is_cached(self) -> bool_t:
"""Return boolean indicating if self is cached or not."""
return getattr(self, "_cacher", None) is not None
def _get_cacher(self):
"""return my cacher or None"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
cacher = cacher[1]()
return cacher
def _maybe_update_cacher(
self, clear: bool_t = False, verify_is_copy: bool_t = True
) -> None:
"""
See if we need to update our parent cacher if clear, then clear our
cache.
Parameters
----------
clear : bool, default False
Clear the item cache.
verify_is_copy : bool, default True
Provide is_copy checks.
"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
ref = cacher[1]()
# we are trying to reference a dead referent, hence
# a copy
if ref is None:
del self._cacher
else:
# Note: we need to call ref._maybe_cache_changed even in the
# case where it will raise; the reason for this is not documented.
try:
ref._maybe_cache_changed(cacher[0], self)
except AssertionError:
# ref._data.setitem can raise
# AssertionError because of shape mismatch
pass
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t="referant")
if clear:
self._clear_item_cache()
def _clear_item_cache(self) -> None:
self._item_cache.clear()
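# Illustrative sketch (not pandas source): how the item cache and the
# cacher weakref above cooperate. The session below is hypothetical usage,
# not public API.
#
#   df = pd.DataFrame({"a": [1, 2, 3]})
#   s = df["a"]          # s is stored in df._item_cache["a"], and
#                        # s._cacher == ("a", weakref.ref(df))
#   s.iloc[0] = 99       # s._maybe_update_cacher() calls back into
#                        # df._maybe_cache_changed("a", s) so df sees 99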
# ----------------------------------------------------------------------
# Indexing Methods
def take(
self: FrameOrSeries, indices, axis=0, is_copy: Optional[bool_t] = None, **kwargs
) -> FrameOrSeries:
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : {0 or 'index', 1 or 'columns', None}, default 0
The axis on which to select elements. ``0`` means that we are
selecting rows, ``1`` means that we are selecting columns.
is_copy : bool
Before pandas 1.0, ``is_copy=False`` can be specified to ensure
that the return value is an actual copy. Starting with pandas 1.0,
``take`` always returns a copy, and the keyword is therefore
deprecated.
.. deprecated:: 1.0.0
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=['name', 'class', 'max_speed'],
... index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3])
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may take elements using negative integers, which count from
the end of the object, just like with Python lists.
>>> df.take([-1, -2])
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
"""
if is_copy is not None:
warnings.warn(
"is_copy is deprecated and will be removed in a future version. "
"'take' always returns a copy, so there is no need to specify this.",
FutureWarning,
stacklevel=2,
)
nv.validate_take(tuple(), kwargs)
self._consolidate_inplace()
new_data = self._data.take(
indices, axis=self._get_block_manager_axis(axis), verify=True
)
return self._constructor(new_data).__finalize__(self)
def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries:
"""
Internal version of the `take` method that sets the `_is_copy`
attribute to keep track of the parent dataframe (using in indexing
for the SettingWithCopyWarning).
See the docstring of `take` for full explanation of the parameters.
"""
result = self.take(indices=indices, axis=axis)
# Maybe set copy if we didn't actually change the index.
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
def xs(self, key, axis=0, level=None, drop_level: bool_t = True):
"""
Return cross-section from the Series/DataFrame.
This method takes a `key` argument to select data at a particular
level of a MultiIndex.
Parameters
----------
key : label or tuple of label
Label contained in the index, or partially in a MultiIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis to retrieve cross-section on.
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
drop_level : bool, default True
If False, returns object with same levels as self.
Returns
-------
Series or DataFrame
Cross-section from the original Series or DataFrame
corresponding to the selected index levels.
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
DataFrame.iloc : Purely integer-location based indexing
for selection by position.
Notes
-----
`xs` cannot be used to set values.
MultiIndex Slicers is a generic way to get/set values on
any level or levels.
It is a superset of `xs` functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`.
Examples
--------
>>> d = {'num_legs': [4, 4, 2, 2],
... 'num_wings': [0, 0, 2, 2],
... 'class': ['mammal', 'mammal', 'mammal', 'bird'],
... 'animal': ['cat', 'dog', 'bat', 'penguin'],
... 'locomotion': ['walks', 'walks', 'flies', 'walks']}
>>> df = pd.DataFrame(data=d)
>>> df = df.set_index(['class', 'animal', 'locomotion'])
>>> df
num_legs num_wings
class animal locomotion
mammal cat walks 4 0
dog walks 4 0
bat flies 2 2
bird penguin walks 2 2
Get values at specified index
>>> df.xs('mammal')
num_legs num_wings
animal locomotion
cat walks 4 0
dog walks 4 0
bat flies 2 2
Get values at several indexes
>>> df.xs(('mammal', 'dog'))
num_legs num_wings
locomotion
walks 4 0
Get values at specified index and level
>>> df.xs('cat', level=1)
num_legs num_wings
class locomotion
mammal walks 4 0
Get values at several indexes and levels
>>> df.xs(('bird', 'walks'),
... level=[0, 'locomotion'])
num_legs num_wings
animal
penguin 2 2
Get values at specified column and axis
>>> df.xs('num_wings', axis=1)
class animal locomotion
mammal cat walks 0
dog walks 0
bat flies 2
bird penguin walks 2
Name: num_wings, dtype: int64
"""
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level is not None:
loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)
# create the tuple of the indexer
_indexer = [slice(None)] * self.ndim
_indexer[axis] = loc
indexer = tuple(_indexer)
result = self.iloc[indexer]
setattr(result, result._get_axis_name(axis), new_ax)
return result
if axis == 1:
return self[key]
self._consolidate_inplace()
index = self.index
if isinstance(index, MultiIndex):
loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)
else:
loc = self.index.get_loc(key)
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
(inds,) = loc.nonzero()
return self._take_with_is_copy(inds, axis=axis)
else:
return self._take_with_is_copy(loc, axis=axis)
if not is_scalar(loc):
new_index = self.index[loc]
if is_scalar(loc):
# In this case loc should be an integer
if self.ndim == 1:
# if we encounter an array-like and we only have 1 dim
# that means that there are list/ndarrays inside the Series!
# so just return them (GH 6394)
return self._values[loc]
new_values = self._data.fast_xs(loc)
result = self._constructor_sliced(
new_values,
index=self.columns,
name=self.index[loc],
dtype=new_values.dtype,
)
else:
result = self.iloc[loc]
result.index = new_index
# this could be a view
# but only in a single-dtyped view sliceable case
result._set_is_copy(self, copy=not result._is_view)
return result
_xs: Callable = xs
def __getitem__(self, item):
raise AbstractMethodError(self)
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res
def _box_item_values(self, key, values):
raise AbstractMethodError(self)
def _slice(self: FrameOrSeries, slobj: slice, axis=0) -> FrameOrSeries:
"""
Construct a slice of this container.
Slicing with this method is *always* positional.
"""
assert isinstance(slobj, slice), type(slobj)
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._data.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
# but only in a single-dtyped view sliceable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
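# Illustrative note (not pandas source): _slice is always positional, e.g.
# df.iloc[1:3] funnels through here. The parent weakref (_is_copy) is only
# recorded when the result may alias the parent's data, which is what the
# axis/_is_view check above decides.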
def _set_item(self, key, value) -> None:
self._data.set(key, value)
self._clear_item_cache()
def _set_is_copy(self, ref, copy: bool_t = True) -> None:
if not copy:
self._is_copy = None
else:
assert ref is not None
self._is_copy = weakref.ref(ref)
def _check_is_chained_assignment_possible(self) -> bool_t:
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
Should be called just prior to setting a value.
Returns True if we are a view and are cached; in the
single-dtype case the cacher should then be updated following
the setting.
"""
if self._is_view and self._is_cached:
ref = self._get_cacher()
if ref is not None and ref._is_mixed_type:
self._check_setitem_copy(stacklevel=4, t="referant", force=True)
return True
elif self._is_copy:
self._check_setitem_copy(stacklevel=4, t="referant")
return False
def _check_setitem_copy(self, stacklevel=4, t="setting", force=False):
"""
Parameters
----------
stacklevel : int, default 4
the level to show of the stack when the error is output
t : str, the type of setting error
force : bool, default False
If True, then force showing an error.
validate if we are doing a setitem on a chained copy.
If you call this function, be sure to set the stacklevel such that the
user will see the error *at the level of setting*
It is technically possible to figure out that we are setting on
a copy even WITH a multi-dtyped pandas object. In other words, some
blocks may be views while other are not. Currently _is_view will ALWAYS
return False for multi-blocks to avoid having to handle this case.
df = DataFrame(np.arange(0,9), columns=['count'])
df['group'] = 'b'
# This technically need not raise SettingWithCopy if both are view
# (which is not # generally guaranteed but is usually True. However,
# this is in general not a good practice and we recommend using .loc.
df.iloc[0:5]['group'] = 'a'
"""
# return early if the check is not needed
if not (force or self._is_copy):
return
value = config.get_option("mode.chained_assignment")
if value is None:
return
# see if the copy is not actually referred; if so, then dissolve
# the copy weakref
if self._is_copy is not None and not isinstance(self._is_copy, str):
r = self._is_copy()
if not gc.get_referents(r) or r.shape == self.shape:
self._is_copy = None
return
# a custom message
if isinstance(self._is_copy, str):
t = self._is_copy
elif t == "referant":
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame\n\n"
"See the caveats in the documentation: "
"https://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
else:
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame.\n"
"Try using .loc[row_indexer,col_indexer] = value "
"instead\n\nSee the caveats in the documentation: "
"https://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
if value == "raise":
raise com.SettingWithCopyError(t)
elif value == "warn":
warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)
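# Illustrative sketch (not pandas source): what the check above guards
# against. Hypothetical session; the behavior depends on the
# "mode.chained_assignment" option ("warn", "raise", or None).
#
#   df = pd.DataFrame({"count": range(9)})
#   df[df["count"] > 4]["count"] = 0   # setitem on a copy: emits
#                                      # SettingWithCopyWarning, df unchanged
#   df.loc[df["count"] > 4, "count"] = 0   # the recommended spelling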
def __delitem__(self, key) -> None:
"""
Delete item
"""
deleted = False
maybe_shortcut = False
if self.ndim == 2 and isinstance(self.columns, MultiIndex):
try:
maybe_shortcut = key not in self.columns._engine
except TypeError:
pass
if maybe_shortcut:
# Allow shorthand to delete all columns whose first len(key)
# elements match key:
if not isinstance(key, tuple):
key = (key,)
for col in self.columns:
if isinstance(col, tuple) and col[: len(key)] == key:
del self[col]
deleted = True
if not deleted:
# If the above loop ran and didn't delete anything because
# there was no match, this call should raise the appropriate
# exception:
self._data.delete(key)
# delete from the caches
try:
del self._item_cache[key]
except KeyError:
pass
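# Illustrative sketch (not pandas source): the MultiIndex shorthand above.
#
#   cols = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "x")])
#   df = pd.DataFrame([[1, 2, 3]], columns=cols)
#   del df["a"]           # deletes every column whose first level == "a"
#   list(df.columns)      # -> [("b", "x")]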
# ----------------------------------------------------------------------
# Unsorted
def get(self, key, default=None):
"""
Get item from object for given key (ex: DataFrame column).
Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
@property
def _is_view(self) -> bool_t:
"""Return boolean indicating if self is view of another array """
return self._data.is_view
def reindex_like(
self: FrameOrSeries,
other,
method: Optional[str] = None,
copy: bool_t = True,
limit=None,
tolerance=None,
) -> FrameOrSeries:
"""
Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit',
... 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium
"""
d = other._construct_axes_dict(
axes=self._AXIS_ORDERS,
method=method,
copy=copy,
limit=limit,
tolerance=tolerance,
)
return self.reindex(**d)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace: bool_t = False,
errors: str = "raise",
):
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis_name = self._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes, _ = self._construct_axes_from_arguments((index, columns), {})
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
obj = self
for axis, labels in axes.items():
if labels is not None:
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
if inplace:
self._update_inplace(obj)
else:
return obj
def _drop_axis(
self: FrameOrSeries, labels, axis, level=None, errors: str = "raise"
) -> FrameOrSeries:
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
result = self.reindex(**{axis_name: new_axis})
# Case for non-unique axis
else:
labels = ensure_object(com.index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
indexer = ~axis.get_level_values(level).isin(labels)
# GH 18561 MultiIndex.drop should raise if label is absent
if errors == "raise" and indexer.all():
raise KeyError(f"{labels} not found in axis")
else:
indexer = ~axis.isin(labels)
# Check if label doesn't exist along axis
labels_missing = (axis.get_indexer_for(labels) == -1).any()
if errors == "raise" and labels_missing:
raise KeyError(f"{labels} not found in axis")
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result
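# Illustrative sketch (not pandas source): the non-unique branch above in
# action, dropping by level on an axis with duplicate labels.
#
#   mi = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("a", 1)])
#   s = pd.Series(range(4), index=mi)
#   s.drop("a", level=0)   # boolean indexer keeps only the ("b", 1) row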
def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:
"""
Replace self internals with result.
Parameters
----------
verify_is_copy : bool, default True
Provide is_copy checks.
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._data = getattr(result, "_data", result)
self._maybe_update_cacher(verify_is_copy=verify_is_copy)
def add_prefix(self: FrameOrSeries, prefix: str) -> FrameOrSeries:
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{prefix}{}".format, prefix=prefix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper) # type: ignore
def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{}{suffix}".format, suffix=suffix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper) # type: ignore
def sort_values(
self,
axis=0,
ascending=True,
inplace: bool_t = False,
kind: str = "quicksort",
na_position: str = "last",
ignore_index: bool_t = False,
):
"""
Sort by the values along either axis.
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to be sorted.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also ndarray.np.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the
end.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted values if inplace=False, None otherwise.
Examples
--------
>>> df = pd.DataFrame({
... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 NaN 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 NaN 8 4
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3
3 NaN 8 4
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
"""
raise AbstractMethodError(self)
def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries:
"""
Conform %(klass)s to new index with optional filling logic.
Places NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
%(optional_labels)s
%(axes)s : array-like, optional
New labels / index to conform to, should be specified using
keywords. Preferably an Index object to avoid duplicating data.
%(optional_axis)s
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: Propagate last valid observation forward to next
valid.
* backfill / bfill: Use next valid observation to fill gap.
* nearest: Use nearest valid observations to fill gap.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
limit : int, default None
Maximum number of consecutive elements to forward or backward fill.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
%(klass)s with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = pd.DataFrame({'http_status': [200, 200, 404, 404, 301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index)
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index)
http_status response_time
Safari 404.0 0.07
Iceweasel NaN NaN
Comodo Dragon NaN NaN
IE10 404.0 0.08
Chrome 200.0 0.02
We can fill in the missing values by passing a value to
the keyword ``fill_value``. Because the index is not monotonically
increasing or decreasing, we cannot use arguments to the keyword
``method`` to fill the ``NaN`` values.
>>> df.reindex(new_index, fill_value=0)
http_status response_time
Safari 404 0.07
Iceweasel 0 0.00
Comodo Dragon 0 0.00
IE10 404 0.08
Chrome 200 0.02
>>> df.reindex(new_index, fill_value='missing')
http_status response_time
Safari 404 0.07
Iceweasel missing missing
Comodo Dragon missing missing
IE10 404 0.08
Chrome 200 0.02
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent'])
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns")
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2)
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
The index entries that did not have a value in the original data frame
(for example, '2009-12-29') are by default filled with ``NaN``.
If desired, we can fill in the missing values using one of several
options.
For example, to fill the ``NaN`` values by propagating the next valid
observation backwards, pass ``bfill`` as an argument to the ``method``
keyword.
>>> df2.reindex(date_index2, method='bfill')
prices
2009-12-29 100.0
2009-12-30 100.0
2009-12-31 100.0
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
Please note that the ``NaN`` value present in the original dataframe
(at index value 2010-01-03) will not be filled by any of the
value propagation schemes. This is because filling while reindexing
does not look at dataframe values, but only compares the original and
desired indexes. If you do want to fill in the ``NaN`` values present
in the original dataframe, use the ``fillna()`` method.
See the :ref:`user guide <basics.reindexing>` for more.
"""
# TODO: Decide if we care about having different examples for different
# kinds
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
method = missing.clean_reindex_fill_method(kwargs.pop("method", None))
level = kwargs.pop("level", None)
copy = kwargs.pop("copy", True)
limit = kwargs.pop("limit", None)
tolerance = kwargs.pop("tolerance", None)
fill_value = kwargs.pop("fill_value", None)
# Series.reindex doesn't use / need the axis kwarg
# We pop and ignore it here, to make writing Series/Frame generic code
# easier
kwargs.pop("axis", None)
if kwargs:
raise TypeError(
"reindex() got an unexpected keyword "
f'argument "{list(kwargs.keys())[0]}"'
)
self._consolidate_inplace()
# if all axes that are requested to reindex are equal, then only copy
# if indicated must have index names equal here as well as values
if all(
self._get_axis(axis).identical(ax)
for axis, ax in axes.items()
if ax is not None
):
if copy:
return self.copy()
return self
# check if we are a multi reindex
if self._needs_reindex_multi(axes, method, level):
return self._reindex_multi(axes, copy, fill_value)
# perform the reindex on the axes
return self._reindex_axes(
axes, level, limit, tolerance, method, fill_value, copy
).__finalize__(self)
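# Illustrative sketch (not pandas source): the identical-axes fast path
# above.
#
#   df = pd.DataFrame({"a": [1, 2]}, index=["x", "y"])
#   df.reindex(index=["x", "y"]) is df               # False: copies by default
#   df.reindex(index=["x", "y"], copy=False) is df   # True: returns self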
def _reindex_axes(
self: FrameOrSeries, axes, level, limit, tolerance, method, fill_value, copy
) -> FrameOrSeries:
"""Perform the reindex for all the axes."""
obj = self
for a in self._AXIS_ORDERS:
labels = axes[a]
if labels is None:
continue
ax = self._get_axis(a)
new_index, indexer = ax.reindex(
labels, level=level, limit=limit, tolerance=tolerance, method=method
)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers(
{axis: [new_index, indexer]},
fill_value=fill_value,
copy=copy,
allow_dups=False,
)
return obj
def _needs_reindex_multi(self, axes, method, level) -> bool_t:
"""Check if we do need a multi reindex."""
return (
(com.count_not_none(*axes.values()) == self._AXIS_LEN)
and method is None
and level is None
and not self._is_mixed_type
)
def _reindex_multi(self, axes, copy, fill_value):
raise AbstractMethodError(self)
def _reindex_with_indexers(
self: FrameOrSeries,
reindexers,
fill_value=None,
copy: bool_t = False,
allow_dups: bool_t = False,
) -> FrameOrSeries:
"""allow_dups indicates an internal call here """
# reindex doing multiple operations on different axes if indicated
new_data = self._data
for axis in sorted(reindexers.keys()):
index, indexer = reindexers[axis]
baxis = self._get_block_manager_axis(axis)
if index is None:
continue
index = ensure_index(index)
if indexer is not None:
indexer = ensure_int64(indexer)
# TODO: speed up on homogeneous DataFrame objects
new_data = new_data.reindex_indexer(
index,
indexer,
axis=baxis,
fill_value=fill_value,
allow_dups=allow_dups,
copy=copy,
)
if copy and new_data is self._data:
new_data = new_data.copy()
return self._constructor(new_data).__finalize__(self)
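# Illustrative sketch (not pandas source): the reindexers mapping consumed
# above; a hypothetical internal call for a 2-D frame, not public API.
#
#   new_index, indexer = df.index.reindex(pd.Index(["x", "z"]))
#   df._reindex_with_indexers({0: [new_index, indexer]}, fill_value=0)
#   # axis 0 (rows) is translated to the block-manager axis via
#   # _get_block_manager_axis before reindex_indexer runs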
def filter(
self: FrameOrSeries,
items=None,
like: Optional[str] = None,
regex: Optional[str] = None,
axis=None,
) -> FrameOrSeries:
"""
Subset the dataframe rows or columns according to the specified index labels.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : str
Keep labels from axis for which "like in label == True".
regex : str (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : {0 or 'index', 1 or 'columns', None}, default None
The axis to filter on, expressed either as an index (int)
or axis name (str). By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
"""
nkw = com.count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive"
)
if axis is None:
axis = self._info_axis_name
labels = self._get_axis(axis)
if items is not None:
name = self._get_axis_name(axis)
return self.reindex(**{name: [r for r in items if r in labels]})
elif like:
def f(x):
return like in ensure_str(x)
values = labels.map(f)
return self.loc(axis=axis)[values]
elif regex:
def f(x):
return matcher.search(ensure_str(x)) is not None
matcher = re.compile(regex)
values = labels.map(f)
return self.loc(axis=axis)[values]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
For negative values of `n`, this function returns all rows except
the last ``|n|`` rows, equivalent to ``df[:n]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
same type as caller
The first `n` rows of the caller object.
See Also
--------
DataFrame.tail: Returns the last `n` rows.
Examples
--------
>>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
For negative values of `n`
>>> df.head(-3)
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
"""
return self.iloc[:n]
def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
"""
Return the last `n` rows.
This function returns the last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.
For negative values of `n`, this function returns all rows except
the first ``|n|`` rows, equivalent to ``df[|n|:]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
type of caller
The last `n` rows of the caller object.
See Also
--------
DataFrame.head : The first `n` rows of the caller object.
Examples
--------
>>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last 5 lines
>>> df.tail()
animal
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last `n` lines (three in this case)
>>> df.tail(3)
animal
6 shark
7 whale
8 zebra
For negative values of `n`
>>> df.tail(-3)
animal
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
"""
if n == 0:
return self.iloc[0:0]
return self.iloc[-n:]
def sample(
self: FrameOrSeries,
n=None,
frac=None,
replace=False,
weights=None,
random_state=None,
axis=None,
) -> FrameOrSeries:
"""
Return a random sample of items from an axis of object.
You can use `random_state` for reproducibility.
Parameters
----------
n : int, optional
Number of items from axis to return. Cannot be used with `frac`.
Default = 1 if `frac` = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
replace : bool, default False
Allow or disallow sampling of the same row more than once.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
Unless weights are a Series, weights must be the same length as the
axis being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
Infinite values not allowed.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or numpy RandomState
object.
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames).
Returns
-------
Series or DataFrame
A new object of same type as caller containing `n` items randomly
sampled from the caller object.
See Also
--------
numpy.random.choice: Generates a random sample from a given 1-D numpy
array.
Notes
-----
If `frac` > 1, `replace` should be set to `True`.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'])
>>> df
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
Extract 3 random elements from the ``Series`` ``df['num_legs']``:
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df['num_legs'].sample(n=3, random_state=1)
fish 0
spider 8
falcon 2
Name: num_legs, dtype: int64
A random 50% sample of the ``DataFrame`` with replacement:
>>> df.sample(frac=0.5, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
An upsampled ``DataFrame`` with replacement:
Note that the `replace` parameter has to be `True` when `frac` > 1.
>>> df.sample(frac=2, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
falcon 2 2 10
falcon 2 2 10
fish 0 0 8
dog 4 0 2
fish 0 0 8
dog 4 0 2
Using a DataFrame column as weights. Rows with larger value in the
`num_specimen_seen` column are more likely to be sampled.
>>> df.sample(n=2, weights='num_specimen_seen', random_state=1)
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
axis_length = self.shape[axis]
# Process random_state argument
rs = com.random_state(random_state)
# Check weights for compliance
if weights is not None:
# If a series, align with frame
if isinstance(weights, ABCSeries):
weights = weights.reindex(self.axes[axis])
# Strings acceptable if a dataframe and axis = 0
if isinstance(weights, str):
if isinstance(self, ABCDataFrame):
if axis == 0:
try:
weights = self[weights]
except KeyError as err:
raise KeyError(
"String passed to weights not a valid column"
) from err
else:
raise ValueError(
"Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame"
)
else:
raise ValueError(
"Strings cannot be passed as weights "
"when sampling from a Series."
)
weights = pd.Series(weights, dtype="float64")
if len(weights) != axis_length:
raise ValueError(
"Weights and axis to be sampled must be of same length"
)
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative values")
# If has nan, set to zero.
weights = weights.fillna(0)
# Renormalize if don't sum to 1
if weights.sum() != 1:
if weights.sum() != 0:
weights = weights / weights.sum()
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
# If no frac or n, default to n=1.
if n is None and frac is None:
n = 1
elif frac is not None and frac > 1 and not replace:
raise ValueError(
"Replace has to be set to `True` when "
"upsampling the population `frac` > 1."
)
elif n is not None and frac is None and n % 1 != 0:
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
raise ValueError("Please enter a value for `frac` OR `n`, not both")
# Check for negative sizes
if n < 0:
raise ValueError(
"A negative number of rows requested. Please provide positive value."
)
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
return self.take(locs, axis=axis)
_shared_docs[
"pipe"
] = r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
Function to apply to the %(klass)s.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the %(klass)s.
args : iterable, optional
Positional arguments passed into ``func``.
kwargs : mapping, optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. Instead of writing
>>> f(g(h(df), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe((f, 'arg2'), arg1=a, arg3=c)
... )
"""
@Appender(_shared_docs["pipe"] % _shared_doc_kwargs)
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
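# Illustrative sketch (not pandas source): a concrete pipe chain. `add_col`
# and `scale` are hypothetical helpers, not pandas API.
#
#   def add_col(df, name, value):
#       return df.assign(**{name: value})
#
#   def scale(factor, df):   # takes the frame as its *second* argument
#       return df * factor
#
#   (pd.DataFrame({"a": [1, 2]})
#      .pipe(add_col, "b", 10)
#      .pipe((scale, "df"), 2))   # tuple form routes the frame to df=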
_shared_docs["aggregate"] = dedent(
"""
Aggregate using one or more operations over the specified axis.
%(versionadded)s
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
scalar, Series or DataFrame
The return can be:
* scalar : when Series.agg is called with single function
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
Return scalar, Series or DataFrame.
%(see_also)s
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
A passed user-defined-function will be passed a Series for evaluation.
%(examples)s"""
)
_shared_docs[
"transform"
] = """
Call ``func`` on self producing a %(klass)s with transformed values.
Produced %(klass)s will have same axis length as self.
Parameters
----------
func : function, str, list or dict
Function to use for transforming the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
%(klass)s
A %(klass)s that must have the same length as self.
Raises
------
ValueError : If the returned %(klass)s has a different length than self.
See Also
--------
%(klass)s.agg : Only perform aggregating type operations.
%(klass)s.apply : Invoke function on a %(klass)s.
Examples
--------
>>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)})
>>> df
A B
0 0 1
1 1 2
2 2 3
>>> df.transform(lambda x: x + 1)
A B
0 1 2
1 2 3
2 3 4
Even though the resulting %(klass)s must have the same length as the
input %(klass)s, it is possible to provide several input functions:
>>> s = pd.Series(range(3))
>>> s
0 0
1 1
2 2
dtype: int64
>>> s.transform([np.sqrt, np.exp])
sqrt exp
0 0.000000 1.000000
1 1.000000 2.718282
2 1.414214 7.389056
"""
# ----------------------------------------------------------------------
# Attribute access
def __finalize__(
self: FrameOrSeries, other, method=None, **kwargs
) -> FrameOrSeries:
"""
Propagate metadata from other to self.
Parameters
----------
other : the object from which to get the attributes that we are going
to propagate
method : str, optional
A passed method name, possibly used to take different types of
propagation actions based on it.
"""
if isinstance(other, NDFrame):
for name in other.attrs:
self.attrs[name] = other.attrs[name]
# For subclasses using _metadata.
for name in self._metadata:
assert isinstance(name, str)
object.__setattr__(self, name, getattr(other, name, None))
return self
def __getattr__(self, name: str):
"""
After regular attribute access, try looking up the name
This allows simpler access to columns for interactive use.
"""
# Note: obj.x will always call obj.__getattribute__('x') prior to
# calling obj.__getattr__('x').
if (
name in self._internal_names_set
or name in self._metadata
or name in self._accessors
):
return object.__getattribute__(self, name)
else:
if self._info_axis._can_hold_identifiers_and_holds_name(name):
return self[name]
return object.__getattribute__(self, name)
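# Illustrative sketch (not pandas source): the fallback above is what makes
# attribute-style column access work for identifier-like labels.
#
#   df = pd.DataFrame({"speed": [1, 2]})
#   df.speed      # resolved via __getattr__ -> df["speed"]
#   df.mean       # regular attribute lookup wins: this is the method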
def __setattr__(self, name: str, value) -> None:
"""
After regular attribute access, try setting the name
This allows simpler access to columns for interactive use.
"""
# first try regular attribute access via __getattribute__, so that
# e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
# the same attribute.
try:
object.__getattribute__(self, name)
return object.__setattr__(self, name, value)
except AttributeError:
pass
# if this fails, go on to more involved attribute setting
# (note that this matches __getattr__, above).
if name in self._internal_names_set:
object.__setattr__(self, name, value)
elif name in self._metadata:
object.__setattr__(self, name, value)
else:
try:
existing = getattr(self, name)
if isinstance(existing, Index):
object.__setattr__(self, name, value)
elif name in self._info_axis:
self[name] = value
else:
object.__setattr__(self, name, value)
except (AttributeError, TypeError):
if isinstance(self, ABCDataFrame) and (is_list_like(value)):
warnings.warn(
"Pandas doesn't allow columns to be "
"created via a new attribute name - see "
"https://pandas.pydata.org/pandas-docs/"
"stable/indexing.html#attribute-access",
stacklevel=2,
)
object.__setattr__(self, name, value)
def _dir_additions(self):
"""
add the string-like attributes from the info_axis.
If info_axis is a MultiIndex, its first-level values are used.
"""
additions = {
c
for c in self._info_axis.unique(level=0)[:100]
if isinstance(c, str) and c.isidentifier()
}
return super()._dir_additions().union(additions)
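# Illustrative sketch (not pandas source): effect of _dir_additions on tab
# completion.
#
#   df = pd.DataFrame({"price": [1.0], "2bad": [2.0]})
#   "price" in dir(df)   # True: valid identifier, completes as df.price
#   "2bad" in dir(df)    # False: not an identifier, only df["2bad"] works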
# ----------------------------------------------------------------------
# Consolidation of internals
def _protect_consolidate(self, f):
"""
Consolidate _data -- if the blocks have changed, then clear the
cache
"""
blocks_before = len(self._data.blocks)
result = f()
if len(self._data.blocks) != blocks_before:
self._clear_item_cache()
return result
def _consolidate_inplace(self) -> None:
"""Consolidate data in place and return None"""
def f():
self._data = self._data.consolidate()
self._protect_consolidate(f)
def _consolidate(self, inplace: bool_t = False):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray).
Parameters
----------
inplace : bool, default False
If False return new object, otherwise modify existing object.
Returns
-------
consolidated : same type as caller
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
self._consolidate_inplace()
else:
f = lambda: self._data.consolidate()
cons_data = self._protect_consolidate(f)
return self._constructor(cons_data).__finalize__(self)
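# Illustrative sketch (not pandas source): consolidation merges same-dtype
# blocks that accumulate from repeated inserts. `_data` is the internal
# BlockManager.
#
#   df = pd.DataFrame({"a": [1.0]})
#   df["b"] = 2.0                           # adds a second float64 block
#   len(df._data.blocks)                    # -> 2
#   len(df._consolidate()._data.blocks)     # -> 1 after consolidation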
@property
def _is_mixed_type(self) -> bool_t:
f = lambda: self._data.is_mixed_type
return self._protect_consolidate(f)
@property
def _is_numeric_mixed_type(self) -> bool_t:
f = lambda: self._data.is_numeric_mixed_type
return self._protect_consolidate(f)
def _check_inplace_setting(self, value) -> bool_t:
""" check whether we allow in-place setting with this type of value """
if self._is_mixed_type:
if not self._is_numeric_mixed_type:
# allow an actual np.nan thru
if is_float(value) and np.isnan(value):
return True
raise TypeError(
"Cannot do inplace boolean setting on "
"mixed-types with a non np.nan value"
)
return True
def _get_numeric_data(self):
return self._constructor(self._data.get_numeric_data()).__finalize__(self)
def _get_bool_data(self):
return self._constructor(self._data.get_bool_data()).__finalize__(self)
# ----------------------------------------------------------------------
# Internal Interface Methods
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame.
.. warning::
We recommend using :meth:`DataFrame.to_numpy` instead.
Only the values in the DataFrame will be returned, the axes labels
will be removed.
Returns
-------
numpy.ndarray
The values of the DataFrame.
See Also
--------
DataFrame.to_numpy : Recommended alternative to this method.
DataFrame.index : Retrieve the index labels.
DataFrame.columns : Retrieving the column names.
Notes
-----
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By :func:`numpy.find_common_type` convention, mixing int64
and uint64 will result in a float64 dtype.
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results
in an array of the same type.
>>> df = pd.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]], dtype=int64)
A DataFrame with mixed type columns (e.g., str/object, int64, float32)
results in an ndarray of the broadest type that accommodates these
mixed types (e.g., object).
>>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 1),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 1],
['monkey', nan, None]], dtype=object)
"""
self._consolidate_inplace()
return self._data.as_array(transpose=self._AXIS_REVERSED)
@property
def _values(self) -> np.ndarray:
"""internal implementation"""
return self.values
def _internal_get_values(self) -> np.ndarray:
"""
Return an ndarray after converting sparse values to dense.
This is the same as ``.values`` for non-sparse data. For sparse
data contained in a `SparseArray`, the data are first
converted to a dense representation.
Returns
-------
numpy.ndarray
Numpy representation of DataFrame.
See Also
--------
values : Numpy representation of DataFrame.
SparseArray : Container for sparse data.
"""
return self.values
@property
def dtypes(self):
"""
Return the dtypes in the DataFrame.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type of each column.
Examples
--------
>>> df = pd.DataFrame({'float': [1.0],
... 'int': [1],
... 'datetime': [pd.Timestamp('20180310')],
... 'string': ['foo']})
>>> df.dtypes
float float64
int int64
datetime datetime64[ns]
string object
dtype: object
"""
from pandas import Series
return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)
def _to_dict_of_blocks(self, copy: bool_t = True):
"""
        Return a dict of dtype -> constructor types, where each
        value holds data of a single homogeneous dtype.
        Internal ONLY.
"""
return {
k: self._constructor(v).__finalize__(self)
            for k, v in self._data.to_dict(copy=copy).items()
}
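    # Illustrative sketch (hypothetical data): keys are dtype names, values
    # are homogeneous sub-frames built from the corresponding blocks.
    #
    #   df = pd.DataFrame({"i": [1, 2], "f": [1.5, 2.5]})
    #   blocks = df._to_dict_of_blocks()
    #   sorted(blocks)                      # ['float64', 'int64']
    #   blocks["int64"]                     # one-column frame holding 'i'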
def astype(
self: FrameOrSeries, dtype, copy: bool_t = True, errors: str = "raise"
) -> FrameOrSeries:
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
copy : bool, default True
Return a copy when ``copy=True`` (be very careful setting
``copy=False`` as changes to values then may propagate to other
pandas objects).
errors : {'raise', 'ignore'}, default 'raise'
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
numpy.ndarray.astype : Cast a numpy array to a specified type.
Examples
--------
Create a DataFrame:
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df.dtypes
col1 int64
col2 int64
dtype: object
Cast all columns to int32:
>>> df.astype('int32').dtypes
col1 int32
col2 int32
dtype: object
Cast col1 to int32 using a dictionary:
>>> df.astype({'col1': 'int32'}).dtypes
col1 int32
col2 int64
dtype: object
Create a series:
>>> ser = pd.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
Convert to categorical type:
>>> ser.astype('category')
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
Convert to ordered categorical type with custom ordering:
>>> cat_dtype = pd.api.types.CategoricalDtype(
... categories=[2, 1], ordered=True)
>>> ser.astype(cat_dtype)
0 1
1 2
dtype: category
Categories (2, int64): [2 < 1]
Note that using ``copy=False`` and changing data on a new
pandas object may propagate changes:
>>> s1 = pd.Series([1, 2])
>>> s2 = s1.astype('int64', copy=False)
>>> s2[0] = 10
>>> s1 # note that s1[0] has changed too
0 10
1 2
dtype: int64
"""
if is_dict_like(dtype):
if self.ndim == 1: # i.e. Series
if len(dtype) > 1 or self.name not in dtype:
raise KeyError(
"Only the Series name can be used for "
"the key in Series dtype mappings."
)
new_type = dtype[self.name]
return self.astype(new_type, copy, errors)
for col_name in dtype.keys():
if col_name not in self:
raise KeyError(
"Only a column name can be used for the "
"key in a dtype mappings argument."
)
results = []
for col_name, col in self.items():
if col_name in dtype:
results.append(
col.astype(dtype=dtype[col_name], copy=copy, errors=errors)
)
else:
results.append(col.copy() if copy else col)
elif is_extension_array_dtype(dtype) and self.ndim > 1:
# GH 18099/22869: columnwise conversion to extension dtype
# GH 24704: use iloc to handle duplicate column names
results = [
self.iloc[:, i].astype(dtype, copy=copy)
for i in range(len(self.columns))
]
else:
# else, only a single dtype is given
new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors)
return self._constructor(new_data).__finalize__(self)
# GH 19920: retain column metadata after concat
result = pd.concat(results, axis=1, copy=False)
result.columns = self.columns
return result
def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
"""
Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
copy : Series or DataFrame
Object type matches caller.
Notes
-----
When ``deep=True``, data is copied but actual Python objects
will not be copied recursively, only the reference to the object.
This is in contrast to `copy.deepcopy` in the Standard Library,
which recursively copies object data (see examples below).
While ``Index`` objects are copied when ``deep=True``, the underlying
numpy array is not copied for performance reasons. Since ``Index`` is
immutable, the underlying data can be safely shared and a copy
is not needed.
Examples
--------
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Shallow copy shares data and index with original.
>>> s is shallow
False
>>> s.values is shallow.values and s.index is shallow.index
True
Deep copy has own copy of data and index.
>>> s is deep
False
>>> s.values is deep.values or s.index is deep.index
False
Updates to the data shared by shallow copy and original is reflected
in both; deep copy remains unchanged.
>>> s[0] = 3
>>> shallow[1] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
Note that when copying an object containing Python objects, a deep copy
will copy the data, but will not do so recursively. Updating a nested
data object will be reflected in the deep copy.
>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = s.copy()
>>> s[0][0] = 10
>>> s
0 [10, 2]
1 [3, 4]
dtype: object
>>> deep
0 [10, 2]
1 [3, 4]
dtype: object
"""
data = self._data.copy(deep=deep)
return self._constructor(data).__finalize__(self)
def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
return self.copy(deep=deep)
def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:
"""
Parameters
----------
memo, default None
Standard signature. Unused
"""
return self.copy(deep=True)
def _convert(
self: FrameOrSeries,
datetime: bool_t = False,
numeric: bool_t = False,
timedelta: bool_t = False,
coerce: bool_t = False,
copy: bool_t = True,
) -> FrameOrSeries:
"""
Attempt to infer better dtype for object columns
Parameters
----------
datetime : bool, default False
If True, convert to date where possible.
numeric : bool, default False
If True, attempt to convert to numbers (including strings), with
unconvertible values becoming NaN.
timedelta : bool, default False
If True, convert to timedelta where possible.
coerce : bool, default False
If True, force conversion with unconvertible values converted to
nulls (NaN or NaT).
copy : bool, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
"""
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
validate_bool_kwarg(coerce, "coerce")
validate_bool_kwarg(copy, "copy")
return self._constructor(
self._data.convert(
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy,
)
).__finalize__(self)
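    # Hedged sketch (hypothetical data): soft conversion of an object
    # column, here coercing numeric-looking strings via ``numeric=True``.
    #
    #   s = pd.Series(["1", "2", "3"], dtype=object)
    #   s._convert(numeric=True)            # numeric dtype instead of object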
def infer_objects(self: FrameOrSeries) -> FrameOrSeries:
"""
Attempt to infer better dtypes for object columns.
Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.
.. versionadded:: 0.21.0
Returns
-------
converted : same type as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
convert_dtypes : Convert argument to best possible dtype.
Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
A
1 1
2 2
3 3
>>> df.dtypes
A object
dtype: object
>>> df.infer_objects().dtypes
A int64
dtype: object
"""
# numeric=False necessary to only soft convert;
# python objects will still be converted to
# native numpy numeric types
return self._constructor(
self._data.convert(
datetime=True, numeric=False, timedelta=True, coerce=False, copy=True
)
).__finalize__(self)
def convert_dtypes(
self: FrameOrSeries,
infer_objects: bool_t = True,
convert_string: bool_t = True,
convert_integer: bool_t = True,
convert_boolean: bool_t = True,
) -> FrameOrSeries:
"""
Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.
.. versionadded:: 1.0.0
Parameters
----------
infer_objects : bool, default True
Whether object dtypes should be converted to the best possible types.
convert_string : bool, default True
Whether object dtypes should be converted to ``StringDtype()``.
convert_integer : bool, default True
Whether, if possible, conversion can be done to integer extension types.
        convert_boolean : bool, default True
            Whether object dtypes should be converted to ``BooleanDtype()``.
Returns
-------
Series or DataFrame
Copy of input object with new dtype.
See Also
--------
infer_objects : Infer dtypes of objects.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
Notes
-----
By default, ``convert_dtypes`` will attempt to convert a Series (or each
Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options
``convert_string``, ``convert_integer``, and ``convert_boolean``, it is
possible to turn off individual conversions to ``StringDtype``, the integer
extension types or ``BooleanDtype``, respectively.
For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference
rules as during normal Series/DataFrame construction. Then, if possible,
convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer extension
type, otherwise leave as ``object``.
If the dtype is integer, convert to an appropriate integer extension type.
If the dtype is numeric, and consists of all integers, convert to an
appropriate integer extension type.
In the future, as new dtypes are added that support ``pd.NA``, the results
of this method will change to support those new dtypes.
Examples
--------
>>> df = pd.DataFrame(
... {
... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
... "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
... "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")),
... "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")),
... "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")),
... "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")),
... }
... )
Start with a DataFrame with default dtypes.
>>> df
a b c d e f
0 1 x True h 10.0 NaN
1 2 y False i NaN 100.5
2 3 z NaN NaN 20.0 200.0
>>> df.dtypes
a int32
b object
c object
d object
e float64
f float64
dtype: object
Convert the DataFrame to use best possible dtypes.
>>> dfn = df.convert_dtypes()
>>> dfn
a b c d e f
0 1 x True h 10 NaN
1 2 y False i <NA> 100.5
2 3 z <NA> <NA> 20 200.0
>>> dfn.dtypes
a Int32
b string
c boolean
d string
e Int64
f float64
dtype: object
Start with a Series of strings and missing data represented by ``np.nan``.
>>> s = pd.Series(["a", "b", np.nan])
>>> s
0 a
1 b
2 NaN
dtype: object
Obtain a Series with dtype ``StringDtype``.
>>> s.convert_dtypes()
0 a
1 b
2 <NA>
dtype: string
"""
if self.ndim == 1:
return self._convert_dtypes(
infer_objects, convert_string, convert_integer, convert_boolean
)
else:
results = [
col._convert_dtypes(
infer_objects, convert_string, convert_integer, convert_boolean
)
for col_name, col in self.items()
]
result = pd.concat(results, axis=1, copy=False)
return result
# ----------------------------------------------------------------------
# Filling NA's
@doc(**_shared_doc_kwargs)
def fillna(
self: FrameOrSeries,
value=None,
method=None,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series, or DataFrame
Value to use to fill holes (e.g. 0), alternately a
dict/Series/DataFrame of values specifying which value to use for
each index (for a Series) or column (for a DataFrame). Values not
in the dict/Series/DataFrame will not be filled. This value cannot
be a list.
method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None
            Method to use for filling holes in reindexed Series:
            pad / ffill: propagate last valid observation forward to next valid.
            backfill / bfill: use next valid observation to fill gap.
axis : {axes_single_arg}
Axis along which to fill missing values.
inplace : bool, default False
If True, fill in-place. Note: this will modify any
other views on this object (e.g., a no-copy slice for a column in a
DataFrame).
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
downcast : dict, default is None
A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
Returns
-------
{klass} or None
Object with missing values filled or None if ``inplace=True``.
See Also
--------
interpolate : Fill NaN values using interpolation.
reindex : Conform object to new index.
asfreq : Convert TimeSeries to specified frequency.
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
... [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5],
... [np.nan, 3, np.nan, 4]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 NaN 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 0.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 NaN 4
        Replace all NaN elements in columns 'A', 'B', 'C', and 'D' with 0, 1,
2, and 3 respectively.
>>> values = {{'A': 0, 'B': 1, 'C': 2, 'D': 3}}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 2.0 4
        Only replace the first NaN element in each column.
>>> df.fillna(value=values, limit=1)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 NaN 1
2 NaN 1.0 NaN 5
3 NaN 3.0 NaN 4
"""
inplace = validate_bool_kwarg(inplace, "inplace")
value, method = validate_fillna_kwargs(value, method)
self._consolidate_inplace()
        # set the default here, so functions examining the signature
# can detect if something was set (e.g. in groupby) (GH9221)
if axis is None:
axis = 0
axis = self._get_axis_number(axis)
if value is None:
if self._is_mixed_type and axis == 1:
if inplace:
raise NotImplementedError()
result = self.T.fillna(method=method, limit=limit).T
# need to downcast here because of all of the transposes
result._data = result._data.downcast()
return result
new_data = self._data.interpolate(
method=method,
axis=axis,
limit=limit,
inplace=inplace,
coerce=True,
downcast=downcast,
)
else:
if len(self._get_axis(axis)) == 0:
return self
if self.ndim == 1:
if isinstance(value, (dict, ABCSeries)):
value = create_series_with_explicit_dtype(
value, dtype_if_empty=object
)
elif not is_list_like(value):
pass
else:
raise TypeError(
'"value" parameter must be a scalar, dict '
"or Series, but you passed a "
f'"{type(value).__name__}"'
)
new_data = self._data.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, (dict, ABCSeries)):
if axis == 1:
raise NotImplementedError(
"Currently only can fill "
"with dict/Series column "
"by column"
)
result = self if inplace else self.copy()
for k, v in value.items():
if k not in result:
continue
obj = result[k]
obj.fillna(v, limit=limit, inplace=True, downcast=downcast)
return result if not inplace else None
elif not is_list_like(value):
new_data = self._data.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, ABCDataFrame) and self.ndim == 2:
new_data = self.where(self.notna(), value)
else:
raise ValueError(f"invalid fill value with a {type(value)}")
if inplace:
self._update_inplace(new_data)
return None
else:
return self._constructor(new_data).__finalize__(self)
def ffill(
self: FrameOrSeries,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.
Returns
-------
%(klass)s or None
Object with missing values filled or None if ``inplace=True``.
"""
return self.fillna(
method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
def bfill(
self: FrameOrSeries,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.
Returns
-------
%(klass)s or None
Object with missing values filled or None if ``inplace=True``.
"""
return self.fillna(
method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
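    # Minimal illustration of the two synonyms above (hypothetical series):
    #
    #   s = pd.Series([1.0, np.nan, 3.0, np.nan])
    #   s.ffill()                           # [1.0, 1.0, 3.0, 3.0]
    #   s.bfill()                           # [1.0, 3.0, 3.0, nan]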
_shared_docs[
"replace"
] = """
Replace values given in `to_replace` with `value`.
Values of the %(klass)s are replaced with other values dynamically.
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.
Parameters
----------
to_replace : str, regex, list, dict, Series, int, float, or None
How to find the values that will be replaced.
* numeric, str or regex:
- numeric: numeric values equal to `to_replace` will be
replaced with `value`
- str: string exactly matching `to_replace` will be replaced
with `value`
- regex: regexs matching `to_replace` will be replaced with
`value`
* list of str, regex, or numeric:
- First, if `to_replace` and `value` are both lists, they
**must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
lists will be interpreted as regexs otherwise they will match
directly. This doesn't matter much for `value` since there
are only a few possible substitution regexes you can use.
- str, regex and numeric rules apply as above.
* dict:
- Dicts can be used to specify different replacement values
for different existing values. For example,
``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and
'y' with 'z'. To use a dict in this way the `value`
parameter should be `None`.
- For a DataFrame a dict can specify that different values
should be replaced in different columns. For example,
``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a'
and the value 'z' in column 'b' and replaces these values
with whatever is specified in `value`. The `value` parameter
should not be ``None`` in this case. You can treat this as a
special case of passing two lists except that you are
specifying the column to search in.
- For a DataFrame nested dictionaries, e.g.,
``{'a': {'b': np.nan}}``, are read as follows: look in column
'a' for the value 'b' and replace it with NaN. The `value`
parameter should be ``None`` to use a nested dict in this
way. You can nest regular expressions as well. Note that
column names (the top-level dictionary keys in a nested
dictionary) **cannot** be regular expressions.
* None:
- This means that the `regex` argument must be a string,
compiled regular expression, or list, dict, ndarray or
Series of such elements. If `value` is also ``None`` then
this **must** be a nested dictionary or Series.
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
Value to replace any values matching `to_replace` with.
For a DataFrame a dict of values can be used to specify which
value to use for each column (columns not in the dict will not be
filled). Regular expressions, strings and lists or dicts of such
objects are also allowed.
inplace : bool, default False
        If True, performs the replacement in place. Note: this will modify
        any other views on this object (e.g. a column from a DataFrame).
Returns the caller if this is True.
limit : int, default None
Maximum size gap to forward or backward fill.
regex : bool or same types as `to_replace`, default False
Whether to interpret `to_replace` and/or `value` as regular
expressions. If this is ``True`` then `to_replace` *must* be a
string. Alternatively, this could be a regular expression or a
list, dict, or array of regular expressions in which case
`to_replace` must be ``None``.
method : {'pad', 'ffill', 'bfill', `None`}
        The method to use for replacement, when `to_replace` is a
scalar, list or tuple and `value` is ``None``.
.. versionchanged:: 0.23.0
Added to DataFrame.
Returns
-------
%(klass)s
Object after replacement.
Raises
------
AssertionError
* If `regex` is not a ``bool`` and `to_replace` is not
``None``.
TypeError
* If `to_replace` is not a scalar, array-like, ``dict``, or ``None``
* If `to_replace` is a ``dict`` and `value` is not a ``list``,
``dict``, ``ndarray``, or ``Series``
* If `to_replace` is ``None`` and `regex` is not compilable
into a regular expression or is a list, dict, ndarray, or
Series.
* When replacing multiple ``bool`` or ``datetime64`` objects and
the arguments to `to_replace` does not match the type of the
value being replaced
ValueError
* If a ``list`` or an ``ndarray`` is passed to `to_replace` and
`value` but they are not the same length.
See Also
--------
%(klass)s.fillna : Fill NA values.
%(klass)s.where : Replace values based on boolean condition.
Series.str.replace : Simple string replacement.
Notes
-----
* Regex substitution is performed under the hood with ``re.sub``. The
rules for substitution for ``re.sub`` are the same.
* Regular expressions will only substitute on strings, meaning you
cannot provide, for example, a regular expression matching floating
point numbers and expect the columns in your frame that have a
numeric dtype to be matched. However, if those floating point
numbers *are* strings, then you can do this.
* This method has *a lot* of options. You are encouraged to experiment
and play with this method to gain intuition about how it works.
    * When a dict is used as the `to_replace` value, the dict's keys
      act as the `to_replace` values and the dict's values act as the
      `value` parameter.
Examples
--------
**Scalar `to_replace` and `value`**
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.replace(0, 5)
0 5
1 1
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> df.replace(0, 5)
A B C
0 5 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
**List-like `to_replace`**
>>> df.replace([0, 1, 2, 3], 4)
A B C
0 4 5 a
1 4 6 b
2 4 7 c
3 4 8 d
4 4 9 e
>>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])
A B C
0 4 5 a
1 3 6 b
2 2 7 c
3 1 8 d
4 4 9 e
>>> s.replace([1, 2], method='bfill')
0 0
1 3
2 3
3 3
4 4
dtype: int64
**dict-like `to_replace`**
>>> df.replace({0: 10, 1: 100})
A B C
0 10 5 a
1 100 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': {0: 100, 4: 400}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
**Regular expression `to_replace`**
>>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],
... 'B': ['abc', 'bar', 'xyz']})
>>> df.replace(to_replace=r'^ba.$', value='new', regex=True)
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)
A B
0 new abc
1 foo bar
2 bait xyz
>>> df.replace(regex=r'^ba.$', value='new')
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})
A B
0 new abc
1 xyz new
2 bait xyz
>>> df.replace(regex=[r'^ba.$', 'foo'], value='new')
A B
0 new abc
1 new new
2 bait xyz
Note that when replacing multiple ``bool`` or ``datetime64`` objects,
the data types in the `to_replace` parameter must match the data
type of the value being replaced:
>>> df = pd.DataFrame({'A': [True, False, True],
... 'B': [False, True, False]})
>>> df.replace({'a string': 'new value', True: False}) # raises
Traceback (most recent call last):
...
TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'
This raises a ``TypeError`` because one of the ``dict`` keys is not of
the correct type for replacement.
Compare the behavior of ``s.replace({'a': None})`` and
``s.replace('a', None)`` to understand the peculiarities
of the `to_replace` parameter:
>>> s = pd.Series([10, 'a', 'a', 'b', 'a'])
    When one uses a dict as the `to_replace` value, the dict's values
    serve as the `value` parameter.
``s.replace({'a': None})`` is equivalent to
``s.replace(to_replace={'a': None}, value=None, method=None)``:
>>> s.replace({'a': None})
0 10
1 None
2 None
3 b
4 None
dtype: object
When ``value=None`` and `to_replace` is a scalar, list or
tuple, `replace` uses the method parameter (default 'pad') to do the
    replacement. That is why, in the example that follows, the 'a' values
    are replaced by 10 in rows 1 and 2 and by 'b' in row 4.
The command ``s.replace('a', None)`` is actually equivalent to
``s.replace(to_replace='a', value=None, method='pad')``:
>>> s.replace('a', None)
0 10
1 10
2 10
3 b
4 b
dtype: object
"""
@Appender(_shared_docs["replace"] % _shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
if not (
is_scalar(to_replace)
or isinstance(to_replace, pd.Series)
or is_re_compilable(to_replace)
or is_list_like(to_replace)
):
raise TypeError(
"Expecting 'to_replace' to be either a scalar, array-like, "
"dict or None, got invalid type "
f"{repr(type(to_replace).__name__)}"
)
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_bool(regex) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is not a bool")
self._consolidate_inplace()
if value is None:
# passing a single value that is scalar like
# when value is None (GH5319), for compat
if not is_dict_like(to_replace) and not is_dict_like(regex):
to_replace = [to_replace]
if isinstance(to_replace, (tuple, list)):
if isinstance(self, ABCDataFrame):
return self.apply(
_single_replace, args=(to_replace, method, inplace, limit)
)
return _single_replace(self, to_replace, method, inplace, limit)
if not is_dict_like(to_replace):
if not is_dict_like(regex):
raise TypeError(
'If "to_replace" and "value" are both None '
'and "to_replace" is not a list, then '
"regex must be a mapping"
)
to_replace = regex
regex = True
items = list(to_replace.items())
keys, values = zip(*items) if items else ([], [])
are_mappings = [is_dict_like(v) for v in values]
if any(are_mappings):
if not all(are_mappings):
raise TypeError(
"If a nested mapping is passed, all values "
"of the top level mapping must be mappings"
)
# passed a nested dict/Series
to_rep_dict = {}
value_dict = {}
for k, v in items:
keys, values = list(zip(*v.items())) or ([], [])
to_rep_dict[k] = list(keys)
value_dict[k] = list(values)
to_replace, value = to_rep_dict, value_dict
else:
to_replace, value = keys, values
return self.replace(
to_replace, value, inplace=inplace, limit=limit, regex=regex
)
else:
# need a non-zero len on all axes
if not self.size:
return self
new_data = self._data
if is_dict_like(to_replace):
if is_dict_like(value): # {'A' : NA} -> {'A' : 0}
res = self if inplace else self.copy()
for c, src in to_replace.items():
if c in value and c in self:
# object conversion is handled in
# series.replace which is called recursively
res[c] = res[c].replace(
to_replace=src,
value=value[c],
inplace=False,
regex=regex,
)
return None if inplace else res
# {'A': NA} -> 0
elif not is_list_like(value):
keys = [(k, src) for k, src in to_replace.items() if k in self]
keys_len = len(keys) - 1
for i, (k, src) in enumerate(keys):
convert = i == keys_len
new_data = new_data.replace(
to_replace=src,
value=value,
filter=[k],
inplace=inplace,
regex=regex,
convert=convert,
)
else:
raise TypeError("value argument must be scalar, dict, or Series")
elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']
if is_list_like(value):
if len(to_replace) != len(value):
raise ValueError(
f"Replacement lists must match in length. "
f"Expecting {len(to_replace)} got {len(value)} "
)
new_data = self._data.replace_list(
src_list=to_replace,
dest_list=value,
inplace=inplace,
regex=regex,
)
else: # [NA, ''] -> 0
new_data = self._data.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
elif to_replace is None:
if not (
is_re_compilable(regex)
or is_list_like(regex)
or is_dict_like(regex)
):
raise TypeError(
f"'regex' must be a string or a compiled regular expression "
f"or a list or dict of strings or regular expressions, "
f"you passed a {repr(type(regex).__name__)}"
)
return self.replace(
regex, value, inplace=inplace, limit=limit, regex=True
)
else:
# dest iterable dict-like
if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}
new_data = self._data
for k, v in value.items():
if k in self:
new_data = new_data.replace(
to_replace=to_replace,
value=v,
filter=[k],
inplace=inplace,
regex=regex,
)
elif not is_list_like(value): # NA -> 0
new_data = self._data.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
else:
raise TypeError(
f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}'
)
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
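    # Sketch of the nested-mapping normalization above (hypothetical data):
    # a nested dict is split into parallel column -> list mappings before
    # the recursive call, so these two calls are equivalent:
    #
    #   df.replace({"A": {0: 100, 4: 400}})
    #   df.replace(to_replace={"A": [0, 4]}, value={"A": [100, 400]})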
_shared_docs[
"interpolate"
] = """
Please note that only ``method='linear'`` is supported for
DataFrame/Series with a MultiIndex.
Parameters
----------
method : str, default 'linear'
Interpolation technique to use. One of:
* 'linear': Ignore the index and treat the values as equally
spaced. This is the only method supported on MultiIndexes.
* 'time': Works on daily and higher resolution data to interpolate
given length of interval.
* 'index', 'values': use the actual numerical values of the index.
* 'pad': Fill in NaNs using existing values.
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',
'barycentric', 'polynomial': Passed to
`scipy.interpolate.interp1d`. These methods use the numerical
values of the index. Both 'polynomial' and 'spline' require that
you also specify an `order` (int), e.g.
``df.interpolate(method='polynomial', order=5)``.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':
Wrappers around the SciPy interpolation methods of similar
names. See `Notes`.
* 'from_derivatives': Refers to
`scipy.interpolate.BPoly.from_derivatives` which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18.
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to interpolate along.
limit : int, optional
Maximum number of consecutive NaNs to fill. Must be greater than
0.
inplace : bool, default False
Update the data in place if possible.
limit_direction : {'forward', 'backward', 'both'}, default 'forward'
If limit is specified, consecutive NaNs will be filled in this
direction.
limit_area : {`None`, 'inside', 'outside'}, default None
If limit is specified, consecutive NaNs will be filled with this
restriction.
* ``None``: No fill restriction.
* 'inside': Only fill NaNs surrounded by valid values
(interpolate).
* 'outside': Only fill NaNs outside valid values (extrapolate).
.. versionadded:: 0.23.0
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
**kwargs
Keyword arguments to pass on to the interpolating function.
Returns
-------
Series or DataFrame
Returns the same object type as the caller, interpolated at
some or all ``NaN`` values.
See Also
--------
fillna : Fill missing values using different methods.
scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials
(Akima interpolator).
scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the
Bernstein basis.
scipy.interpolate.interp1d : Interpolate a 1-D function.
scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh
interpolator).
scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic
interpolation.
scipy.interpolate.CubicSpline : Cubic spline data interpolator.
Notes
-----
The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
methods are wrappers around the respective SciPy implementations of
similar names. These use the actual numerical values of the index.
For more information on their behavior, see the
`SciPy documentation
<https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
and `SciPy tutorial
<https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.
Examples
--------
Filling in ``NaN`` in a :class:`~pandas.Series` via linear
interpolation.
>>> s = pd.Series([0, 1, np.nan, 3])
>>> s
0 0.0
1 1.0
2 NaN
3 3.0
dtype: float64
>>> s.interpolate()
0 0.0
1 1.0
2 2.0
3 3.0
dtype: float64
Filling in ``NaN`` in a Series by padding, but filling at most two
consecutive ``NaN`` at a time.
>>> s = pd.Series([np.nan, "single_one", np.nan,
... "fill_two_more", np.nan, np.nan, np.nan,
... 4.71, np.nan])
>>> s
0 NaN
1 single_one
2 NaN
3 fill_two_more
4 NaN
5 NaN
6 NaN
7 4.71
8 NaN
dtype: object
>>> s.interpolate(method='pad', limit=2)
0 NaN
1 single_one
2 single_one
3 fill_two_more
4 fill_two_more
5 fill_two_more
6 NaN
7 4.71
8 4.71
dtype: object
Filling in ``NaN`` in a Series via polynomial interpolation or splines:
Both 'polynomial' and 'spline' methods require that you also specify
an ``order`` (int).
>>> s = pd.Series([0, 2, np.nan, 8])
>>> s.interpolate(method='polynomial', order=2)
0 0.000000
1 2.000000
2 4.666667
3 8.000000
dtype: float64
Fill the DataFrame forward (that is, going down) along each column
using linear interpolation.
Note how the last entry in column 'a' is interpolated differently,
because there is no entry after it to use for interpolation.
Note how the first entry in column 'b' remains ``NaN``, because there
is no entry before it to use for interpolation.
>>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
... (np.nan, 2.0, np.nan, np.nan),
... (2.0, 3.0, np.nan, 9.0),
... (np.nan, 4.0, -4.0, 16.0)],
... columns=list('abcd'))
>>> df
a b c d
0 0.0 NaN -1.0 1.0
1 NaN 2.0 NaN NaN
2 2.0 3.0 NaN 9.0
3 NaN 4.0 -4.0 16.0
>>> df.interpolate(method='linear', limit_direction='forward', axis=0)
a b c d
0 0.0 NaN -1.0 1.0
1 1.0 2.0 -2.0 5.0
2 2.0 3.0 -3.0 9.0
3 2.0 4.0 -4.0 16.0
Using polynomial interpolation.
>>> df['d'].interpolate(method='polynomial', order=2)
0 1.0
1 4.0
2 9.0
3 16.0
Name: d, dtype: float64
"""
@Appender(_shared_docs["interpolate"] % _shared_doc_kwargs)
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction="forward",
limit_area=None,
downcast=None,
**kwargs,
):
"""
Interpolate values according to different methods.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
if axis == 0:
ax = self._info_axis_name
_maybe_transposed_self = self
elif axis == 1:
_maybe_transposed_self = self.T
ax = 1
ax = _maybe_transposed_self._get_axis_number(ax)
if _maybe_transposed_self.ndim == 2:
alt_ax = 1 - ax
else:
alt_ax = ax
if isinstance(_maybe_transposed_self.index, MultiIndex) and method != "linear":
raise ValueError(
"Only `method=linear` interpolation is supported on MultiIndexes."
)
if _maybe_transposed_self._data.get_dtype_counts().get("object") == len(
_maybe_transposed_self.T
):
raise TypeError(
"Cannot interpolate with all object-dtype columns "
"in the DataFrame. Try setting at least one "
"column to a numeric dtype."
)
# create/use the index
if method == "linear":
# prior default
index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))
else:
index = _maybe_transposed_self._get_axis(alt_ax)
methods = {"index", "values", "nearest", "time"}
is_numeric_or_datetime = (
is_numeric_dtype(index)
or is_datetime64_any_dtype(index)
or is_timedelta64_dtype(index)
)
if method not in methods and not is_numeric_or_datetime:
raise ValueError(
"Index column must be numeric or datetime type when "
f"using {method} method other than linear. "
"Try setting a numeric or datetime index column before "
"interpolating."
)
if isna(index).any():
raise NotImplementedError(
"Interpolation with NaNs in the index "
"has not been implemented. Try filling "
"those NaNs before interpolating."
)
data = _maybe_transposed_self._data
new_data = data.interpolate(
method=method,
axis=ax,
index=index,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
inplace=inplace,
downcast=downcast,
**kwargs,
)
if inplace:
if axis == 1:
new_data = self._constructor(new_data).T._data
self._update_inplace(new_data)
else:
res = self._constructor(new_data).__finalize__(self)
if axis == 1:
res = res.T
return res
# ----------------------------------------------------------------------
    # Timeseries Methods
def asof(self, where, subset=None):
"""
Return the last row(s) without any NaNs before `where`.
The last row (for each element in `where`, if list) without any
NaN is taken.
        In case of a :class:`~pandas.DataFrame`, the last row without NaN
        considering only the subset of columns (if not `None`) is taken.
        If there is no good value, NaN is returned for a Series or
        a Series of NaN values for a DataFrame.
Parameters
----------
where : date or array-like of dates
Date(s) before which the last row(s) are returned.
subset : str or array-like of str, default `None`
For DataFrame, if not `None`, only use these columns to
check for NaNs.
Returns
-------
scalar, Series, or DataFrame
The return can be:
* scalar : when `self` is a Series and `where` is a scalar
* Series: when `self` is a Series and `where` is an array-like,
or when `self` is a DataFrame and `where` is a scalar
* DataFrame : when `self` is a DataFrame and `where` is an
array-like
Return scalar, Series, or DataFrame.
See Also
--------
merge_asof : Perform an asof merge. Similar to left join.
Notes
-----
Dates are assumed to be sorted. Raises if this is not the case.
Examples
--------
A Series and a scalar `where`.
>>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
>>> s
10 1.0
20 2.0
30 NaN
40 4.0
dtype: float64
>>> s.asof(20)
2.0
For a sequence `where`, a Series is returned. The first value is
NaN, because the first element of `where` is before the first
index value.
>>> s.asof([5, 20])
5 NaN
20 2.0
dtype: float64
Missing values are not considered. The following is ``2.0``, not
NaN, even though NaN is at the index location for ``30``.
>>> s.asof(30)
2.0
Take all columns into consideration
>>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],
... 'b': [None, None, None, None, 500]},
... index=pd.DatetimeIndex(['2018-02-27 09:01:00',
... '2018-02-27 09:02:00',
... '2018-02-27 09:03:00',
... '2018-02-27 09:04:00',
... '2018-02-27 09:05:00']))
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']))
a b
2018-02-27 09:03:30 NaN NaN
2018-02-27 09:04:30 NaN NaN
Take a single column into consideration
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']),
... subset=['a'])
a b
2018-02-27 09:03:30 30.0 NaN
2018-02-27 09:04:30 40.0 NaN
"""
if isinstance(where, str):
where = Timestamp(where)
if not self.index.is_monotonic:
raise ValueError("asof requires a sorted index")
is_series = isinstance(self, ABCSeries)
if is_series:
if subset is not None:
raise ValueError("subset is not valid for Series")
else:
if subset is None:
subset = self.columns
if not is_list_like(subset):
subset = [subset]
is_list = is_list_like(where)
if not is_list:
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq)
if where < start:
if not is_series:
from pandas import Series
return Series(index=self.columns, name=where, dtype=np.float64)
return np.nan
# It's always much faster to use a *while* loop here for
# Series than pre-computing all the NAs. However a
# *while* loop is extremely expensive for DataFrame
# so we later pre-compute all the NAs and use the same
# code path whether *where* is a scalar or list.
# See PR: https://github.com/pandas-dev/pandas/pull/14476
if is_series:
loc = self.index.searchsorted(where, side="right")
if loc > 0:
loc -= 1
values = self._values
while loc > 0 and isna(values[loc]):
loc -= 1
return values[loc]
if not isinstance(where, Index):
where = Index(where) if is_list else Index([where])
nulls = self.isna() if is_series else self[subset].isna().any(1)
if nulls.all():
if is_series:
return self._constructor(np.nan, index=where, name=self.name)
elif is_list:
from pandas import DataFrame
return DataFrame(np.nan, index=where, columns=self.columns)
else:
from pandas import Series
return Series(np.nan, index=self.columns, name=where[0])
locs = self.index.asof_locs(where, ~(nulls.values))
# mask the missing
missing = locs == -1
data = self.take(locs)
data.index = where
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
# ----------------------------------------------------------------------
# Action Methods
_shared_docs[
"isna"
] = """
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
    NA values, such as None or :attr:`numpy.NaN`, get mapped to True
values.
Everything else gets mapped to False values. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.isnull : Alias of isna.
%(klass)s.notna : Boolean inverse of isna.
%(klass)s.dropna : Omit axes labels with missing values.
isna : Top-level isna.
Examples
--------
Show which entries in a DataFrame are NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.isna()
age born name toy
0 False True False True
1 False False False False
2 True False False False
Show which entries in a Series are NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.isna()
0 False
1 False
2 True
dtype: bool
"""
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isna(self: FrameOrSeries) -> FrameOrSeries:
return isna(self).__finalize__(self)
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isnull(self: FrameOrSeries) -> FrameOrSeries:
return isna(self).__finalize__(self)
_shared_docs[
"notna"
] = """
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.notnull : Alias of notna.
%(klass)s.isna : Boolean inverse of notna.
%(klass)s.dropna : Omit axes labels with missing values.
notna : Top-level notna.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
"""
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notna(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self)
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notnull(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self)
def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):
if (lower is not None and np.any(isna(lower))) or (
upper is not None and np.any(isna(upper))
):
raise ValueError("Cannot use an NA value as a clip threshold")
result = self
mask = isna(self.values)
with np.errstate(all="ignore"):
if upper is not None:
subset = self.to_numpy() <= upper
result = result.where(subset, upper, axis=None, inplace=False)
if lower is not None:
subset = self.to_numpy() >= lower
result = result.where(subset, lower, axis=None, inplace=False)
if np.any(mask):
result[mask] = np.nan
if inplace:
self._update_inplace(result)
else:
return result
def _clip_with_one_bound(self, threshold, method, axis, inplace):
if axis is not None:
axis = self._get_axis_number(axis)
# method is self.le for upper bound and self.ge for lower bound
if is_scalar(threshold) and is_number(threshold):
if method.__name__ == "le":
return self._clip_with_scalar(None, threshold, inplace=inplace)
return self._clip_with_scalar(threshold, None, inplace=inplace)
subset = method(threshold, axis=axis) | isna(self)
# GH #15390
        # In order for the where method to work, the threshold
        # must be transformed into an NDFrame from other array-like structures.
if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
if isinstance(self, ABCSeries):
threshold = self._constructor(threshold, index=self.index)
else:
threshold = _align_method_FRAME(self, threshold, axis, flex=None)[1]
return self.where(subset, threshold, axis=axis, inplace=inplace)
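    # Hedged sketch of the bound logic above (hypothetical series): a
    # one-sided clip keeps values that already satisfy the bound (or are NA)
    # and replaces the rest with the threshold, via ``where``.
    #
    #   s = pd.Series([1, 5, 10])
    #   s.clip(lower=4)                     # [4, 5, 10]
    #   s.where(s.ge(4) | s.isna(), 4)      # same result, spelled out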
def clip(
self: FrameOrSeries,
lower=None,
upper=None,
axis=None,
inplace: bool_t = False,
*args,
**kwargs,
) -> FrameOrSeries:
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values. Thresholds
        can be singular values or array-like, and in the latter case
        the clipping is performed element-wise along the specified axis.
Parameters
----------
lower : float or array_like, default None
Minimum threshold value. All values below this
threshold will be set to it.
upper : float or array_like, default None
Maximum threshold value. All values above this
threshold will be set to it.
axis : int or str axis name, optional
Align object with lower and upper along the given axis.
inplace : bool, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with numpy.
Returns
-------
Series or DataFrame
Same type as calling object with the values outside the
clip boundaries replaced.
Examples
--------
>>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
>>> df = pd.DataFrame(data)
>>> df
col_0 col_1
0 9 -2
1 -3 -7
2 0 6
3 -1 8
4 5 -5
Clips per column using lower and upper thresholds:
>>> df.clip(-4, 6)
col_0 col_1
0 6 -2
1 -3 -4
2 0 6
3 -1 6
4 5 -4
Clips using specific lower and upper thresholds per column element:
>>> t = pd.Series([2, -4, -1, 6, 3])
>>> t
0 2
1 -4
2 -1
3 6
4 3
dtype: int64
>>> df.clip(t, t + 4, axis=0)
col_0 col_1
0 6 2
1 -3 -4
2 0 3
3 6 8
4 5 3
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = nv.validate_clip_with_axis(axis, args, kwargs)
if axis is not None:
axis = self._get_axis_number(axis)
# GH 17276
# numpy doesn't like NaN as a clip value
# so ignore
# GH 19992
# numpy doesn't drop a list-like bound containing NaN
if not is_list_like(lower) and np.any(isna(lower)):
lower = None
if not is_list_like(upper) and np.any(isna(upper)):
upper = None
# GH 2747 (arguments were reversed)
if lower is not None and upper is not None:
if is_scalar(lower) and is_scalar(upper):
lower, upper = min(lower, upper), max(lower, upper)
# fast-path for scalars
if (lower is None or (is_scalar(lower) and is_number(lower))) and (
upper is None or (is_scalar(upper) and is_number(upper))
):
return self._clip_with_scalar(lower, upper, inplace=inplace)
result = self
if lower is not None:
result = result._clip_with_one_bound(
lower, method=self.ge, axis=axis, inplace=inplace
)
if upper is not None:
if inplace:
result = self
result = result._clip_with_one_bound(
upper, method=self.le, axis=axis, inplace=inplace
)
return result
_shared_docs[
"groupby"
] = """
Group %(klass)s using a mapper or by a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : mapping, function, label, or list of labels
Used to determine the groups for the groupby.
If ``by`` is a function, it's called on each value of the object's
index. If a dict or Series is passed, the Series or dict VALUES
will be used to determine the groups (the Series' values are first
aligned; see ``.align()`` method). If an ndarray is passed, the
values are used as-is determine the groups. A label or list of
labels may be passed to group by the columns in ``self``. Notice
that a tuple is interpreted as a (single) key.
axis : {0 or 'index', 1 or 'columns'}, default 0
Split along rows (0) or columns (1).
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
sort : bool, default True
Sort group keys. Get better performance by turning this off.
Note this does not influence the order of observations within each
group. Groupby preserves the order of rows within each group.
group_keys : bool, default True
When calling apply, add group keys to index to identify pieces.
squeeze : bool, default False
Reduce the dimensionality of the return type if possible,
otherwise return a consistent type.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionadded:: 0.23.0
Returns
-------
%(klass)sGroupBy
Returns a groupby object that contains information about the groups.
See Also
--------
resample : Convenience method for frequency conversion and resampling
of time series.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.
"""
def asfreq(
self: FrameOrSeries,
freq,
method=None,
how: Optional[str] = None,
normalize: bool_t = False,
fill_value=None,
) -> FrameOrSeries:
"""
Convert TimeSeries to specified frequency.
Optionally provide filling method to pad/backfill missing values.
Returns the original data conformed to a new index with the specified
frequency. ``resample`` is more appropriate if an operation, such as
summarization, is necessary to represent the data at the new frequency.
Parameters
----------
freq : DateOffset or str
method : {'backfill'/'bfill', 'pad'/'ffill'}, default None
Method to use for filling holes in reindexed Series (note this
does not fill NaNs that already were present):
* 'pad' / 'ffill': propagate last valid observation forward to next
valid
* 'backfill' / 'bfill': use NEXT valid observation to fill.
        how : {'start', 'end'}, default 'end'
For PeriodIndex only (see PeriodIndex.asfreq).
normalize : bool, default False
Whether to reset output index to midnight.
fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
Returns
-------
converted : same type as caller
See Also
--------
reindex
Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
Start by creating a series with 4 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=4, freq='T')
>>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
>>> df = pd.DataFrame({'s':series})
>>> df
s
2000-01-01 00:00:00 0.0
2000-01-01 00:01:00 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:03:00 3.0
Upsample the series into 30 second bins.
>>> df.asfreq(freq='30S')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 NaN
2000-01-01 00:03:00 3.0
        Upsample again, providing a ``fill_value``.
>>> df.asfreq(freq='30S', fill_value=9.0)
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 9.0
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 9.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 9.0
2000-01-01 00:03:00 3.0
Upsample again, providing a ``method``.
>>> df.asfreq(freq='30S', method='bfill')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 2.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 3.0
2000-01-01 00:03:00 3.0
"""
from pandas.core.resample import asfreq
return asfreq(
self,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
def at_time(
self: FrameOrSeries, time, asof: bool_t = False, axis=None
) -> FrameOrSeries:
"""
Select values at particular time of day (e.g., 9:30AM).
Parameters
----------
time : datetime.time or str
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
>>> ts.at_time('12:00')
A
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_at_time(time, asof=asof)
except AttributeError as err:
raise TypeError("Index must be DatetimeIndex") from err
return self._take_with_is_copy(indexer, axis=axis)
def between_time(
self: FrameOrSeries,
start_time,
end_time,
include_start: bool_t = True,
include_end: bool_t = True,
axis=None,
) -> FrameOrSeries:
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
Initial time as a time filter limit.
end_time : datetime.time or str
End time as a time filter limit.
include_start : bool, default True
Whether the start time needs to be included in the result.
include_end : bool, default True
Whether the end time needs to be included in the result.
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine range time on index or columns value.
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Data from the original object filtered to the specified dates range.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
>>> ts.between_time('0:15', '0:45')
A
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:
>>> ts.between_time('0:45', '0:15')
A
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_between_time(
start_time,
end_time,
include_start=include_start,
include_end=include_end,
)
except AttributeError as err:
raise TypeError("Index must be DatetimeIndex") from err
return self._take_with_is_copy(indexer, axis=axis)
def resample(
self,
rule,
axis=0,
closed: Optional[str] = None,
label: Optional[str] = None,
convention: str = "start",
kind: Optional[str] = None,
loffset=None,
base: int = 0,
on=None,
level=None,
) -> "Resampler":
"""
Resample time-series data.
Convenience method for frequency conversion and resampling of time
series. Object must have a datetime-like index (`DatetimeIndex`,
`PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values
to the `on` or `level` keyword.
Parameters
----------
rule : DateOffset, Timedelta or str
The offset string or object representing target conversion.
axis : {0 or 'index', 1 or 'columns'}, default 0
Which axis to use for up- or down-sampling. For `Series` this
will default to 0, i.e. along the rows. Must be
`DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.
closed : {'right', 'left'}, default None
Which side of bin interval is closed. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
label : {'right', 'left'}, default None
Which bin edge label to label bucket with. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
convention : {'start', 'end', 's', 'e'}, default 'start'
For `PeriodIndex` only, controls whether to use the start or
end of `rule`.
kind : {'timestamp', 'period'}, optional, default None
Pass 'timestamp' to convert the resulting index to a
            `DatetimeIndex` or 'period' to convert it to a `PeriodIndex`.
By default the input representation is retained.
loffset : timedelta, default None
Adjust the resampled time labels.
base : int, default 0
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0.
on : str, optional
For a DataFrame, column to use instead of index for resampling.
Column must be datetime-like.
level : str or int, optional
For a MultiIndex, level (name or number) to use for
resampling. `level` must be datetime-like.
Returns
-------
Resampler object
See Also
--------
groupby : Group by mapping, function, label, or list of labels.
Series.resample : Resample a Series.
DataFrame.resample: Resample a DataFrame.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`_
for more.
To learn more about the offset strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.
Examples
--------
Start by creating a series with 9 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=9, freq='T')
>>> series = pd.Series(range(9), index=index)
>>> series
2000-01-01 00:00:00 0
2000-01-01 00:01:00 1
2000-01-01 00:02:00 2
2000-01-01 00:03:00 3
2000-01-01 00:04:00 4
2000-01-01 00:05:00 5
2000-01-01 00:06:00 6
2000-01-01 00:07:00 7
2000-01-01 00:08:00 8
Freq: T, dtype: int64
Downsample the series into 3 minute bins and sum the values
of the timestamps falling into a bin.
>>> series.resample('3T').sum()
2000-01-01 00:00:00 3
2000-01-01 00:03:00 12
2000-01-01 00:06:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but label each
        bin using the right edge instead of the left. Please note that the
        value at the bin's label is not included in the bin it labels. For
        example, in the original series the
        bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
        value in the resampled bucket with the label ``2000-01-01 00:03:00``
        does not include 3 (if it did, the summed value would be 6, not 3).
        To include this value, close the right side of the bin interval, as
        illustrated in the example below this one.
>>> series.resample('3T', label='right').sum()
2000-01-01 00:03:00 3
2000-01-01 00:06:00 12
2000-01-01 00:09:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> series.resample('3T', label='right', closed='right').sum()
2000-01-01 00:00:00 0
2000-01-01 00:03:00 6
2000-01-01 00:06:00 15
2000-01-01 00:09:00 15
Freq: 3T, dtype: int64
Upsample the series into 30 second bins.
>>> series.resample('30S').asfreq()[0:5] # Select first 5 rows
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 1.0
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
Freq: 30S, dtype: float64
Upsample the series into 30 second bins and fill the ``NaN``
values using the ``pad`` method.
>>> series.resample('30S').pad()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 0
2000-01-01 00:01:00 1
2000-01-01 00:01:30 1
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Upsample the series into 30 second bins and fill the
``NaN`` values using the ``bfill`` method.
>>> series.resample('30S').bfill()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 1
2000-01-01 00:01:00 1
2000-01-01 00:01:30 2
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Pass a custom function via ``apply``
>>> def custom_resampler(array_like):
... return np.sum(array_like) + 5
...
>>> series.resample('3T').apply(custom_resampler)
2000-01-01 00:00:00 8
2000-01-01 00:03:00 17
2000-01-01 00:06:00 26
Freq: 3T, dtype: int64
For a Series with a PeriodIndex, the keyword `convention` can be
used to control whether to use the start or end of `rule`.
Resample a year by quarter using 'start' `convention`. Values are
assigned to the first quarter of the period.
>>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',
... freq='A',
... periods=2))
>>> s
2012 1
2013 2
Freq: A-DEC, dtype: int64
>>> s.resample('Q', convention='start').asfreq()
2012Q1 1.0
2012Q2 NaN
2012Q3 NaN
2012Q4 NaN
2013Q1 2.0
2013Q2 NaN
2013Q3 NaN
2013Q4 NaN
Freq: Q-DEC, dtype: float64
Resample quarters by month using 'end' `convention`. Values are
assigned to the last month of the period.
>>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',
... freq='Q',
... periods=4))
>>> q
2018Q1 1
2018Q2 2
2018Q3 3
2018Q4 4
Freq: Q-DEC, dtype: int64
>>> q.resample('M', convention='end').asfreq()
2018-03 1.0
2018-04 NaN
2018-05 NaN
2018-06 2.0
2018-07 NaN
2018-08 NaN
2018-09 3.0
2018-10 NaN
2018-11 NaN
2018-12 4.0
Freq: M, dtype: float64
For DataFrame objects, the keyword `on` can be used to specify the
column instead of the index for resampling.
>>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df = pd.DataFrame(d)
>>> df['week_starting'] = pd.date_range('01/01/2018',
... periods=8,
... freq='W')
>>> df
price volume week_starting
0 10 50 2018-01-07
1 11 60 2018-01-14
2 9 40 2018-01-21
3 13 100 2018-01-28
4 14 50 2018-02-04
5 18 100 2018-02-11
6 17 40 2018-02-18
7 19 50 2018-02-25
>>> df.resample('M', on='week_starting').mean()
price volume
week_starting
2018-01-31 10.75 62.5
2018-02-28 17.00 60.0
For a DataFrame with MultiIndex, the keyword `level` can be used to
specify on which level the resampling needs to take place.
>>> days = pd.date_range('1/1/2000', periods=4, freq='D')
>>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df2 = pd.DataFrame(d2,
... index=pd.MultiIndex.from_product([days,
... ['morning',
... 'afternoon']]
... ))
>>> df2
price volume
2000-01-01 morning 10 50
afternoon 11 60
2000-01-02 morning 9 40
afternoon 13 100
2000-01-03 morning 14 50
afternoon 18 100
2000-01-04 morning 17 40
afternoon 19 50
>>> df2.resample('D', level=0).sum()
price volume
2000-01-01 21 110
2000-01-02 22 140
2000-01-03 32 150
2000-01-04 36 90
"""
from pandas.core.resample import get_resampler
axis = self._get_axis_number(axis)
return get_resampler(
self,
freq=rule,
label=label,
closed=closed,
axis=axis,
kind=kind,
loffset=loffset,
convention=convention,
base=base,
key=on,
level=level,
)
def first(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
Method to subset initial periods of time series data based on a date offset.
Parameters
----------
offset : str, DateOffset, dateutil.relativedelta
Returns
-------
subset : same type as caller
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
last : Select final periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the first 3 days:
>>> ts.first('3D')
A
2018-04-09 1
2018-04-11 2
        Notice that the data for the first 3 calendar days was returned, not
        the first 3 days observed in the dataset, and therefore data for
        2018-04-13 was not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'first' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
end_date = end = self.index[0] + offset
# Tick-like, e.g. 3 weeks
if not offset.is_anchored() and hasattr(offset, "_inc"):
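            # Tick offsets define a half-open window [start, start + offset),
            # so exclude any rows falling exactly on end_date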
if end_date in self.index:
end = self.index.searchsorted(end_date, side="left")
return self.iloc[:end]
return self.loc[:end]
def last(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
Method to subset final periods of time series data based on a date offset.
Parameters
----------
offset : str, DateOffset, dateutil.relativedelta
Returns
-------
subset : same type as caller
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
first : Select initial periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the last 3 days:
>>> ts.last('3D')
A
2018-04-13 3
2018-04-15 4
        Notice that the data for the last 3 calendar days was returned, not
        the last 3 observed days in the dataset, and therefore data for
        2018-04-11 was not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'last' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
start_date = self.index[-1] - offset
start = self.index.searchsorted(start_date, side="right")
return self.iloc[start:]
def rank(
self: FrameOrSeries,
axis=0,
method: str = "average",
numeric_only: Optional[bool_t] = None,
na_option: str = "keep",
ascending: bool_t = True,
pct: bool_t = False,
) -> FrameOrSeries:
"""
Compute numerical data ranks (1 through n) along axis.
By default, equal values are assigned a rank that is the average of the
ranks of those values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Index to direct ranking.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
How to rank the group of records that have the same value (i.e. ties):
* average: average rank of the group
* min: lowest rank in the group
* max: highest rank in the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups.
numeric_only : bool, optional
For DataFrame objects, rank only numeric columns if set to True.
na_option : {'keep', 'top', 'bottom'}, default 'keep'
How to rank NaN values:
* keep: assign NaN rank to NaN values
* top: assign smallest rank to NaN values if ascending
* bottom: assign highest rank to NaN values if ascending.
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to display the returned rankings in percentile
form.
Returns
-------
same type as caller
Return a Series or DataFrame with data ranks as values.
See Also
--------
core.groupby.GroupBy.rank : Rank of values within each group.
Examples
--------
>>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',
... 'spider', 'snake'],
... 'Number_legs': [4, 2, 4, 8, np.nan]})
>>> df
Animal Number_legs
0 cat 4.0
1 penguin 2.0
2 dog 4.0
3 spider 8.0
4 snake NaN
The following example shows how the method behaves with the above
parameters:
* default_rank: this is the default behaviour obtained without using
any parameter.
        * max_rank: setting ``method = 'max'``, the records that have the
          same values are ranked using the highest rank (e.g. since 'cat'
          and 'dog' are both in the 2nd and 3rd position, rank 3 is
          assigned).
* NA_bottom: choosing ``na_option = 'bottom'``, if there are records
with NaN values they are placed at the bottom of the ranking.
* pct_rank: when setting ``pct = True``, the ranking is expressed as
percentile rank.
>>> df['default_rank'] = df['Number_legs'].rank()
>>> df['max_rank'] = df['Number_legs'].rank(method='max')
>>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')
>>> df['pct_rank'] = df['Number_legs'].rank(pct=True)
>>> df
Animal Number_legs default_rank max_rank NA_bottom pct_rank
0 cat 4.0 2.5 3.0 2.5 0.625
1 penguin 2.0 1.0 1.0 1.0 0.250
2 dog 4.0 2.5 3.0 2.5 0.625
3 spider 8.0 4.0 4.0 4.0 1.000
4 snake NaN NaN NaN 5.0 NaN
"""
axis = self._get_axis_number(axis)
if na_option not in {"keep", "top", "bottom"}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
def ranker(data):
ranks = algos.rank(
data.values,
axis=axis,
method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
ranks = self._constructor(ranks, **data._construct_axes_dict())
return ranks.__finalize__(self)
# if numeric_only is None, and we can't get anything, we try with
# numeric_only=True
if numeric_only is None:
try:
return ranker(self)
except TypeError:
numeric_only = True
if numeric_only:
data = self._get_numeric_data()
else:
data = self
return ranker(data)
_shared_docs[
"align"
] = """
Align two objects on their axes with the specified join method.
Join method is specified for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {'outer', 'inner', 'left', 'right'}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None).
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level.
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series:
- pad / ffill: propagate last valid observation forward to next valid.
- backfill / bfill: use NEXT valid observation to fill gap.
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
fill_axis : %(axes_single_arg)s, default 0
Filling axis, method and limit.
broadcast_axis : %(axes_single_arg)s, default None
Broadcast values along this axis, if aligning two objects of
different dimensions.
Returns
-------
(left, right) : (%(klass)s, type of other)
Aligned objects.
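        Examples
        --------
        A minimal sketch with two Series that only partially share an index:
        >>> left = pd.Series([1, 2], index=['a', 'b'])
        >>> right = pd.Series([3, 4], index=['b', 'c'])
        >>> new_left, new_right = left.align(right, join='outer')
        >>> new_left
        a    1.0
        b    2.0
        c    NaN
        dtype: float64
        >>> new_right
        a    NaN
        b    3.0
        c    4.0
        dtype: float64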
"""
@Appender(_shared_docs["align"] % _shared_doc_kwargs)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
method = missing.clean_fill_method(method)
if broadcast_axis == 1 and self.ndim != other.ndim:
if isinstance(self, ABCSeries):
# this means other is a DataFrame, and we need to broadcast
# self
cons = self._constructor_expanddim
df = cons(
{c: self for c in other.columns}, **other._construct_axes_dict()
)
return df._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
# this means self is a DataFrame, and we need to broadcast
# other
cons = other._constructor_expanddim
df = cons(
{c: other for c in self.columns}, **self._construct_axes_dict()
)
return self._align_frame(
df,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
if axis is not None:
axis = self._get_axis_number(axis)
if isinstance(other, ABCDataFrame):
return self._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
return self._align_series(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
def _align_frame(
self,
other,
join="outer",
axis=None,
level=None,
copy: bool_t = True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
# defaults
join_index, join_columns = None, None
ilidx, iridx = None, None
clidx, cridx = None, None
is_series = isinstance(self, ABCSeries)
if axis is None or axis == 0:
if not self.index.equals(other.index):
join_index, ilidx, iridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if axis is None or axis == 1:
if not is_series and not self.columns.equals(other.columns):
join_columns, clidx, cridx = self.columns.join(
other.columns, how=join, level=level, return_indexers=True
)
if is_series:
reindexers = {0: [join_index, ilidx]}
else:
reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}
left = self._reindex_with_indexers(
reindexers, copy=copy, fill_value=fill_value, allow_dups=True
)
        # `other` must always be a DataFrame here
right = other._reindex_with_indexers(
{0: [join_index, iridx], 1: [join_columns, cridx]},
copy=copy,
fill_value=fill_value,
allow_dups=True,
)
if method is not None:
left = self._ensure_type(
left.fillna(method=method, axis=fill_axis, limit=limit)
)
right = right.fillna(method=method, axis=fill_axis, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _align_series(
self,
other,
join="outer",
axis=None,
level=None,
copy: bool_t = True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
is_series = isinstance(self, ABCSeries)
# series/series compat, other must always be a Series
if is_series:
if axis:
raise ValueError("cannot align series to a series other than axis 0")
# equal
if self.index.equals(other.index):
join_index, lidx, ridx = None, None, None
else:
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
left = self._reindex_indexer(join_index, lidx, copy)
right = other._reindex_indexer(join_index, ridx, copy)
else:
# one has > 1 ndim
fdata = self._data
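            # note: a DataFrame's BlockManager stores its axes as
            # [columns, index], so block axis 1 reindexes the index and
            # block axis 0 the columns in the calls below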
if axis == 0:
join_index = self.index
lidx, ridx = None, None
if not self.index.equals(other.index):
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=1)
elif axis == 1:
join_index = self.columns
lidx, ridx = None, None
if not self.columns.equals(other.index):
join_index, lidx, ridx = self.columns.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=0)
else:
raise ValueError("Must specify axis=0 or 1")
if copy and fdata is self._data:
fdata = fdata.copy()
left = self._constructor(fdata)
if ridx is None:
right = other
else:
right = other.reindex(join_index, level=level)
# fill
fill_na = notna(fill_value) or (method is not None)
if fill_na:
left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)
right = right.fillna(fill_value, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_series or (not is_series and axis == 0):
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
"""
Equivalent to public method `where`, except that `other` is not
applied as a function even if callable. Used in __setitem__.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# align the cond to same shape as myself
cond = com.apply_if_callable(cond, self)
if isinstance(cond, NDFrame):
cond, _ = cond.align(self, join="right", broadcast_axis=1)
else:
if not hasattr(cond, "shape"):
cond = np.asanyarray(cond)
if cond.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
cond = self._constructor(cond, **self._construct_axes_dict())
# make sure we are boolean
fill_value = bool(inplace)
cond = cond.fillna(fill_value)
msg = "Boolean array expected for the condition, not {dtype}"
if not isinstance(cond, ABCDataFrame):
# This is a single-dimensional object.
if not is_bool_dtype(cond):
raise ValueError(msg.format(dtype=cond.dtype))
elif not cond.empty:
for dt in cond.dtypes:
if not is_bool_dtype(dt):
raise ValueError(msg.format(dtype=dt))
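        # the inplace path below uses putmask, which *replaces* where the
        # mask is True; invert so that entries where `cond` is True are kept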
cond = -cond if inplace else cond
# try to align with other
try_quick = True
if hasattr(other, "align"):
# align with me
if other.ndim <= self.ndim:
_, other = self.align(
other, join="left", axis=axis, level=level, fill_value=np.nan
)
# if we are NOT aligned, raise as we cannot where index
if axis is None and not all(
other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes)
):
raise InvalidIndexError
# slice me out of the other
else:
raise NotImplementedError(
"cannot align with a higher dimensional NDFrame"
)
if isinstance(other, np.ndarray):
if other.shape != self.shape:
if self.ndim == 1:
icond = cond.values
# GH 2745 / GH 4192
# treat like a scalar
if len(other) == 1:
other = np.array(other[0])
# GH 3235
# match True cond to other
elif len(cond[icond]) == len(other):
# try to not change dtype at first (if try_quick)
if try_quick:
new_other = np.asarray(self)
new_other = new_other.copy()
new_other[icond] = other
other = new_other
else:
raise ValueError(
"Length of replacements must equal series length"
)
else:
raise ValueError(
"other must be the same shape as self when an ndarray"
)
# we are the same shape, so create an actual object for alignment
else:
other = self._constructor(other, **self._construct_axes_dict())
if axis is None:
axis = 0
if self.ndim == getattr(other, "ndim", 0):
align = True
else:
align = self._get_axis_number(axis) == 1
block_axis = self._get_block_manager_axis(axis)
if inplace:
# we may have different type blocks come out of putmask, so
# reconstruct the block manager
self._check_inplace_setting(other)
new_data = self._data.putmask(
mask=cond,
new=other,
align=align,
inplace=True,
axis=block_axis,
transpose=self._AXIS_REVERSED,
)
self._update_inplace(new_data)
else:
new_data = self._data.where(
other=other,
cond=cond,
align=align,
errors=errors,
try_cast=try_cast,
axis=block_axis,
)
return self._constructor(new_data).__finalize__(self)
_shared_docs[
"where"
] = """
Replace values where the condition is %(cond_rev)s.
Parameters
----------
cond : bool %(klass)s, array-like, or callable
Where `cond` is %(cond)s, keep the original value. Where
%(cond_rev)s, replace with corresponding value from `other`.
If `cond` is callable, it is computed on the %(klass)s and
should return boolean %(klass)s or array. The callable must
not change input %(klass)s (though pandas doesn't check it).
other : scalar, %(klass)s, or callable
Entries where `cond` is %(cond_rev)s are replaced with
corresponding value from `other`.
If other is callable, it is computed on the %(klass)s and
should return scalar or %(klass)s. The callable must not
change input %(klass)s (though pandas doesn't check it).
inplace : bool, default False
Whether to perform the operation in place on the data.
axis : int, default None
Alignment axis if needed.
level : int, default None
Alignment level if needed.
errors : str, {'raise', 'ignore'}, default 'raise'
Note that currently this parameter won't affect
the results and will always coerce to a suitable dtype.
- 'raise' : allow exceptions to be raised.
- 'ignore' : suppress exceptions. On error return original object.
try_cast : bool, default False
Try to cast the result back to the input type (if possible).
Returns
-------
Same type as caller
See Also
--------
:func:`DataFrame.%(name_other)s` : Return an object of same shape as
self.
Notes
-----
The %(name)s method is an application of the if-then idiom. For each
element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the
element is used; otherwise the corresponding element from the DataFrame
``other`` is used.
The signature for :func:`DataFrame.where` differs from
:func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
``np.where(m, df1, df2)``.
For further details and examples see the ``%(name)s`` documentation in
:ref:`indexing <indexing.where_mask>`.
Examples
--------
>>> s = pd.Series(range(5))
>>> s.where(s > 0)
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
dtype: float64
>>> s.mask(s > 0)
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
>>> s.where(s > 1, 10)
0 10
1 10
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
>>> df
A B
0 0 1
1 2 3
2 4 5
3 6 7
4 8 9
>>> m = df %% 3 == 0
>>> df.where(m, -df)
A B
0 0 -1
1 -2 3
2 -4 -5
3 6 -7
4 -8 9
>>> df.where(m, -df) == np.where(m, df, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
>>> df.where(m, -df) == df.mask(~m, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
"""
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="True",
cond_rev="False",
name="where",
name_other="mask",
)
)
def where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
other = com.apply_if_callable(other, self)
return self._where(
cond, other, inplace, axis, level, errors=errors, try_cast=try_cast
)
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="False",
cond_rev="True",
name="mask",
name_other="where",
)
)
def mask(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
inplace = validate_bool_kwarg(inplace, "inplace")
cond = com.apply_if_callable(cond, self)
# see gh-21891
if not hasattr(cond, "__invert__"):
cond = np.array(cond)
return self.where(
~cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
try_cast=try_cast,
errors=errors,
)
_shared_docs[
"shift"
] = """
Shift index by desired number of periods with an optional time `freq`.
When `freq` is not passed, shift the index without realigning the data.
If `freq` is passed (in this case, the index must be date or datetime,
or it will raise a `NotImplementedError`), the index will be
increased using the periods and the `freq`.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
freq : DateOffset, tseries.offsets, timedelta, or str, optional
Offset to use from the tseries module or time rule (e.g. 'EOM').
If `freq` is specified then the index values are shifted but the
data is not realigned. That is, use `freq` if you would like to
extend the index when shifting and preserve the original data.
axis : {0 or 'index', 1 or 'columns', None}, default None
Shift direction.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
            The default depends on the dtype of `self`.
For numeric data, ``np.nan`` is used.
For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
For extension dtypes, ``self.dtype.na_value`` is used.
.. versionchanged:: 0.24.0
Returns
-------
%(klass)s
Copy of input object, shifted.
See Also
--------
Index.shift : Shift values of Index.
DatetimeIndex.shift : Shift values of DatetimeIndex.
PeriodIndex.shift : Shift values of PeriodIndex.
tshift : Shift the time index, using the index's frequency if
available.
Examples
--------
>>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]})
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=1, axis='columns')
Col1 Col2 Col3
0 NaN 10.0 13.0
1 NaN 20.0 23.0
2 NaN 15.0 18.0
3 NaN 30.0 33.0
4 NaN 45.0 48.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
"""
@Appender(_shared_docs["shift"] % _shared_doc_kwargs)
def shift(
self: FrameOrSeries, periods=1, freq=None, axis=0, fill_value=None
) -> FrameOrSeries:
if periods == 0:
return self.copy()
block_axis = self._get_block_manager_axis(axis)
if freq is None:
new_data = self._data.shift(
periods=periods, axis=block_axis, fill_value=fill_value
)
else:
return self.tshift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:
"""
Equivalent to `shift` without copying data.
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
Returns
-------
shifted : same type as caller
Notes
-----
While the `slice_shift` is faster than `shift`, you may pay for it
later during alignment.
"""
if periods == 0:
return self
if periods > 0:
vslicer = slice(None, -periods)
islicer = slice(periods, None)
else:
vslicer = slice(-periods, None)
islicer = slice(None, periods)
new_obj = self._slice(vslicer, axis=axis)
shifted_axis = self._get_axis(axis)[islicer]
new_obj.set_axis(shifted_axis, axis=axis, inplace=True)
return new_obj.__finalize__(self)
def tshift(
self: FrameOrSeries, periods: int = 1, freq=None, axis=0
) -> FrameOrSeries:
"""
Shift the time index, using the index's frequency if available.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
freq : DateOffset, timedelta, or str, default None
Increment to use from the tseries module
or time rule expressed as a string (e.g. 'EOM').
        axis : {0 or 'index', 1 or 'columns', None}, default 0
Corresponds to the axis that contains the Index.
Returns
-------
shifted : Series/DataFrame
Notes
-----
        If `freq` is not specified then it tries to use the `freq` or
        `inferred_freq` attributes of the index. If neither of those
        attributes exists, a ValueError is raised.
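        Examples
        --------
        A small illustration, assuming a daily ``DatetimeIndex``:
        >>> idx = pd.date_range('2020-01-01', periods=3, freq='D')
        >>> pd.Series([1, 2, 3], index=idx).tshift(1)
        2020-01-02    1
        2020-01-03    2
        2020-01-04    3
        Freq: D, dtype: int64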
"""
index = self._get_axis(axis)
if freq is None:
freq = getattr(index, "freq", None)
if freq is None:
freq = getattr(index, "inferred_freq", None)
if freq is None:
msg = "Freq was not given and was not set in the index"
raise ValueError(msg)
if periods == 0:
return self
if isinstance(freq, str):
freq = to_offset(freq)
block_axis = self._get_block_manager_axis(axis)
if isinstance(index, PeriodIndex):
orig_freq = to_offset(index.freq)
if freq == orig_freq:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
elif orig_freq is not None:
raise ValueError(
f"Given freq {freq.rule_code} does not match "
f"PeriodIndex freq {orig_freq.rule_code}"
)
else:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def truncate(
self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True
) -> FrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
        copy : bool, default True
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Notes
-----
If the index being truncated contains only datetime values,
`before` and `after` may be specified as strings instead of
Timestamps.
Examples
--------
>>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
The index values in ``truncate`` can be datetimes or string
dates.
>>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
>>> df = pd.DataFrame(index=dates, data={'A': 1})
>>> df.tail()
A
2016-01-31 23:59:56 1
2016-01-31 23:59:57 1
2016-01-31 23:59:58 1
2016-01-31 23:59:59 1
2016-02-01 00:00:00 1
>>> df.truncate(before=pd.Timestamp('2016-01-05'),
... after=pd.Timestamp('2016-01-10')).tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Because the index is a DatetimeIndex containing only dates, we can
specify `before` and `after` as strings. They will be coerced to
Timestamps before truncation.
>>> df.truncate('2016-01-05', '2016-01-10').tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Note that ``truncate`` assumes a 0 value for any unspecified time
component (midnight). This differs from partial string slicing, which
returns any partially matching dates.
>>> df.loc['2016-01-05':'2016-01-10', :].tail()
A
2016-01-10 23:59:55 1
2016-01-10 23:59:56 1
2016-01-10 23:59:57 1
2016-01-10 23:59:58 1
2016-01-10 23:59:59 1
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
# GH 17935
# Check that index is sorted
if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
# if we have a date index, convert to dates, otherwise
# treat like a slice
if ax.is_all_dates:
from pandas.core.tools.datetimes import to_datetime
before = to_datetime(before)
after = to_datetime(after)
if before is not None and after is not None:
if before > after:
raise ValueError(f"Truncate: {after} must be after {before}")
slicer = [slice(None, None)] * self._AXIS_LEN
slicer[axis] = slice(before, after)
result = self.loc[tuple(slicer)]
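        # .loc keeps the original (untruncated) levels on a MultiIndex,
        # so truncate the index levels themselves as well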
if isinstance(ax, MultiIndex):
setattr(result, self._get_axis_name(axis), ax.truncate(before, after))
if copy:
result = result.copy()
return result
def tz_convert(
self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True
) -> FrameOrSeries:
"""
Convert tz-aware axis to target time zone.
Parameters
----------
tz : str or tzinfo object
axis : the axis to convert
level : int, str, default None
If axis is a MultiIndex, convert a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
Returns
-------
%(klass)s
Object with time zone converted axis.
Raises
------
TypeError
If the axis is tz-naive.
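        Examples
        --------
        A minimal sketch, starting from a tz-aware index:
        >>> idx = pd.DatetimeIndex(['2018-09-15 01:30:00']).tz_localize('Europe/Berlin')
        >>> s = pd.Series([1], index=idx)
        >>> s.tz_convert('UTC')
        2018-09-14 23:30:00+00:00    1
        dtype: int64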
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_convert(ax, tz):
if not hasattr(ax, "tz_convert"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_convert(tz)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_convert(ax.levels[level], tz)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_convert(ax, tz)
result = self._constructor(self._data, copy=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self)
def tz_localize(
self: FrameOrSeries,
tz,
axis=0,
level=None,
copy: bool_t = True,
ambiguous="raise",
nonexistent: str = "raise",
) -> FrameOrSeries:
"""
Localize tz-naive index of a Series or DataFrame to target time zone.
This operation localizes the Index. To localize the values in a
timezone-naive Series, use :meth:`Series.dt.tz_localize`.
Parameters
----------
tz : str or tzinfo
axis : the axis to localize
level : int, str, default None
            If axis is a MultiIndex, localize a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
nonexistent : str, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST. Valid values are:
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
            - 'raise' will raise a NonExistentTimeError if there are
nonexistent times.
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Same type as the input.
Raises
------
TypeError
If the TimeSeries is tz-aware and tz is not None.
Examples
--------
Localize local times:
>>> s = pd.Series([1],
... index=pd.DatetimeIndex(['2018-09-15 01:30:00']))
>>> s.tz_localize('CET')
2018-09-15 01:30:00+02:00 1
dtype: int64
Be careful with DST changes. When there is sequential data, pandas
can infer the DST time:
>>> s = pd.Series(range(7),
... index=pd.DatetimeIndex(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.tz_localize('CET', ambiguous='infer')
2018-10-28 01:30:00+02:00 0
2018-10-28 02:00:00+02:00 1
2018-10-28 02:30:00+02:00 2
2018-10-28 02:00:00+01:00 3
2018-10-28 02:30:00+01:00 4
2018-10-28 03:00:00+01:00 5
2018-10-28 03:30:00+01:00 6
dtype: int64
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.Series(range(3),
... index=pd.DatetimeIndex(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))
2018-10-28 01:20:00+02:00 0
2018-10-28 02:36:00+02:00 1
2018-10-28 03:46:00+01:00 2
dtype: int64
        If the DST transition causes nonexistent times, you can shift these
        dates forward or backward with a timedelta object or `'shift_forward'`
        or `'shift_backward'`.
>>> s = pd.Series(range(2),
... index=pd.DatetimeIndex(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
2015-03-29 03:00:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
2015-03-29 01:59:59.999999999+01:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
2015-03-29 03:30:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise', "
"'NaT', 'shift_forward', 'shift_backward' or "
"a timedelta object"
)
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_localize(ax, tz, ambiguous, nonexistent):
if not hasattr(ax, "tz_localize"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_localize(ax, tz, ambiguous, nonexistent)
result = self._constructor(self._data, copy=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self)
# ----------------------------------------------------------------------
# Numeric Methods
def abs(self: FrameOrSeries) -> FrameOrSeries:
"""
Return a Series/DataFrame with absolute numeric value of each element.
This function only applies to elements that are all numeric.
Returns
-------
abs
Series/DataFrame containing the absolute value of each element.
See Also
--------
numpy.absolute : Calculate the absolute value element-wise.
Notes
-----
For ``complex`` inputs, ``1.2 + 1j``, the absolute value is
:math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
Absolute numeric values in a Series.
>>> s = pd.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a Series with complex numbers.
>>> s = pd.Series([1.2 + 1j])
>>> s.abs()
0 1.56205
dtype: float64
Absolute numeric values in a Series with a Timedelta element.
>>> s = pd.Series([pd.Timedelta('1 days')])
>>> s.abs()
0 1 days
dtype: timedelta64[ns]
Select rows with data closest to certain value using argsort (from
`StackOverflow <https://stackoverflow.com/a/17758115>`__).
>>> df = pd.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... })
>>> df
a b c
0 4 10 100
1 5 20 50
2 6 30 -30
3 7 40 -50
>>> df.loc[(df.c - 43).abs().argsort()]
a b c
1 5 20 50
0 4 10 100
2 6 30 -30
3 7 40 -50
"""
return np.abs(self)
def describe(
self: FrameOrSeries, percentiles=None, include=None, exclude=None
) -> FrameOrSeries:
"""
Generate descriptive statistics.
Descriptive statistics include those that summarize the central
tendency, dispersion and shape of a
dataset's distribution, excluding ``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list-like of numbers, optional
The percentiles to include in the output. All should
fall between 0 and 1. The default is
``[.25, .5, .75]``, which returns the 25th, 50th, and
75th percentiles.
include : 'all', list-like of dtypes or None (default), optional
A white list of data types to include in the result. Ignored
for ``Series``. Here are the options:
- 'all' : All columns of the input will be included in the output.
- A list-like of dtypes : Limits the results to the
provided data types.
To limit the result to numeric types submit
``numpy.number``. To limit it instead to object columns submit
the ``numpy.object`` data type. Strings
can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
select pandas categorical columns, use ``'category'``
- None (default) : The result will include all numeric columns.
        exclude : list-like of dtypes or None (default), optional
A black list of data types to omit from the result. Ignored
for ``Series``. Here are the options:
- A list-like of dtypes : Excludes the provided data types
from the result. To exclude numeric types submit
``numpy.number``. To exclude object columns submit the data
type ``numpy.object``. Strings can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
exclude pandas categorical columns, use ``'category'``
- None (default) : The result will exclude nothing.
Returns
-------
Series or DataFrame
Summary statistics of the Series or Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
DataFrame.select_dtypes: Subset of a DataFrame including/excluding
columns based on their dtype.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
upper percentiles. By default the lower percentile is ``25`` and the
upper percentile is ``75``. The ``50`` percentile is the
same as the median.
For object data (e.g. strings or timestamps), the result's index
will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
is the most common value. The ``freq`` is the most common value's
frequency. Timestamps also include the ``first`` and ``last`` items.
If multiple object values have the highest count, then the
``count`` and ``top`` results will be arbitrarily chosen from
among those with the highest count.
For mixed data types provided via a ``DataFrame``, the default is to
return only an analysis of numeric columns. If the dataframe consists
only of object and categorical data without any numeric columns, the
default is to return an analysis of both the object and categorical
columns. If ``include='all'`` is provided as an option, the result
will include a union of attributes of each type.
The `include` and `exclude` parameters can be used to limit
which columns in a ``DataFrame`` are analyzed for the output.
The parameters are ignored when analyzing a ``Series``.
Examples
--------
Describing a numeric ``Series``.
>>> s = pd.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
dtype: float64
Describing a categorical ``Series``.
>>> s = pd.Series(['a', 'a', 'b', 'c'])
>>> s.describe()
count 4
unique 3
top a
freq 2
dtype: object
Describing a timestamp ``Series``.
>>> s = pd.Series([
... np.datetime64("2000-01-01"),
... np.datetime64("2010-01-01"),
... np.datetime64("2010-01-01")
... ])
>>> s.describe()
count 3
unique 2
top 2010-01-01 00:00:00
freq 2
first 2000-01-01 00:00:00
last 2010-01-01 00:00:00
dtype: object
Describing a ``DataFrame``. By default only numeric fields
are returned.
>>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),
... 'numeric': [1, 2, 3],
... 'object': ['a', 'b', 'c']
... })
>>> df.describe()
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Describing all columns of a ``DataFrame`` regardless of data type.
>>> df.describe(include='all')
categorical numeric object
count 3 3.0 3
unique 3 NaN 3
top f NaN c
freq 1 NaN 1
mean NaN 2.0 NaN
std NaN 1.0 NaN
min NaN 1.0 NaN
25% NaN 1.5 NaN
50% NaN 2.0 NaN
75% NaN 2.5 NaN
max NaN 3.0 NaN
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Name: numeric, dtype: float64
Including only numeric columns in a ``DataFrame`` description.
>>> df.describe(include=[np.number])
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Including only string columns in a ``DataFrame`` description.
>>> df.describe(include=[np.object])
object
count 3
unique 3
top c
freq 1
Including only categorical columns from a ``DataFrame`` description.
>>> df.describe(include=['category'])
categorical
count 3
unique 3
top f
freq 1
Excluding numeric columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.number])
categorical object
count 3 3
unique 3 3
top f c
freq 1 1
Excluding object columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.object])
categorical numeric
count 3 3.0
unique 3 NaN
top f NaN
freq 1 NaN
mean NaN 2.0
std NaN 1.0
min NaN 1.0
25% NaN 1.5
50% NaN 2.0
75% NaN 2.5
max NaN 3.0
"""
if self.ndim == 2 and self.columns.size == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
# explicit conversion of `percentiles` to list
percentiles = list(percentiles)
# get them all to be in [0, 1]
validate_percentile(percentiles)
# median should always be included
if 0.5 not in percentiles:
percentiles.append(0.5)
percentiles = np.asarray(percentiles)
else:
percentiles = np.array([0.25, 0.5, 0.75])
# sort and check for duplicates
unique_pcts = np.unique(percentiles)
if len(unique_pcts) < len(percentiles):
raise ValueError("percentiles cannot contain duplicates")
percentiles = unique_pcts
formatted_percentiles = format_percentiles(percentiles)
def describe_numeric_1d(series):
stat_index = (
["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
)
d = (
[series.count(), series.mean(), series.std(), series.min()]
+ series.quantile(percentiles).tolist()
+ [series.max()]
)
return pd.Series(d, index=stat_index, name=series.name)
def describe_categorical_1d(data):
names = ["count", "unique"]
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
result = [data.count(), count_unique]
dtype = None
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
names += ["top", "freq"]
result += [top, freq]
                # If the Series is empty, set 'top' and 'freq' to NaN
                # to maintain output shape consistency
else:
names += ["top", "freq"]
result += [np.nan, np.nan]
dtype = "object"
return pd.Series(result, index=names, name=data.name, dtype=dtype)
def describe_timestamp_1d(data):
# GH-30164
stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"]
d = (
[data.count(), data.mean(), data.min()]
+ data.quantile(percentiles).tolist()
+ [data.max()]
)
return pd.Series(d, index=stat_index, name=data.name)
def describe_1d(data):
if is_bool_dtype(data):
return describe_categorical_1d(data)
elif is_numeric_dtype(data):
return describe_numeric_1d(data)
elif is_datetime64_any_dtype(data):
return describe_timestamp_1d(data)
elif is_timedelta64_dtype(data):
return describe_numeric_1d(data)
else:
return describe_categorical_1d(data)
if self.ndim == 1:
return describe_1d(self)
elif (include is None) and (exclude is None):
# when some numerics are found, keep only numerics
data = self.select_dtypes(include=[np.number])
if len(data.columns) == 0:
data = self
elif include == "all":
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
else:
data = self.select_dtypes(include=include, exclude=exclude)
ldesc = [describe_1d(s) for _, s in data.items()]
# set a convenient order for rows
names: List[Label] = []
ldesc_indexes = sorted((x.index for x in ldesc), key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
d = pd.concat([x.reindex(names, copy=False) for x in ldesc], axis=1, sort=False)
d.columns = data.columns.copy()
return d
_shared_docs[
"pct_change"
] = """
Percentage change between the current and a prior element.
Computes the percentage change from the immediately previous row by
default. This is useful in comparing the percentage of change in a time
series of elements.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : str, default 'pad'
How to handle NAs before computing percent changes.
limit : int, default None
The number of consecutive NAs to fill before stopping.
freq : DateOffset, timedelta, or str, optional
Increment to use from time series API (e.g. 'M' or BDay()).
**kwargs
Additional keyword arguments are passed into
`DataFrame.shift` or `Series.shift`.
Returns
-------
chg : Series or DataFrame
The same type as the calling object.
See Also
--------
Series.diff : Compute the difference of two elements in a Series.
DataFrame.diff : Compute the difference of two elements in a DataFrame.
Series.shift : Shift the index by some number of periods.
DataFrame.shift : Shift the index by some number of periods.
Examples
--------
**Series**
>>> s = pd.Series([90, 91, 85])
>>> s
0 90
1 91
2 85
dtype: int64
>>> s.pct_change()
0 NaN
1 0.011111
2 -0.065934
dtype: float64
>>> s.pct_change(periods=2)
0 NaN
1 NaN
2 -0.055556
dtype: float64
See the percentage change in a Series where filling NAs with last
valid observation forward to next valid.
>>> s = pd.Series([90, 91, None, 85])
>>> s
0 90.0
1 91.0
2 NaN
3 85.0
dtype: float64
>>> s.pct_change(fill_method='ffill')
0 NaN
1 0.011111
2 0.000000
3 -0.065934
dtype: float64
**DataFrame**
Percentage change in French franc, Deutsche Mark, and Italian lira from
1980-01-01 to 1980-03-01.
>>> df = pd.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
Percentage of change in GOOG and APPL stock volume. Shows computing
the percentage change between columns.
>>> df = pd.DataFrame({
... '2016': [1769950, 30586265],
... '2015': [1500923, 40912316],
... '2014': [1371819, 41403351]},
... index=['GOOG', 'APPL'])
>>> df
2016 2015 2014
GOOG 1769950 1500923 1371819
APPL 30586265 40912316 41403351
>>> df.pct_change(axis='columns')
2016 2015 2014
GOOG NaN -0.151997 -0.086016
APPL NaN 0.337604 0.012002
"""
@Appender(_shared_docs["pct_change"] % _shared_doc_kwargs)
def pct_change(
self: FrameOrSeries,
periods=1,
fill_method="pad",
limit=None,
freq=None,
**kwargs,
) -> FrameOrSeries:
# TODO: Not sure if above is correct - need someone to confirm.
axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name))
if fill_method is None:
data = self
else:
data = self._ensure_type(
self.fillna(method=fill_method, axis=axis, limit=limit)
)
rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1
if freq is not None:
# Shift method is implemented differently when freq is not None
# We want to restore the original index
rs = rs.loc[~rs.index.duplicated()]
rs = rs.reindex_like(data)
return rs
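    # Sketch: for a Series with no NAs and default arguments, the
    # computation above reduces to a plain shifted division, e.g.:
    #   >>> s = pd.Series([90.0, 91.0, 85.0])
    #   >>> (s / s.shift(1) - 1).equals(s.pct_change())
    #   True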
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
if axis is None:
raise ValueError("Must specify 'axis' when aggregating by level.")
grouped = self.groupby(level=level, axis=axis, sort=False)
if hasattr(grouped, name) and skipna:
return getattr(grouped, name)(**kwargs)
axis = self._get_axis_number(axis)
method = getattr(type(self), name)
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
return grouped.aggregate(applyf)
@classmethod
def _add_numeric_operations(cls):
"""
Add the operations to the cls; evaluate the doc strings again
"""
axis_descr, name1, name2 = _doc_parms(cls)
cls.any = _make_logical_function(
cls,
"any",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc=_any_desc,
func=nanops.nanany,
see_also=_any_see_also,
examples=_any_examples,
empty_value=False,
)
cls.all = _make_logical_function(
cls,
"all",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc=_all_desc,
func=nanops.nanall,
see_also=_all_see_also,
examples=_all_examples,
empty_value=True,
)
@Substitution(
desc="Return the mean absolute deviation of the values "
"for the requested axis.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also="",
examples="",
)
@Appender(_num_doc_mad)
def mad(self, axis=None, skipna=None, level=None):
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna)
data = self._get_numeric_data()
if axis == 0:
demeaned = data - data.mean(axis=0)
else:
demeaned = data.sub(data.mean(axis=1), axis=0)
return np.abs(demeaned).mean(axis=axis, skipna=skipna)
cls.mad = mad
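        # For example, the mean absolute deviation of [1, 2, 3] is 2/3:
        # the mean is 2.0 and the absolute deviations from it are
        # [1, 0, 1], so:
        #   >>> pd.Series([1, 2, 3]).mad()
        #   0.6666666666666666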
cls.sem = _make_stat_function_ddof(
cls,
"sem",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased standard error of the mean over requested "
"axis.\n\nNormalized by N-1 by default. This can be changed "
"using the ddof argument",
func=nanops.nansem,
)
cls.var = _make_stat_function_ddof(
cls,
"var",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased variance over requested axis.\n\nNormalized by "
"N-1 by default. This can be changed using the ddof argument",
func=nanops.nanvar,
)
cls.std = _make_stat_function_ddof(
cls,
"std",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
"ddof argument",
func=nanops.nanstd,
)
cls.cummin = _make_cum_function(
cls,
"cummin",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="minimum",
accum_func=np.minimum.accumulate,
accum_func_name="min",
mask_a=np.inf,
mask_b=np.nan,
examples=_cummin_examples,
)
cls.cumsum = _make_cum_function(
cls,
"cumsum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="sum",
accum_func=np.cumsum,
accum_func_name="sum",
mask_a=0.0,
mask_b=np.nan,
examples=_cumsum_examples,
)
cls.cumprod = _make_cum_function(
cls,
"cumprod",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="product",
accum_func=np.cumprod,
accum_func_name="prod",
mask_a=1.0,
mask_b=np.nan,
examples=_cumprod_examples,
)
cls.cummax = _make_cum_function(
cls,
"cummax",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="maximum",
accum_func=np.maximum.accumulate,
accum_func_name="max",
mask_a=-np.inf,
mask_b=np.nan,
examples=_cummax_examples,
)
cls.sum = _make_min_count_stat_function(
cls,
"sum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the sum of the values for the requested axis.\n\n"
"This is equivalent to the method ``numpy.sum``.",
func=nanops.nansum,
see_also=_stat_func_see_also,
examples=_sum_examples,
)
cls.mean = _make_stat_function(
cls,
"mean",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the mean of the values for the requested axis.",
func=nanops.nanmean,
)
cls.skew = _make_stat_function(
cls,
"skew",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.",
func=nanops.nanskew,
)
cls.kurt = _make_stat_function(
cls,
"kurt",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased kurtosis over requested axis.\n\n"
"Kurtosis obtained using Fisher's definition of\n"
"kurtosis (kurtosis of normal == 0.0). Normalized "
"by N-1.",
func=nanops.nankurt,
)
cls.kurtosis = cls.kurt
cls.prod = _make_min_count_stat_function(
cls,
"prod",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the product of the values for the requested axis.",
func=nanops.nanprod,
examples=_prod_examples,
)
cls.product = cls.prod
cls.median = _make_stat_function(
cls,
"median",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the median of the values for the requested axis.",
func=nanops.nanmedian,
)
cls.max = _make_stat_function(
cls,
"max",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the maximum of the values for the requested axis.\n\n"
"If you want the *index* of the maximum, use ``idxmax``. This is"
"the equivalent of the ``numpy.ndarray`` method ``argmax``.",
func=nanops.nanmax,
see_also=_stat_func_see_also,
examples=_max_examples,
)
cls.min = _make_stat_function(
cls,
"min",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the minimum of the values for the requested axis.\n\n"
"If you want the *index* of the minimum, use ``idxmin``. This is"
"the equivalent of the ``numpy.ndarray`` method ``argmin``.",
func=nanops.nanmin,
see_also=_stat_func_see_also,
examples=_min_examples,
)
@classmethod
def _add_series_or_dataframe_operations(cls):
"""
Add the series or dataframe only operations to the cls; evaluate
the doc strings again.
"""
from pandas.core.window import EWM, Expanding, Rolling, Window
@Appender(Rolling.__doc__)
def rolling(
self,
window,
min_periods=None,
center=False,
win_type=None,
on=None,
axis=0,
closed=None,
):
axis = self._get_axis_number(axis)
if win_type is not None:
return Window(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
return Rolling(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
cls.rolling = rolling
@Appender(Expanding.__doc__)
def expanding(self, min_periods=1, center=False, axis=0):
axis = self._get_axis_number(axis)
return Expanding(self, min_periods=min_periods, center=center, axis=axis)
cls.expanding = expanding
@Appender(EWM.__doc__)
def ewm(
self,
com=None,
span=None,
halflife=None,
alpha=None,
min_periods=0,
adjust=True,
ignore_na=False,
axis=0,
):
axis = self._get_axis_number(axis)
return EWM(
self,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
)
cls.ewm = ewm
@Appender(_shared_docs["transform"] % dict(axis="", **_shared_doc_kwargs))
def transform(self, func, *args, **kwargs):
result = self.agg(func, *args, **kwargs)
if is_scalar(result) or len(result) != len(self):
raise ValueError("transforms cannot produce aggregated results")
return result
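    # Sketch: `transform` must preserve the caller's length, so
    # element-wise functions pass and aggregations raise, e.g.:
    #   >>> df = pd.DataFrame({"a": [1, 2]})
    #   >>> df.transform(lambda x: x + 1)["a"].tolist()
    #   [2, 3]
    # whereas df.transform("sum") raises ValueError, since the
    # aggregated result has length 1, not len(df) == 2.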
# ----------------------------------------------------------------------
# Misc methods
_shared_docs[
"valid_index"
] = """
Return index for %(position)s non-NA/null value.
Returns
-------
scalar : type of index
Notes
-----
        If all elements are NA/null, returns None.
Also returns None for empty %(klass)s.
"""
def _find_valid_index(self, how: str):
"""
Retrieves the index of the first valid value.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
idx_first_valid : type of index
"""
idxpos = find_valid_index(self._values, how)
if idxpos is None:
return None
return self.index[idxpos]
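    # For example, with how="first" the position of the first non-NA
    # value is translated into an index label:
    #   >>> pd.Series([np.nan, 2.0, np.nan]).first_valid_index()
    #   1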
@Appender(
_shared_docs["valid_index"] % {"position": "first", "klass": "Series/DataFrame"}
)
def first_valid_index(self):
return self._find_valid_index("first")
@Appender(
_shared_docs["valid_index"] % {"position": "last", "klass": "Series/DataFrame"}
)
def last_valid_index(self):
return self._find_valid_index("last")
def _doc_parms(cls):
"""Return a tuple of the doc parms."""
axis_descr = (
f"{{{', '.join(f'{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS))}}}"
)
name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else "scalar"
name2 = cls.__name__
return axis_descr, name, name2
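# For example, for DataFrame (_AXIS_ORDERS == ["index", "columns"]) this
# should return ("{index (0), columns (1)}", "Series", "DataFrame"), which
# is what fills the %(axis_descr)s / %(name1)s / %(name2)s placeholders in
# the doc templates below.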
_num_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
%(min_count)s\
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
%(name1)s or %(name2)s (if level specified)\
%(see_also)s\
%(examples)s
"""
_num_doc_mad = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default None
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
Returns
-------
%(name1)s or %(name2)s (if level specified)\
%(see_also)s\
%(examples)s
"""
_num_ddof_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
%(name1)s or %(name2)s (if level specified)\n"""
_bool_doc = """
%(desc)s
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
* 1 / 'columns' : reduce the columns, return a Series whose index is the
original index.
* None : reduce all axes, return a scalar.
bool_only : bool, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data. Not implemented for Series.
skipna : bool, default True
Exclude NA/null values. If the entire row/column is NA and skipna is
True, then the result will be %(empty_value)s, as for an empty row/column.
If skipna is False, then NA are treated as True, because these are not
equal to zero.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
If level is specified, then, %(name2)s is returned; otherwise, %(name1)s
is returned.
%(see_also)s
%(examples)s"""
_all_desc = """\
Return whether all elements are True, potentially over an axis.
Returns True unless there is at least one element within a series or
along a DataFrame axis that is False or equivalent (e.g. zero or
empty)."""
_all_examples = """\
Examples
--------
**Series**
>>> pd.Series([True, True]).all()
True
>>> pd.Series([True, False]).all()
False
>>> pd.Series([]).all()
True
>>> pd.Series([np.nan]).all()
True
>>> pd.Series([np.nan]).all(skipna=False)
True
**DataFrames**
Create a dataframe from a dictionary.
>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
col1 col2
0 True True
1 True False
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
dtype: bool
Specify ``axis='columns'`` to check if row-wise values all return True.
>>> df.all(axis='columns')
0 True
1 False
dtype: bool
Or ``axis=None`` for whether every value is True.
>>> df.all(axis=None)
False
"""
_all_see_also = """\
See Also
--------
Series.all : Return True if all elements are True.
DataFrame.any : Return True if one (or more) elements are True.
"""
_cnum_doc = """
Return cumulative %(desc)s over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
%(desc)s.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs :
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
See Also
--------
core.window.Expanding.%(accum_func_name)s : Similar functionality
but ignores ``NaN`` values.
%(name2)s.%(accum_func_name)s : Return the %(desc)s over
%(name2)s axis.
%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.
%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.
%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.
%(name2)s.cumprod : Return cumulative product over %(name2)s axis.
%(examples)s"""
_cummin_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummin()
0 2.0
1 NaN
2 2.0
3 -1.0
4 -1.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummin(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
To iterate over columns and find the minimum in each row,
use ``axis=1``
>>> df.cummin(axis=1)
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
"""
_cumsum_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumsum()
0 2.0
1 NaN
2 7.0
3 6.0
4 6.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumsum(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
To iterate over columns and find the sum in each row,
use ``axis=1``
>>> df.cumsum(axis=1)
A B
0 2.0 3.0
1 3.0 NaN
2 1.0 1.0
"""
_cumprod_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumprod()
0 2.0
1 NaN
2 10.0
3 -10.0
4 -0.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumprod(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the product
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 6.0 0.0
To iterate over columns and find the product in each row,
use ``axis=1``
>>> df.cumprod(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 0.0
"""
_cummax_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummax()
0 2.0
1 NaN
2 5.0
3 5.0
4 5.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummax(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
To iterate over columns and find the maximum in each row,
use ``axis=1``
>>> df.cummax(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 1.0
"""
_any_see_also = """\
See Also
--------
numpy.any : Numpy version of this method.
Series.any : Return whether any element is True.
Series.all : Return whether all elements are True.
DataFrame.any : Return whether any element is True over requested axis.
DataFrame.all : Return whether all elements are True over requested axis.
"""
_any_desc = """\
Return whether any element is True, potentially over an axis.
Returns False unless there is at least one element within a series or
along a DataFrame axis that is True or equivalent (e.g. non-zero or
non-empty)."""
_any_examples = """\
Examples
--------
**Series**
For Series input, the output is a scalar indicating whether any element
is True.
>>> pd.Series([False, False]).any()
False
>>> pd.Series([True, False]).any()
True
>>> pd.Series([]).any()
False
>>> pd.Series([np.nan]).any()
False
>>> pd.Series([np.nan]).any(skipna=False)
True
**DataFrame**
Whether each column contains at least one True element (the default).
>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]})
>>> df
A B C
0 1 0 0
1 2 2 0
>>> df.any()
A True
B True
C False
dtype: bool
Aggregating over the columns.
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]})
>>> df
A B
0 True 1
1 False 2
>>> df.any(axis='columns')
0 True
1 True
dtype: bool
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]})
>>> df
A B
0 True 1
1 False 0
>>> df.any(axis='columns')
0 True
1 False
dtype: bool
Aggregating over the entire DataFrame with ``axis=None``.
>>> df.any(axis=None)
True
`any` for an empty DataFrame is an empty Series.
>>> pd.DataFrame([]).any()
Series([], dtype: bool)
"""
_shared_docs[
"stat_func_example"
] = """
Examples
--------
>>> idx = pd.MultiIndex.from_arrays([
... ['warm', 'warm', 'cold', 'cold'],
... ['dog', 'falcon', 'fish', 'spider']],
... names=['blooded', 'animal'])
>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)
>>> s
blooded animal
warm dog 4
falcon 2
cold fish 0
spider 8
Name: legs, dtype: int64
>>> s.{stat_func}()
{default_output}
{verb} using level names, as well as indices.
>>> s.{stat_func}(level='blooded')
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64
>>> s.{stat_func}(level=0)
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64"""
_sum_examples = _shared_docs["stat_func_example"].format(
stat_func="sum", verb="Sum", default_output=14, level_output_0=6, level_output_1=8
)
_sum_examples += """
By default, the sum of an empty or all-NA Series is ``0``.
>>> pd.Series([]).sum() # min_count=0 is the default
0.0
This can be controlled with the ``min_count`` parameter. For example, if
you'd like the sum of an empty series to be NaN, pass ``min_count=1``.
>>> pd.Series([]).sum(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).sum()
0.0
>>> pd.Series([np.nan]).sum(min_count=1)
nan"""
_max_examples = _shared_docs["stat_func_example"].format(
stat_func="max", verb="Max", default_output=8, level_output_0=4, level_output_1=8
)
_min_examples = _shared_docs["stat_func_example"].format(
stat_func="min", verb="Min", default_output=0, level_output_0=2, level_output_1=0
)
_stat_func_see_also = """
See Also
--------
Series.sum : Return the sum.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis."""
_prod_examples = """
Examples
--------
By default, the product of an empty or all-NA Series is ``1``
>>> pd.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> pd.Series([]).prod(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).prod()
1.0
>>> pd.Series([np.nan]).prod(min_count=1)
nan"""
_min_count_stub = """\
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
.. versionadded:: 0.22.0
Added with the default being 0. This means the sum of an all-NA
or empty Series is 0, and the product of an all-NA or empty
Series is 1.
"""
def _make_min_count_stat_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
func: Callable,
see_also: str = "",
examples: str = "",
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count=_min_count_stub,
see_also=see_also,
examples=examples,
)
@Appender(_num_doc)
def stat_func(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
if name == "sum":
nv.validate_sum(tuple(), kwargs)
elif name == "prod":
nv.validate_prod(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, min_count=min_count
)
return self._reduce(
func,
name=name,
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
return set_function_name(stat_func, name, cls)
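# Sketch of the `level` path above: it delegates to a groupby on that
# level (with sort=False), so for a presorted MultiIndex:
#   >>> idx = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
#   >>> s = pd.Series([1, 2, 4], index=idx)
#   >>> s.sum(level=0).equals(s.groupby(level=0).sum())
#   True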
def _make_stat_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
func: Callable,
see_also: str = "",
examples: str = "",
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also=see_also,
examples=examples,
)
@Appender(_num_doc)
def stat_func(
self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
):
if name == "median":
nv.validate_median(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only
)
return set_function_name(stat_func, name, cls)
def _make_stat_function_ddof(
cls, name: str, name1: str, name2: str, axis_descr: str, desc: str, func: Callable
) -> Callable:
@Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)
@Appender(_num_ddof_doc)
def stat_func(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, ddof=ddof
)
return self._reduce(
func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof
)
return set_function_name(stat_func, name, cls)
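# For example, `ddof` is forwarded to the reduction, so sample and
# population standard deviations differ as expected:
#   >>> pd.Series([1, 2, 3]).std()        # ddof=1, the default
#   1.0
#   >>> pd.Series([1, 2, 3]).std(ddof=0)  # population estimate
#   0.816496580927726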
def _make_cum_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
accum_func: Callable,
accum_func_name: str,
mask_a: float,
mask_b: float,
examples: str,
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
accum_func_name=accum_func_name,
examples=examples,
)
@Appender(_cnum_doc)
def cum_func(self, axis=None, skipna=True, *args, **kwargs):
skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
if axis is None:
axis = self._stat_axis_number
else:
axis = self._get_axis_number(axis)
if axis == 1:
return cum_func(self.T, axis=0, skipna=skipna, *args, **kwargs).T
def na_accum_func(blk_values):
# We will be applying this function to block values
if blk_values.dtype.kind in ["m", "M"]:
# GH#30460, GH#29058
# numpy 1.18 started sorting NaTs at the end instead of beginning,
# so we need to work around to maintain backwards-consistency.
orig_dtype = blk_values.dtype
# We need to define mask before masking NaTs
mask = isna(blk_values)
if accum_func == np.minimum.accumulate:
# Note: the accum_func comparison fails as an "is" comparison
y = blk_values.view("i8")
y[mask] = np.iinfo(np.int64).max
changed = True
else:
y = blk_values
changed = False
result = accum_func(y.view("i8"), axis)
if skipna:
np.putmask(result, mask, iNaT)
elif accum_func == np.minimum.accumulate:
# Restore NaTs that we masked previously
nz = (~np.asarray(mask)).nonzero()[0]
if len(nz):
# everything up to the first non-na entry stays NaT
result[: nz[0]] = iNaT
if changed:
# restore NaT elements
y[mask] = iNaT # TODO: could try/finally for this?
if isinstance(blk_values, np.ndarray):
result = result.view(orig_dtype)
else:
# DatetimeArray
result = type(blk_values)._from_sequence(result, dtype=orig_dtype)
elif skipna and not issubclass(
blk_values.dtype.type, (np.integer, np.bool_)
):
vals = blk_values.copy().T
mask = isna(vals)
np.putmask(vals, mask, mask_a)
result = accum_func(vals, axis)
np.putmask(result, mask, mask_b)
else:
result = accum_func(blk_values.T, axis)
# transpose back for ndarray, not for EA
return result.T if hasattr(result, "T") else result
result = self._data.apply(na_accum_func)
d = self._construct_axes_dict()
d["copy"] = False
return self._constructor(result, **d).__finalize__(self)
return set_function_name(cum_func, name, cls)
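# Sketch of the datetime branch in na_accum_func above: NaT values are
# masked (to the int64 max for cummin) so accumulation skips them, then
# restored afterwards, e.g.:
#   >>> s = pd.Series(pd.to_datetime(["2020-01-02", None, "2020-01-01"]))
#   >>> s.cummin()
#   0   2020-01-02
#   1          NaT
#   2   2020-01-01
#   dtype: datetime64[ns]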
def _make_logical_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
func: Callable,
see_also: str,
examples: str,
empty_value: bool,
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
see_also=see_also,
examples=examples,
empty_value=empty_value,
)
@Appender(_bool_doc)
def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
nv.validate_logical_func(tuple(), kwargs, fname=name)
if level is not None:
if bool_only is not None:
raise NotImplementedError(
"Option bool_only is not implemented with option level."
)
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
func,
name=name,
axis=axis,
skipna=skipna,
numeric_only=bool_only,
filter_type="bool",
)
return set_function_name(logical_func, name, cls)
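# For example, `bool_only` is passed through as `numeric_only` with
# filter_type="bool", so non-boolean columns are dropped:
#   >>> df = pd.DataFrame({"a": [True, False], "b": [10, 20]})
#   >>> df.any(bool_only=True)
#   a    True
#   dtype: bool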
| 33.135041
| 104
| 0.529394
|
import collections
from datetime import timedelta
import functools
import gc
import json
import operator
import pickle
import re
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
FrozenSet,
Hashable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
)
import warnings
import weakref
import numpy as np
from pandas._config import config
from pandas._libs import Timestamp, iNaT, lib
from pandas._typing import (
Axis,
FilePathOrBuffer,
FrameOrSeries,
JSONSerializable,
Label,
Level,
Renamer,
)
from pandas.compat import set_function_name
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
Appender,
Substitution,
doc,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_bool_kwarg,
validate_fillna_kwargs,
validate_percentile,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_dict_like,
is_extension_array_dtype,
is_float,
is_integer,
is_list_like,
is_number,
is_numeric_dtype,
is_object_dtype,
is_re_compilable,
is_scalar,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
import pandas as pd
from pandas.core import missing, nanops
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.indexes.api import (
Index,
InvalidIndexError,
MultiIndex,
RangeIndex,
ensure_index,
)
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import Period, PeriodIndex
import pandas.core.indexing as indexing
from pandas.core.internals import BlockManager
from pandas.core.missing import find_valid_index
from pandas.core.ops import _align_method_FRAME
from pandas.io.formats import format as fmt
from pandas.io.formats.format import DataFrameFormatter, format_percentiles
from pandas.io.formats.printing import pprint_thing
from pandas.tseries.frequencies import to_offset
if TYPE_CHECKING:
from pandas.core.resample import Resampler
_shared_docs: Dict[str, str] = dict()
_shared_doc_kwargs = dict(
axes="keywords for axes",
klass="Series/DataFrame",
axes_single_arg="int or labels for object",
args_transpose="axes to permute (int or label for object)",
optional_by="""
by : str or list of str
Name or list of names to sort by""",
)
def _single_replace(self, to_replace, method, inplace, limit):
if self.ndim != 1:
raise TypeError(
f"cannot replace {to_replace} with method {method} on a "
f"{type(self).__name__}"
)
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result
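# For example, replacing by method pads each matched position from the
# previous value while preserving the dtype:
#   >>> pd.Series([0, 1, 2]).replace(1, method="pad")
#   0    0
#   1    0
#   2    2
#   dtype: int64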
bool_t = bool
class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):
_internal_names: List[str] = [
"_data",
"_cacher",
"_item_cache",
"_cache",
"_is_copy",
"_subtyp",
"_name",
"_index",
"_default_kind",
"_default_fill_value",
"_metadata",
"__array_struct__",
"__array_interface__",
]
_internal_names_set: Set[str] = set(_internal_names)
_accessors: Set[str] = set()
_deprecations: FrozenSet[str] = frozenset(["get_values"])
_metadata: List[str] = []
_is_copy = None
_data: BlockManager
_attrs: Dict[Optional[Hashable], Any]
_typ: str
def __init__(
self,
data: BlockManager,
copy: bool = False,
attrs: Optional[Mapping[Optional[Hashable], Any]] = None,
):
object.__setattr__(self, "_is_copy", None)
object.__setattr__(self, "_data", data)
object.__setattr__(self, "_item_cache", {})
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
object.__setattr__(self, "_attrs", attrs)
@classmethod
def _init_mgr(cls, mgr, axes=None, dtype=None, copy=False):
for a, axe in axes.items():
if axe is not None:
mgr = mgr.reindex_axis(
axe, axis=cls._get_block_manager_axis(a), copy=False
)
if copy:
mgr = mgr.copy()
if dtype is not None:
if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
mgr = mgr.astype(dtype=dtype)
return mgr
@property
def attrs(self) -> Dict[Optional[Hashable], Any]:
if self._attrs is None:
self._attrs = {}
return self._attrs
@attrs.setter
def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None:
self._attrs = dict(value)
@classmethod
def _validate_dtype(cls, dtype):
if dtype is not None:
dtype = pandas_dtype(dtype)
if dtype.kind == "V":
raise NotImplementedError(
"compound dtypes are not implemented "
f"in the {cls.__name__} constructor"
)
return dtype
@property
def _constructor(self: FrameOrSeries) -> Type[FrameOrSeries]:
raise AbstractMethodError(self)
@property
def _constructor_sliced(self):
raise AbstractMethodError(self)
@property
def _constructor_expanddim(self):
raise NotImplementedError
_AXIS_ALIASES = {"rows": 0}
_AXIS_IALIASES = {0: "rows"}
_stat_axis_number = 0
_stat_axis_name = "index"
_ix = None
_AXIS_ORDERS: List[str]
_AXIS_NUMBERS: Dict[str, int]
_AXIS_NAMES: Dict[int, str]
_AXIS_REVERSED: bool
_info_axis_number: int
_info_axis_name: str
_AXIS_LEN: int
def _construct_axes_dict(self, axes=None, **kwargs):
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
@classmethod
def _construct_axes_from_arguments(
cls, args, kwargs, require_all: bool = False, sentinel=None
):
args = list(args)
for a in cls._AXIS_ORDERS:
if a not in kwargs:
try:
kwargs[a] = args.pop(0)
except IndexError as err:
if require_all:
raise TypeError(
"not enough/duplicate arguments specified!"
) from err
axes = {a: kwargs.pop(a, sentinel) for a in cls._AXIS_ORDERS}
return axes, kwargs
@classmethod
def _get_axis_number(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if is_integer(axis):
if axis in cls._AXIS_NAMES:
return axis
else:
try:
return cls._AXIS_NUMBERS[axis]
except KeyError:
pass
raise ValueError(f"No axis named {axis} for object type {cls}")
@classmethod
def _get_axis_name(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, str):
if axis in cls._AXIS_NUMBERS:
return axis
else:
try:
return cls._AXIS_NAMES[axis]
except KeyError:
pass
raise ValueError(f"No axis named {axis} for object type {cls}")
def _get_axis(self, axis):
name = self._get_axis_name(axis)
return getattr(self, name)
@classmethod
def _get_block_manager_axis(cls, axis):
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis
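    # For example, DataFrame has _AXIS_REVERSED == True and
    # _AXIS_LEN == 2, so user axis 0 (index) maps to BlockManager axis 1
    # and vice versa, while Series (_AXIS_REVERSED == False) maps 0 to 0.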
def _get_axis_resolvers(self, axis: str) -> Dict[str, ABCSeries]:
axis_index = getattr(self, axis)
d = dict()
prefix = axis[0]
for i, name in enumerate(axis_index.names):
if name is not None:
key = level = name
else:
key = f"{prefix}level_{i}"
level = i
level_values = axis_index.get_level_values(level)
s = level_values.to_series()
s.index = axis_index
d[key] = s
if isinstance(axis_index, MultiIndex):
dindex = axis_index
else:
dindex = axis_index.to_series()
d[axis] = dindex
return d
def _get_index_resolvers(self) -> Dict[str, ABCSeries]:
from pandas.core.computation.parsing import clean_column_name
d: Dict[str, ABCSeries] = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)}
def _get_cleaned_column_resolvers(self) -> Dict[str, ABCSeries]:
from pandas.core.computation.parsing import clean_column_name
if isinstance(self, ABCSeries):
return {clean_column_name(self.name): self}
return {
clean_column_name(k): v for k, v in self.items() if not isinstance(k, int)
}
@property
def _info_axis(self):
return getattr(self, self._info_axis_name)
@property
def _stat_axis(self):
return getattr(self, self._stat_axis_name)
@property
def shape(self) -> Tuple[int, ...]:
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
@property
def axes(self) -> List[Index]:
return [self._get_axis(a) for a in self._AXIS_ORDERS]
@property
def ndim(self) -> int:
return self._data.ndim
@property
def size(self) -> int:
return np.prod(self.shape)
@property
def _selected_obj(self: FrameOrSeries) -> FrameOrSeries:
return self
@property
def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries:
return self
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
def _set_axis(self, axis: int, labels: Index) -> None:
labels = ensure_index(labels)
self._data.set_axis(axis, labels)
self._clear_item_cache()
def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(self)
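    # For a two-dimensional frame, swapping the two axes amounts to a
    # (copying) transpose, e.g.:
    #   >>> df = pd.DataFrame({"a": [1, 2]})
    #   >>> df.swapaxes("index", "columns").equals(df.T)
    #   True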
def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries:
labels = self._get_axis(axis)
new_labels = labels.droplevel(level)
result = self.set_axis(new_labels, axis=axis, inplace=False)
return result
def pop(self: FrameOrSeries, item) -> FrameOrSeries:
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result
def squeeze(self, axis=None):
axis = self._AXIS_NAMES if axis is None else (self._get_axis_number(axis),)
return self.iloc[
tuple(
0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes)
)
]
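    # For example, only length-1 axes are collapsed:
    #   >>> pd.DataFrame([[1]]).squeeze()
    #   1
    #   >>> pd.DataFrame({"a": [1, 2]}).squeeze("columns")
    #   0    1
    #   1    2
    #   Name: a, dtype: int64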
def rename(
self: FrameOrSeries,
mapper: Optional[Renamer] = None,
*,
index: Optional[Renamer] = None,
columns: Optional[Renamer] = None,
axis: Optional[Axis] = None,
copy: bool = True,
inplace: bool = False,
level: Optional[Level] = None,
errors: str = "ignore",
) -> Optional[FrameOrSeries]:
if mapper is None and index is None and columns is None:
raise TypeError("must pass an index to rename")
if index is not None or columns is not None:
if axis is not None:
raise TypeError(
"Cannot specify both 'axis' and any of 'index' or 'columns'"
)
elif mapper is not None:
raise TypeError(
"Cannot specify both 'mapper' and any of 'index' or 'columns'"
)
else:
if axis and self._get_axis_number(axis) == 1:
columns = mapper
else:
index = mapper
result = self if inplace else self.copy(deep=copy)
for axis_no, replacements in enumerate((index, columns)):
if replacements is None:
continue
ax = self._get_axis(axis_no)
baxis = self._get_block_manager_axis(axis_no)
f = com.get_rename_function(replacements)
if level is not None:
level = ax._get_level_number(level)
if not callable(replacements):
indexer = ax.get_indexer_for(replacements)
if errors == "raise" and len(indexer[indexer == -1]):
missing_labels = [
label
for index, label in enumerate(replacements)
if indexer[index] == -1
]
raise KeyError(f"{missing_labels} not found in axis")
result._data = result._data.rename_axis(
f, axis=baxis, copy=copy, level=level
)
result._clear_item_cache()
if inplace:
self._update_inplace(result._data)
return None
else:
return result.__finalize__(self)
@rewrite_axis_style_signature("mapper", [("copy", True), ("inplace", False)])
def rename_axis(self, mapper=lib.no_default, **kwargs):
axes, kwargs = self._construct_axes_from_arguments(
(), kwargs, sentinel=lib.no_default
)
copy = kwargs.pop("copy", True)
inplace = kwargs.pop("inplace", False)
axis = kwargs.pop("axis", 0)
if axis is not None:
axis = self._get_axis_number(axis)
if kwargs:
raise TypeError(
"rename_axis() got an unexpected keyword "
f'argument "{list(kwargs.keys())[0]}"'
)
inplace = validate_bool_kwarg(inplace, "inplace")
if mapper is not lib.no_default:
non_mapper = is_scalar(mapper) or (
is_list_like(mapper) and not is_dict_like(mapper)
)
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
raise ValueError("Use `.rename` to alter labels with a mapper.")
else:
result = self if inplace else self.copy(deep=copy)
for axis in range(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is lib.no_default:
continue
non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))
if non_mapper:
newnames = v
else:
f = com.get_rename_function(v)
curnames = self._get_axis(axis).names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis, inplace=True)
if not inplace:
return result
def _set_axis_name(self, name, axis=0, inplace=False):
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
inplace = validate_bool_kwarg(inplace, "inplace")
renamed = self if inplace else self.copy()
renamed.set_axis(idx, axis=axis, inplace=True)
if not inplace:
return renamed
def _indexed_same(self, other) -> bool:
return all(
self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS
)
def equals(self, other):
if not isinstance(other, self._constructor):
return False
return self._data.equals(other._data)
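    # Note that equals() treats NaNs in matching locations as equal,
    # unlike element-wise comparison:
    #   >>> s = pd.Series([1.0, np.nan])
    #   >>> s.equals(pd.Series([1.0, np.nan]))
    #   True
    #   >>> (s == s).all()
    #   False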
def __neg__(self):
values = self._values
if is_bool_dtype(values):
arr = operator.inv(values)
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.neg(values)
else:
raise TypeError(f"Unary negative expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
def __pos__(self):
values = self._values
if is_bool_dtype(values):
arr = values
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.pos(values)
else:
raise TypeError(f"Unary plus expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
def __invert__(self):
if not self.size:
return self
new_data = self._data.apply(operator.invert)
result = self._constructor(new_data).__finalize__(self)
return result
def __nonzero__(self):
raise ValueError(
f"The truth value of a {type(self).__name__} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
)
__bool__ = __nonzero__
def bool(self):
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError(
"bool cannot act on a non-boolean single element "
f"{type(self).__name__}"
)
self.__nonzero__()
def __abs__(self: FrameOrSeries) -> FrameOrSeries:
return self.abs()
def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries:
return self.round(decimals)
def _is_level_reference(self, key, axis=0):
axis = self._get_axis_number(axis)
return (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and not self._is_label_reference(key, axis=axis)
)
def _is_label_reference(self, key, axis=0) -> bool_t:
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
return (
key is not None
and is_hashable(key)
and any(key in self.axes[ax] for ax in other_axes)
)
def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:
return self._is_level_reference(key, axis=axis) or self._is_label_reference(
key, axis=axis
)
def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
if (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and any(key in self.axes[ax] for ax in other_axes)
):
level_article, level_type = (
("an", "index") if axis == 0 else ("a", "column")
)
label_article, label_type = (
("a", "column") if axis == 0 else ("an", "index")
)
msg = (
f"'{key}' is both {level_article} {level_type} level and "
f"{label_article} {label_type} label, which is ambiguous."
)
raise ValueError(msg)
def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
if self._is_label_reference(key, axis=axis):
self._check_label_or_level_ambiguity(key, axis=axis)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
else:
raise KeyError(key)
if values.ndim > 1:
if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):
multi_message = (
"\n"
"For a multi-index, the label must be a "
"tuple with elements corresponding to each level."
)
else:
multi_message = ""
label_axis_name = "column" if axis == 0 else "index"
raise ValueError(
(
f"The {label_axis_name} label '{key}' "
f"is not unique.{multi_message}"
)
)
return values
def _drop_labels_or_levels(self, keys, axis: int = 0):
axis = self._get_axis_number(axis)
keys = com.maybe_make_list(keys)
invalid_keys = [
k for k in keys if not self._is_label_or_level_reference(k, axis=axis)
]
if invalid_keys:
raise ValueError(
(
"The following keys are not valid labels or "
f"levels for axis {axis}: {invalid_keys}"
)
)
levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]
labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]
dropped = self.copy()
if axis == 0:
if levels_to_drop:
dropped.reset_index(levels_to_drop, drop=True, inplace=True)
if labels_to_drop:
dropped.drop(labels_to_drop, axis=1, inplace=True)
else:
if levels_to_drop:
if isinstance(dropped.columns, MultiIndex):
dropped.columns = dropped.columns.droplevel(levels_to_drop)
else:
dropped.columns = RangeIndex(dropped.columns.size)
if labels_to_drop:
dropped.drop(labels_to_drop, axis=0, inplace=True)
return dropped
def __hash__(self):
raise TypeError(
f"{repr(type(self).__name__)} objects are mutable, "
f"thus they cannot be hashed"
)
def __iter__(self):
return iter(self._info_axis)
def keys(self):
return self._info_axis
def items(self):
for h in self._info_axis:
yield h, self[h]
@Appender(items.__doc__)
def iteritems(self):
return self.items()
def __len__(self) -> int:
return len(self._info_axis)
def __contains__(self, key) -> bool_t:
return key in self._info_axis
@property
def empty(self) -> bool_t:
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
None) -> np.ndarray:
return np.asarray(self._values, dtype=dtype)
def __array_wrap__(self, result, context=None):
result = lib.item_from_zerodim(result)
if is_scalar(result):
return result
d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
return self._constructor(result, **d).__finalize__(self)
def __getstate__(self) -> Dict[str, Any]:
meta = {k: getattr(self, k, None) for k in self._metadata}
return dict(
_data=self._data,
_typ=self._typ,
_metadata=self._metadata,
attrs=self.attrs,
**meta,
)
def __setstate__(self, state):
if isinstance(state, BlockManager):
self._data = state
elif isinstance(state, dict):
typ = state.get("_typ")
if typ is not None:
attrs = state.get("_attrs", {})
object.__setattr__(self, "_attrs", attrs)
meta = set(self._internal_names + self._metadata)
for k in list(meta):
if k in state:
v = state[k]
object.__setattr__(self, k, v)
for k, v in state.items():
if k not in meta:
object.__setattr__(self, k, v)
else:
raise NotImplementedError("Pre-0.12 pickles are no longer supported")
elif len(state) == 2:
raise NotImplementedError("Pre-0.12 pickles are no longer supported")
self._item_cache = {}
def __repr__(self) -> str:
prepr = f"[{','.join(map(pprint_thing, self))}]"
return f"{type(self).__name__}({prepr})"
def _repr_latex_(self):
if config.get_option("display.latex.repr"):
return self.to_latex()
else:
return None
def _repr_data_resource_(self):
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option("display.max_rows"))
payload = json.loads(
data.to_json(orient="table"), object_pairs_hook=collections.OrderedDict
)
return payload
_shared_docs[
"to_markdown"
] = """
Print %(klass)s in Markdown-friendly format.
.. versionadded:: 1.0.0
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
%(klass)s in Markdown-friendly format.
"""
_shared_docs[
"to_excel"
] = """
Write %(klass)s to an Excel sheet.
To write a single %(klass)s to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
ExcelWriter : Class for writing DataFrame objects into excel sheets.
read_excel : Read an Excel file into a pandas DataFrame.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Notes
-----
For compatibility with :meth:`~DataFrame.to_csv`,
to_excel serializes lists and dicts to strings before writing.
    Once a workbook has been saved it is not possible to write further
    data without rewriting the whole workbook.
Examples
--------
Create, write to and save a workbook:
>>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> df2 = df1.copy()
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
ExcelWriter can also be used to append to an existing Excel file:
>>> with pd.ExcelWriter('output.xlsx',
... mode='a') as writer: # doctest: +SKIP
... df.to_excel(writer, sheet_name='Sheet_name_3')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
@Appender(_shared_docs["to_excel"] % dict(klass="object"))
def to_excel(
self,
excel_writer,
sheet_name="Sheet1",
na_rep="",
float_format=None,
columns=None,
header=True,
index=True,
index_label=None,
startrow=0,
startcol=0,
engine=None,
merge_cells=True,
encoding=None,
inf_rep="inf",
verbose=True,
freeze_panes=None,
) -> None:
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(
df,
na_rep=na_rep,
cols=columns,
header=header,
float_format=float_format,
index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep,
)
formatter.write(
excel_writer,
sheet_name=sheet_name,
startrow=startrow,
startcol=startcol,
freeze_panes=freeze_panes,
engine=engine,
)
def to_json(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
orient: Optional[str] = None,
date_format: Optional[str] = None,
double_precision: int = 10,
force_ascii: bool_t = True,
date_unit: str = "ms",
default_handler: Optional[Callable[[Any], JSONSerializable]] = None,
lines: bool_t = False,
compression: Optional[str] = "infer",
index: bool_t = True,
indent: Optional[int] = None,
) -> Optional[str]:
from pandas.io import json
if date_format is None and orient == "table":
date_format = "iso"
elif date_format is None:
date_format = "epoch"
config.is_nonnegative_int(indent)
indent = indent or 0
return json.to_json(
path_or_buf=path_or_buf,
obj=self,
orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii,
date_unit=date_unit,
default_handler=default_handler,
lines=lines,
compression=compression,
index=index,
indent=indent,
)
def to_hdf(
self,
path_or_buf,
key: str,
mode: str = "a",
complevel: Optional[int] = None,
complib: Optional[str] = None,
append: bool_t = False,
format: Optional[str] = None,
index: bool_t = True,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
nan_rep=None,
dropna: Optional[bool_t] = None,
data_columns: Optional[List[str]] = None,
errors: str = "strict",
encoding: str = "UTF-8",
) -> None:
from pandas.io import pytables
pytables.to_hdf(
path_or_buf,
key,
self,
mode=mode,
complevel=complevel,
complib=complib,
append=append,
format=format,
index=index,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
dropna=dropna,
data_columns=data_columns,
errors=errors,
encoding=encoding,
)
def to_sql(
self,
name: str,
con,
schema=None,
if_exists: str = "fail",
index: bool_t = True,
index_label=None,
chunksize=None,
dtype=None,
method=None,
) -> None:
from pandas.io import sql
sql.to_sql(
self,
name,
con,
schema=schema,
if_exists=if_exists,
index=index,
index_label=index_label,
chunksize=chunksize,
dtype=dtype,
method=method,
)
def to_pickle(
self,
path,
compression: Optional[str] = "infer",
protocol: int = pickle.HIGHEST_PROTOCOL,
) -> None:
from pandas.io.pickle import to_pickle
to_pickle(self, path, compression=compression, protocol=protocol)
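    # Sketch of a round trip ("tmp.pkl" is a hypothetical path):
    #   df.to_pickle("tmp.pkl")
    #   pd.read_pickle("tmp.pkl").equals(df)  # expected to be True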
def to_clipboard(
self, excel: bool_t = True, sep: Optional[str] = None, **kwargs
) -> None:
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
def to_xarray(self):
xarray = import_optional_dependency("xarray")
if self.ndim == 1:
return xarray.DataArray.from_series(self)
else:
return xarray.Dataset.from_dataframe(self)
@Substitution(returns=fmt.return_docstring)
def to_latex(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
bold_rows=False,
column_format=None,
longtable=None,
escape=None,
encoding=None,
decimal=".",
multicolumn=None,
multicolumn_format=None,
multirow=None,
caption=None,
label=None,
):
if self.ndim == 1:
self = self.to_frame()
if longtable is None:
longtable = config.get_option("display.latex.longtable")
if escape is None:
escape = config.get_option("display.latex.escape")
if multicolumn is None:
multicolumn = config.get_option("display.latex.multicolumn")
if multicolumn_format is None:
multicolumn_format = config.get_option("display.latex.multicolumn_format")
if multirow is None:
multirow = config.get_option("display.latex.multirow")
formatter = DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
header=header,
index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape,
decimal=decimal,
)
return formatter.to_latex(
buf=buf,
column_format=column_format,
longtable=longtable,
encoding=encoding,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
caption=caption,
label=label,
)
def to_csv(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
sep: str = ",",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Sequence[Label]] = None,
header: Union[bool_t, List[str]] = True,
index: bool_t = True,
index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None,
mode: str = "w",
encoding: Optional[str] = None,
compression: Optional[Union[str, Mapping[str, str]]] = "infer",
quoting: Optional[int] = None,
quotechar: str = '"',
line_terminator: Optional[str] = None,
chunksize: Optional[int] = None,
date_format: Optional[str] = None,
doublequote: bool_t = True,
escapechar: Optional[str] = None,
decimal: Optional[str] = ".",
) -> Optional[str]:
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.csvs import CSVFormatter
formatter = CSVFormatter(
df,
path_or_buf,
line_terminator=line_terminator,
sep=sep,
encoding=encoding,
compression=compression,
quoting=quoting,
na_rep=na_rep,
float_format=float_format,
cols=columns,
header=header,
index=index,
index_label=index_label,
mode=mode,
chunksize=chunksize,
quotechar=quotechar,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar,
decimal=decimal,
)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
return None
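    # Illustrative usage (a minimal sketch, not part of the original source;
    # the frame below is hypothetical). With path_or_buf=None the CSV text
    # is returned as a string instead of being written to a file:
    #
    # >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    # >>> df.to_csv()
    # ',a,b\n0,1,3\n1,2,4\n'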
# ----------------------------------------------------------------------
# Lookup Caching
def _set_as_cached(self, item, cacher) -> None:
self._cacher = (item, weakref.ref(cacher))
def _reset_cacher(self) -> None:
if hasattr(self, "_cacher"):
del self._cacher
def _maybe_cache_changed(self, item, value) -> None:
self._data.set(item, value)
@property
def _is_cached(self) -> bool_t:
return getattr(self, "_cacher", None) is not None
def _get_cacher(self):
cacher = getattr(self, "_cacher", None)
if cacher is not None:
cacher = cacher[1]()
return cacher
def _maybe_update_cacher(
self, clear: bool_t = False, verify_is_copy: bool_t = True
) -> None:
cacher = getattr(self, "_cacher", None)
if cacher is not None:
ref = cacher[1]()
# we are trying to reference a dead referant, hence
# a copy
if ref is None:
del self._cacher
else:
                # Note: we need to call ref._maybe_cache_changed even in the
                # case where it will raise; the precise reason is not fully
                # understood.
try:
ref._maybe_cache_changed(cacher[0], self)
except AssertionError:
# ref._data.setitem can raise
# AssertionError because of shape mismatch
pass
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t="referant")
if clear:
self._clear_item_cache()
def _clear_item_cache(self) -> None:
self._item_cache.clear()
# ----------------------------------------------------------------------
# Indexing Methods
def take(
self: FrameOrSeries, indices, axis=0, is_copy: Optional[bool_t] = None, **kwargs
) -> FrameOrSeries:
if is_copy is not None:
warnings.warn(
"is_copy is deprecated and will be removed in a future version. "
"'take' always returns a copy, so there is no need to specify this.",
FutureWarning,
stacklevel=2,
)
nv.validate_take(tuple(), kwargs)
self._consolidate_inplace()
new_data = self._data.take(
indices, axis=self._get_block_manager_axis(axis), verify=True
)
return self._constructor(new_data).__finalize__(self)
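    # Illustrative usage (a minimal sketch, not part of the original source;
    # the frame below is hypothetical). ``take`` is positional, not
    # label-based:
    #
    # >>> df = pd.DataFrame({"a": [10, 20, 30]}, index=["x", "y", "z"])
    # >>> df.take([2, 0])
    #     a
    # z  30
    # x  10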
def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries:
result = self.take(indices=indices, axis=axis)
        # Mark the result as a copy of self when the take actually changed
        # the axis labels (otherwise no copy flag is needed).
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
def xs(self, key, axis=0, level=None, drop_level: bool_t = True):
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level is not None:
loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)
# create the tuple of the indexer
_indexer = [slice(None)] * self.ndim
_indexer[axis] = loc
indexer = tuple(_indexer)
result = self.iloc[indexer]
setattr(result, result._get_axis_name(axis), new_ax)
return result
if axis == 1:
return self[key]
self._consolidate_inplace()
index = self.index
if isinstance(index, MultiIndex):
loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)
else:
loc = self.index.get_loc(key)
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
(inds,) = loc.nonzero()
return self._take_with_is_copy(inds, axis=axis)
else:
return self._take_with_is_copy(loc, axis=axis)
if not is_scalar(loc):
new_index = self.index[loc]
if is_scalar(loc):
# In this case loc should be an integer
if self.ndim == 1:
# if we encounter an array-like and we only have 1 dim
                # that means there are lists/ndarrays inside the Series!
# so just return them (GH 6394)
return self._values[loc]
new_values = self._data.fast_xs(loc)
result = self._constructor_sliced(
new_values,
index=self.columns,
name=self.index[loc],
dtype=new_values.dtype,
)
else:
result = self.iloc[loc]
result.index = new_index
# this could be a view
# but only in a single-dtyped view sliceable case
result._set_is_copy(self, copy=not result._is_view)
return result
_xs: Callable = xs
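    # Illustrative usage of ``xs`` (a minimal sketch, not part of the original
    # source; the MultiIndex frame below is hypothetical). ``xs`` selects a
    # cross-section and drops the matched level by default:
    #
    # >>> idx = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
    # >>> df = pd.DataFrame({"val": [10, 20, 30]}, index=idx)
    # >>> df.xs("a")
    #    val
    # 1   10
    # 2   20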
def __getitem__(self, item):
raise AbstractMethodError(self)
def _get_item_cache(self, item):
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res
def _box_item_values(self, key, values):
raise AbstractMethodError(self)
def _slice(self: FrameOrSeries, slobj: slice, axis=0) -> FrameOrSeries:
assert isinstance(slobj, slice), type(slobj)
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._data.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
# but only in a single-dtyped view sliceable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
def _set_item(self, key, value) -> None:
self._data.set(key, value)
self._clear_item_cache()
def _set_is_copy(self, ref, copy: bool_t = True) -> None:
if not copy:
self._is_copy = None
else:
assert ref is not None
self._is_copy = weakref.ref(ref)
def _check_is_chained_assignment_possible(self) -> bool_t:
if self._is_view and self._is_cached:
ref = self._get_cacher()
if ref is not None and ref._is_mixed_type:
self._check_setitem_copy(stacklevel=4, t="referant", force=True)
return True
elif self._is_copy:
self._check_setitem_copy(stacklevel=4, t="referant")
return False
def _check_setitem_copy(self, stacklevel=4, t="setting", force=False):
# return early if the check is not needed
if not (force or self._is_copy):
return
value = config.get_option("mode.chained_assignment")
if value is None:
return
# see if the copy is not actually referred; if so, then dissolve
# the copy weakref
if self._is_copy is not None and not isinstance(self._is_copy, str):
r = self._is_copy()
if not gc.get_referents(r) or r.shape == self.shape:
self._is_copy = None
return
# a custom message
if isinstance(self._is_copy, str):
t = self._is_copy
elif t == "referant":
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame\n\n"
"See the caveats in the documentation: "
"https://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
else:
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame.\n"
"Try using .loc[row_indexer,col_indexer] = value "
"instead\n\nSee the caveats in the documentation: "
"https://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
if value == "raise":
raise com.SettingWithCopyError(t)
elif value == "warn":
warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)
def __delitem__(self, key) -> None:
deleted = False
maybe_shortcut = False
if self.ndim == 2 and isinstance(self.columns, MultiIndex):
try:
maybe_shortcut = key not in self.columns._engine
except TypeError:
pass
if maybe_shortcut:
# Allow shorthand to delete all columns whose first len(key)
# elements match key:
if not isinstance(key, tuple):
key = (key,)
for col in self.columns:
if isinstance(col, tuple) and col[: len(key)] == key:
del self[col]
deleted = True
if not deleted:
# If the above loop ran and didn't delete anything because
# there was no match, this call should raise the appropriate
# exception:
self._data.delete(key)
# delete from the caches
try:
del self._item_cache[key]
except KeyError:
pass
# ----------------------------------------------------------------------
# Unsorted
def get(self, key, default=None):
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
@property
def _is_view(self) -> bool_t:
return self._data.is_view
def reindex_like(
self: FrameOrSeries,
other,
method: Optional[str] = None,
copy: bool_t = True,
limit=None,
tolerance=None,
) -> FrameOrSeries:
d = other._construct_axes_dict(
axes=self._AXIS_ORDERS,
method=method,
copy=copy,
limit=limit,
tolerance=tolerance,
)
return self.reindex(**d)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace: bool_t = False,
errors: str = "raise",
):
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis_name = self._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes, _ = self._construct_axes_from_arguments((index, columns), {})
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
obj = self
for axis, labels in axes.items():
if labels is not None:
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
if inplace:
self._update_inplace(obj)
else:
return obj
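    # Illustrative usage (a minimal sketch, not part of the original source).
    # ``labels`` plus ``axis`` and the ``index``/``columns`` keywords are two
    # equivalent spellings:
    #
    # >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    # >>> df.drop(columns="b")
    #    a
    # 0  1
    # 1  2
    # >>> df.drop("b", axis=1).equals(df.drop(columns="b"))
    # True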
def _drop_axis(
self: FrameOrSeries, labels, axis, level=None, errors: str = "raise"
) -> FrameOrSeries:
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
result = self.reindex(**{axis_name: new_axis})
# Case for non-unique axis
else:
labels = ensure_object(com.index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
indexer = ~axis.get_level_values(level).isin(labels)
# GH 18561 MultiIndex.drop should raise if label is absent
if errors == "raise" and indexer.all():
raise KeyError(f"{labels} not found in axis")
else:
indexer = ~axis.isin(labels)
# Check if label doesn't exist along axis
labels_missing = (axis.get_indexer_for(labels) == -1).any()
if errors == "raise" and labels_missing:
raise KeyError(f"{labels} not found in axis")
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result
def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._data = getattr(result, "_data", result)
self._maybe_update_cacher(verify_is_copy=verify_is_copy)
def add_prefix(self: FrameOrSeries, prefix: str) -> FrameOrSeries:
f = functools.partial("{prefix}{}".format, prefix=prefix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper) # type: ignore
def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:
f = functools.partial("{}{suffix}".format, suffix=suffix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper) # type: ignore
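    # Illustrative usage (a minimal sketch, not part of the original source).
    # Both methods rename the info axis (columns for a DataFrame, the index
    # for a Series):
    #
    # >>> df = pd.DataFrame({"a": [1], "b": [2]})
    # >>> df.add_prefix("col_").columns.tolist()
    # ['col_a', 'col_b']
    # >>> pd.Series([1, 2]).add_suffix("_item").index.tolist()
    # ['0_item', '1_item']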
def sort_values(
self,
axis=0,
ascending=True,
inplace: bool_t = False,
kind: str = "quicksort",
na_position: str = "last",
ignore_index: bool_t = False,
):
raise AbstractMethodError(self)
def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries:
# TODO: Decide if we care about having different examples for different
# kinds
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
method = missing.clean_reindex_fill_method(kwargs.pop("method", None))
level = kwargs.pop("level", None)
copy = kwargs.pop("copy", True)
limit = kwargs.pop("limit", None)
tolerance = kwargs.pop("tolerance", None)
fill_value = kwargs.pop("fill_value", None)
# Series.reindex doesn't use / need the axis kwarg
# We pop and ignore it here, to make writing Series/Frame generic code
# easier
kwargs.pop("axis", None)
if kwargs:
raise TypeError(
"reindex() got an unexpected keyword "
f'argument "{list(kwargs.keys())[0]}"'
)
self._consolidate_inplace()
        # if all axes that are requested to reindex are equal, then only copy
        # if indicated; the index names must be equal here as well as the values
if all(
self._get_axis(axis).identical(ax)
for axis, ax in axes.items()
if ax is not None
):
if copy:
return self.copy()
return self
# check if we are a multi reindex
if self._needs_reindex_multi(axes, method, level):
return self._reindex_multi(axes, copy, fill_value)
# perform the reindex on the axes
return self._reindex_axes(
axes, level, limit, tolerance, method, fill_value, copy
).__finalize__(self)
def _reindex_axes(
self: FrameOrSeries, axes, level, limit, tolerance, method, fill_value, copy
) -> FrameOrSeries:
obj = self
for a in self._AXIS_ORDERS:
labels = axes[a]
if labels is None:
continue
ax = self._get_axis(a)
new_index, indexer = ax.reindex(
labels, level=level, limit=limit, tolerance=tolerance, method=method
)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers(
{axis: [new_index, indexer]},
fill_value=fill_value,
copy=copy,
allow_dups=False,
)
return obj
def _needs_reindex_multi(self, axes, method, level) -> bool_t:
return (
(com.count_not_none(*axes.values()) == self._AXIS_LEN)
and method is None
and level is None
and not self._is_mixed_type
)
def _reindex_multi(self, axes, copy, fill_value):
raise AbstractMethodError(self)
def _reindex_with_indexers(
self: FrameOrSeries,
reindexers,
fill_value=None,
copy: bool_t = False,
allow_dups: bool_t = False,
) -> FrameOrSeries:
# reindex doing multiple operations on different axes if indicated
new_data = self._data
for axis in sorted(reindexers.keys()):
index, indexer = reindexers[axis]
baxis = self._get_block_manager_axis(axis)
if index is None:
continue
index = ensure_index(index)
if indexer is not None:
indexer = ensure_int64(indexer)
# TODO: speed up on homogeneous DataFrame objects
new_data = new_data.reindex_indexer(
index,
indexer,
axis=baxis,
fill_value=fill_value,
allow_dups=allow_dups,
copy=copy,
)
if copy and new_data is self._data:
new_data = new_data.copy()
return self._constructor(new_data).__finalize__(self)
def filter(
self: FrameOrSeries,
items=None,
like: Optional[str] = None,
regex: Optional[str] = None,
axis=None,
) -> FrameOrSeries:
nkw = com.count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive"
)
if axis is None:
axis = self._info_axis_name
labels = self._get_axis(axis)
if items is not None:
name = self._get_axis_name(axis)
return self.reindex(**{name: [r for r in items if r in labels]})
elif like:
def f(x):
return like in ensure_str(x)
values = labels.map(f)
return self.loc(axis=axis)[values]
elif regex:
def f(x):
return matcher.search(ensure_str(x)) is not None
matcher = re.compile(regex)
values = labels.map(f)
return self.loc(axis=axis)[values]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
return self.iloc[:n]
def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
if n == 0:
return self.iloc[0:0]
return self.iloc[-n:]
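    # Illustrative usage (a minimal sketch, not part of the original source):
    #
    # >>> s = pd.Series(range(5))
    # >>> s.head(2).tolist()
    # [0, 1]
    # >>> s.tail(2).tolist()
    # [3, 4]
    # >>> s.tail(0).tolist()   # n == 0 takes the empty slice iloc[0:0]
    # []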
def sample(
self: FrameOrSeries,
n=None,
frac=None,
replace=False,
weights=None,
random_state=None,
axis=None,
) -> FrameOrSeries:
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
axis_length = self.shape[axis]
# Process random_state argument
rs = com.random_state(random_state)
# Check weights for compliance
if weights is not None:
# If a series, align with frame
if isinstance(weights, ABCSeries):
weights = weights.reindex(self.axes[axis])
            # Strings are acceptable if self is a DataFrame and axis == 0
if isinstance(weights, str):
if isinstance(self, ABCDataFrame):
if axis == 0:
try:
weights = self[weights]
except KeyError as err:
raise KeyError(
"String passed to weights not a valid column"
) from err
else:
raise ValueError(
"Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame"
)
else:
raise ValueError(
"Strings cannot be passed as weights "
"when sampling from a Series."
)
weights = pd.Series(weights, dtype="float64")
if len(weights) != axis_length:
raise ValueError(
"Weights and axis to be sampled must be of same length"
)
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative values")
# If has nan, set to zero.
weights = weights.fillna(0)
            # Renormalize if the weights don't sum to 1
if weights.sum() != 1:
if weights.sum() != 0:
weights = weights / weights.sum()
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
# If no frac or n, default to n=1.
if n is None and frac is None:
n = 1
elif frac is not None and frac > 1 and not replace:
raise ValueError(
"Replace has to be set to `True` when "
"upsampling the population `frac` > 1."
)
elif n is not None and frac is None and n % 1 != 0:
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
raise ValueError("Please enter a value for `frac` OR `n`, not both")
# Check for negative sizes
if n < 0:
raise ValueError(
"A negative number of rows requested. Please provide positive value."
)
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
return self.take(locs, axis=axis)
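    # Illustrative usage (a minimal sketch, not part of the original source).
    # ``frac`` is translated to ``n = int(round(frac * axis_length))`` and a
    # fixed ``random_state`` makes the draw reproducible:
    #
    # >>> df = pd.DataFrame({"a": range(4)})
    # >>> len(df.sample(frac=0.5))
    # 2
    # >>> df.sample(n=2, random_state=0).equals(df.sample(n=2, random_state=0))
    # True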
_shared_docs[
"pipe"
] = r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
Function to apply to the %(klass)s.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the %(klass)s.
args : iterable, optional
Positional arguments passed into ``func``.
kwargs : mapping, optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. Instead of writing
>>> f(g(h(df), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe((f, 'arg2'), arg1=a, arg3=c)
... )
"""
@Appender(_shared_docs["pipe"] % _shared_doc_kwargs)
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
_shared_docs["aggregate"] = dedent(
"""
Aggregate using one or more operations over the specified axis.
%(versionadded)s
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
scalar, Series or DataFrame
The return can be:
* scalar : when Series.agg is called with single function
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
Return scalar, Series or DataFrame.
%(see_also)s
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
A passed user-defined-function will be passed a Series for evaluation.
%(examples)s"""
)
_shared_docs[
"transform"
] = """
Call ``func`` on self producing a %(klass)s with transformed values.
Produced %(klass)s will have same axis length as self.
Parameters
----------
func : function, str, list or dict
Function to use for transforming the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
    - list of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
%(klass)s
A %(klass)s that must have the same length as self.
Raises
------
ValueError : If the returned %(klass)s has a different length than self.
See Also
--------
%(klass)s.agg : Only perform aggregating type operations.
%(klass)s.apply : Invoke function on a %(klass)s.
Examples
--------
>>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)})
>>> df
A B
0 0 1
1 1 2
2 2 3
>>> df.transform(lambda x: x + 1)
A B
0 1 2
1 2 3
2 3 4
Even though the resulting %(klass)s must have the same length as the
input %(klass)s, it is possible to provide several input functions:
>>> s = pd.Series(range(3))
>>> s
0 0
1 1
2 2
dtype: int64
>>> s.transform([np.sqrt, np.exp])
sqrt exp
0 0.000000 1.000000
1 1.000000 2.718282
2 1.414214 7.389056
"""
# ----------------------------------------------------------------------
# Attribute access
def __finalize__(
self: FrameOrSeries, other, method=None, **kwargs
) -> FrameOrSeries:
if isinstance(other, NDFrame):
for name in other.attrs:
self.attrs[name] = other.attrs[name]
# For subclasses using _metadata.
for name in self._metadata:
assert isinstance(name, str)
object.__setattr__(self, name, getattr(other, name, None))
return self
def __getattr__(self, name: str):
# Note: obj.x will always call obj.__getattribute__('x') prior to
# calling obj.__getattr__('x').
if (
name in self._internal_names_set
or name in self._metadata
or name in self._accessors
):
return object.__getattribute__(self, name)
else:
if self._info_axis._can_hold_identifiers_and_holds_name(name):
return self[name]
return object.__getattribute__(self, name)
def __setattr__(self, name: str, value) -> None:
# first try regular attribute access via __getattribute__, so that
# e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
# the same attribute.
try:
object.__getattribute__(self, name)
return object.__setattr__(self, name, value)
except AttributeError:
pass
# if this fails, go on to more involved attribute setting
# (note that this matches __getattr__, above).
if name in self._internal_names_set:
object.__setattr__(self, name, value)
elif name in self._metadata:
object.__setattr__(self, name, value)
else:
try:
existing = getattr(self, name)
if isinstance(existing, Index):
object.__setattr__(self, name, value)
elif name in self._info_axis:
self[name] = value
else:
object.__setattr__(self, name, value)
except (AttributeError, TypeError):
if isinstance(self, ABCDataFrame) and (is_list_like(value)):
warnings.warn(
"Pandas doesn't allow columns to be "
"created via a new attribute name - see "
"https://pandas.pydata.org/pandas-docs/"
"stable/indexing.html#attribute-access",
stacklevel=2,
)
object.__setattr__(self, name, value)
def _dir_additions(self):
additions = {
c
for c in self._info_axis.unique(level=0)[:100]
if isinstance(c, str) and c.isidentifier()
}
return super()._dir_additions().union(additions)
# ----------------------------------------------------------------------
# Consolidation of internals
def _protect_consolidate(self, f):
blocks_before = len(self._data.blocks)
result = f()
if len(self._data.blocks) != blocks_before:
self._clear_item_cache()
return result
def _consolidate_inplace(self) -> None:
def f():
self._data = self._data.consolidate()
self._protect_consolidate(f)
def _consolidate(self, inplace: bool_t = False):
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
self._consolidate_inplace()
else:
f = lambda: self._data.consolidate()
cons_data = self._protect_consolidate(f)
return self._constructor(cons_data).__finalize__(self)
@property
def _is_mixed_type(self) -> bool_t:
f = lambda: self._data.is_mixed_type
return self._protect_consolidate(f)
@property
def _is_numeric_mixed_type(self) -> bool_t:
f = lambda: self._data.is_numeric_mixed_type
return self._protect_consolidate(f)
def _check_inplace_setting(self, value) -> bool_t:
if self._is_mixed_type:
if not self._is_numeric_mixed_type:
                # allow an actual np.nan through
if is_float(value) and np.isnan(value):
return True
raise TypeError(
"Cannot do inplace boolean setting on "
"mixed-types with a non np.nan value"
)
return True
def _get_numeric_data(self):
return self._constructor(self._data.get_numeric_data()).__finalize__(self)
def _get_bool_data(self):
return self._constructor(self._data.get_bool_data()).__finalize__(self)
# ----------------------------------------------------------------------
# Internal Interface Methods
@property
def values(self) -> np.ndarray:
self._consolidate_inplace()
return self._data.as_array(transpose=self._AXIS_REVERSED)
@property
def _values(self) -> np.ndarray:
return self.values
def _internal_get_values(self) -> np.ndarray:
return self.values
@property
def dtypes(self):
from pandas import Series
return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)
def _to_dict_of_blocks(self, copy: bool_t = True):
return {
k: self._constructor(v).__finalize__(self)
for k, v, in self._data.to_dict(copy=copy).items()
}
def astype(
self: FrameOrSeries, dtype, copy: bool_t = True, errors: str = "raise"
) -> FrameOrSeries:
if is_dict_like(dtype):
if self.ndim == 1: # i.e. Series
if len(dtype) > 1 or self.name not in dtype:
raise KeyError(
"Only the Series name can be used for "
"the key in Series dtype mappings."
)
new_type = dtype[self.name]
return self.astype(new_type, copy, errors)
for col_name in dtype.keys():
if col_name not in self:
raise KeyError(
"Only a column name can be used for the "
"key in a dtype mappings argument."
)
results = []
for col_name, col in self.items():
if col_name in dtype:
results.append(
col.astype(dtype=dtype[col_name], copy=copy, errors=errors)
)
else:
results.append(col.copy() if copy else col)
elif is_extension_array_dtype(dtype) and self.ndim > 1:
# GH 18099/22869: columnwise conversion to extension dtype
# GH 24704: use iloc to handle duplicate column names
results = [
self.iloc[:, i].astype(dtype, copy=copy)
for i in range(len(self.columns))
]
else:
# else, only a single dtype is given
new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors)
return self._constructor(new_data).__finalize__(self)
# GH 19920: retain column metadata after concat
result = pd.concat(results, axis=1, copy=False)
result.columns = self.columns
return result
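    # Illustrative usage (a minimal sketch, not part of the original source).
    # A dict maps column names to target dtypes; columns not named in the
    # dict are left as-is (copied when ``copy=True``):
    #
    # >>> df = pd.DataFrame({"a": [1, 2], "b": [1.5, 2.5]})
    # >>> df.astype({"a": "float64"}).dtypes["a"]
    # dtype('float64')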
def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
data = self._data.copy(deep=deep)
return self._constructor(data).__finalize__(self)
def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
return self.copy(deep=deep)
def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:
return self.copy(deep=True)
def _convert(
self: FrameOrSeries,
datetime: bool_t = False,
numeric: bool_t = False,
timedelta: bool_t = False,
coerce: bool_t = False,
copy: bool_t = True,
) -> FrameOrSeries:
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
validate_bool_kwarg(coerce, "coerce")
validate_bool_kwarg(copy, "copy")
return self._constructor(
self._data.convert(
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy,
)
).__finalize__(self)
def infer_objects(self: FrameOrSeries) -> FrameOrSeries:
# numeric=False necessary to only soft convert;
# python objects will still be converted to
# native numpy numeric types
return self._constructor(
self._data.convert(
datetime=True, numeric=False, timedelta=True, coerce=False, copy=True
)
).__finalize__(self)
def convert_dtypes(
self: FrameOrSeries,
infer_objects: bool_t = True,
convert_string: bool_t = True,
convert_integer: bool_t = True,
convert_boolean: bool_t = True,
) -> FrameOrSeries:
if self.ndim == 1:
return self._convert_dtypes(
infer_objects, convert_string, convert_integer, convert_boolean
)
else:
results = [
col._convert_dtypes(
infer_objects, convert_string, convert_integer, convert_boolean
)
for col_name, col in self.items()
]
result = pd.concat(results, axis=1, copy=False)
return result
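    # Illustrative usage (a minimal sketch, not part of the original source;
    # assumes the nullable extension dtypes are available). A float column
    # holding whole numbers and a missing value converts to ``Int64``:
    #
    # >>> df = pd.DataFrame({"a": [1, 2, None]})
    # >>> df.convert_dtypes().dtypes["a"]
    # Int64Dtype()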
# ----------------------------------------------------------------------
# Filling NA's
@doc(**_shared_doc_kwargs)
def fillna(
self: FrameOrSeries,
value=None,
method=None,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
inplace = validate_bool_kwarg(inplace, "inplace")
value, method = validate_fillna_kwargs(value, method)
self._consolidate_inplace()
        # set the default here, so functions examining the signature
# can detect if something was set (e.g. in groupby) (GH9221)
if axis is None:
axis = 0
axis = self._get_axis_number(axis)
if value is None:
if self._is_mixed_type and axis == 1:
if inplace:
raise NotImplementedError()
result = self.T.fillna(method=method, limit=limit).T
# need to downcast here because of all of the transposes
result._data = result._data.downcast()
return result
new_data = self._data.interpolate(
method=method,
axis=axis,
limit=limit,
inplace=inplace,
coerce=True,
downcast=downcast,
)
else:
if len(self._get_axis(axis)) == 0:
return self
if self.ndim == 1:
if isinstance(value, (dict, ABCSeries)):
value = create_series_with_explicit_dtype(
value, dtype_if_empty=object
)
elif not is_list_like(value):
pass
else:
raise TypeError(
'"value" parameter must be a scalar, dict '
"or Series, but you passed a "
f'"{type(value).__name__}"'
)
new_data = self._data.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, (dict, ABCSeries)):
if axis == 1:
raise NotImplementedError(
"Currently only can fill "
"with dict/Series column "
"by column"
)
result = self if inplace else self.copy()
for k, v in value.items():
if k not in result:
continue
obj = result[k]
obj.fillna(v, limit=limit, inplace=True, downcast=downcast)
return result if not inplace else None
elif not is_list_like(value):
new_data = self._data.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, ABCDataFrame) and self.ndim == 2:
new_data = self.where(self.notna(), value)
else:
raise ValueError(f"invalid fill value with a {type(value)}")
if inplace:
self._update_inplace(new_data)
return None
else:
return self._constructor(new_data).__finalize__(self)
def ffill(
self: FrameOrSeries,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
return self.fillna(
method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
def bfill(
self: FrameOrSeries,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
return self.fillna(
method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
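    # Illustrative usage (a minimal sketch, not part of the original source):
    #
    # >>> s = pd.Series([1.0, None, 3.0])
    # >>> s.ffill().tolist()   # propagate the last valid value forward
    # [1.0, 1.0, 3.0]
    # >>> s.bfill().tolist()   # use the next valid value to fill backward
    # [1.0, 3.0, 3.0]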
_shared_docs[
"replace"
] = """
Replace values given in `to_replace` with `value`.
Values of the %(klass)s are replaced with other values dynamically.
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.
Parameters
----------
to_replace : str, regex, list, dict, Series, int, float, or None
How to find the values that will be replaced.
* numeric, str or regex:
- numeric: numeric values equal to `to_replace` will be
replaced with `value`
- str: string exactly matching `to_replace` will be replaced
with `value`
- regex: regexs matching `to_replace` will be replaced with
`value`
* list of str, regex, or numeric:
- First, if `to_replace` and `value` are both lists, they
**must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
lists will be interpreted as regexs otherwise they will match
directly. This doesn't matter much for `value` since there
are only a few possible substitution regexes you can use.
- str, regex and numeric rules apply as above.
* dict:
- Dicts can be used to specify different replacement values
for different existing values. For example,
``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and
'y' with 'z'. To use a dict in this way the `value`
parameter should be `None`.
- For a DataFrame a dict can specify that different values
should be replaced in different columns. For example,
``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a'
and the value 'z' in column 'b' and replaces these values
with whatever is specified in `value`. The `value` parameter
should not be ``None`` in this case. You can treat this as a
special case of passing two lists except that you are
specifying the column to search in.
- For a DataFrame nested dictionaries, e.g.,
``{'a': {'b': np.nan}}``, are read as follows: look in column
'a' for the value 'b' and replace it with NaN. The `value`
parameter should be ``None`` to use a nested dict in this
way. You can nest regular expressions as well. Note that
column names (the top-level dictionary keys in a nested
dictionary) **cannot** be regular expressions.
* None:
- This means that the `regex` argument must be a string,
compiled regular expression, or list, dict, ndarray or
Series of such elements. If `value` is also ``None`` then
this **must** be a nested dictionary or Series.
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
Value to replace any values matching `to_replace` with.
For a DataFrame a dict of values can be used to specify which
value to use for each column (columns not in the dict will not be
filled). Regular expressions, strings and lists or dicts of such
objects are also allowed.
inplace : bool, default False
        If True, performs the replacement in place. Note: this will modify any
other views on this object (e.g. a column from a DataFrame).
Returns the caller if this is True.
limit : int, default None
Maximum size gap to forward or backward fill.
regex : bool or same types as `to_replace`, default False
Whether to interpret `to_replace` and/or `value` as regular
expressions. If this is ``True`` then `to_replace` *must* be a
string. Alternatively, this could be a regular expression or a
list, dict, or array of regular expressions in which case
`to_replace` must be ``None``.
method : {'pad', 'ffill', 'bfill', `None`}
        The method to use for replacement when `to_replace` is a
        scalar, list or tuple and `value` is ``None``.
.. versionchanged:: 0.23.0
Added to DataFrame.
Returns
-------
%(klass)s
Object after replacement.
Raises
------
AssertionError
* If `regex` is not a ``bool`` and `to_replace` is not
``None``.
TypeError
* If `to_replace` is not a scalar, array-like, ``dict``, or ``None``
* If `to_replace` is a ``dict`` and `value` is not a ``list``,
``dict``, ``ndarray``, or ``Series``
* If `to_replace` is ``None`` and `regex` is not compilable
into a regular expression or is a list, dict, ndarray, or
Series.
* When replacing multiple ``bool`` or ``datetime64`` objects and
the arguments to `to_replace` does not match the type of the
value being replaced
ValueError
* If a ``list`` or an ``ndarray`` is passed to `to_replace` and
`value` but they are not the same length.
See Also
--------
%(klass)s.fillna : Fill NA values.
%(klass)s.where : Replace values based on boolean condition.
Series.str.replace : Simple string replacement.
Notes
-----
* Regex substitution is performed under the hood with ``re.sub``. The
rules for substitution for ``re.sub`` are the same.
* Regular expressions will only substitute on strings, meaning you
cannot provide, for example, a regular expression matching floating
point numbers and expect the columns in your frame that have a
numeric dtype to be matched. However, if those floating point
numbers *are* strings, then you can do this.
* This method has *a lot* of options. You are encouraged to experiment
and play with this method to gain intuition about how it works.
    * When a dict is used as the `to_replace` value, the keys of the dict
      act as the `to_replace` part and the values of the dict act as the
      `value` parameter.
Examples
--------
**Scalar `to_replace` and `value`**
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.replace(0, 5)
0 5
1 1
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> df.replace(0, 5)
A B C
0 5 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
**List-like `to_replace`**
>>> df.replace([0, 1, 2, 3], 4)
A B C
0 4 5 a
1 4 6 b
2 4 7 c
3 4 8 d
4 4 9 e
>>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])
A B C
0 4 5 a
1 3 6 b
2 2 7 c
3 1 8 d
4 4 9 e
>>> s.replace([1, 2], method='bfill')
0 0
1 3
2 3
3 3
4 4
dtype: int64
**dict-like `to_replace`**
>>> df.replace({0: 10, 1: 100})
A B C
0 10 5 a
1 100 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': {0: 100, 4: 400}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
**Regular expression `to_replace`**
>>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],
... 'B': ['abc', 'bar', 'xyz']})
>>> df.replace(to_replace=r'^ba.$', value='new', regex=True)
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)
A B
0 new abc
1 foo bar
2 bait xyz
>>> df.replace(regex=r'^ba.$', value='new')
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})
A B
0 new abc
1 xyz new
2 bait xyz
>>> df.replace(regex=[r'^ba.$', 'foo'], value='new')
A B
0 new abc
1 new new
2 bait xyz
Note that when replacing multiple ``bool`` or ``datetime64`` objects,
the data types in the `to_replace` parameter must match the data
type of the value being replaced:
>>> df = pd.DataFrame({'A': [True, False, True],
... 'B': [False, True, False]})
>>> df.replace({'a string': 'new value', True: False}) # raises
Traceback (most recent call last):
...
TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'
This raises a ``TypeError`` because one of the ``dict`` keys is not of
the correct type for replacement.
Compare the behavior of ``s.replace({'a': None})`` and
``s.replace('a', None)`` to understand the peculiarities
of the `to_replace` parameter:
>>> s = pd.Series([10, 'a', 'a', 'b', 'a'])
When one uses a dict as the `to_replace` value, it is like the
value(s) in the dict are equal to the `value` parameter.
``s.replace({'a': None})`` is equivalent to
``s.replace(to_replace={'a': None}, value=None, method=None)``:
>>> s.replace({'a': None})
0 10
1 None
2 None
3 b
4 None
dtype: object
When ``value=None`` and `to_replace` is a scalar, list or
tuple, `replace` uses the method parameter (default 'pad') to do the
    replacement. This is why the 'a' values are replaced by 10
    in rows 1 and 2, and by 'b' in row 4, in this case.
The command ``s.replace('a', None)`` is actually equivalent to
``s.replace(to_replace='a', value=None, method='pad')``:
>>> s.replace('a', None)
0 10
1 10
2 10
3 b
4 b
dtype: object
"""
@Appender(_shared_docs["replace"] % _shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
if not (
is_scalar(to_replace)
or isinstance(to_replace, pd.Series)
or is_re_compilable(to_replace)
or is_list_like(to_replace)
):
raise TypeError(
"Expecting 'to_replace' to be either a scalar, array-like, "
"dict or None, got invalid type "
f"{repr(type(to_replace).__name__)}"
)
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_bool(regex) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is not a bool")
self._consolidate_inplace()
if value is None:
# passing a single value that is scalar like
# when value is None (GH5319), for compat
if not is_dict_like(to_replace) and not is_dict_like(regex):
to_replace = [to_replace]
if isinstance(to_replace, (tuple, list)):
if isinstance(self, ABCDataFrame):
return self.apply(
_single_replace, args=(to_replace, method, inplace, limit)
)
return _single_replace(self, to_replace, method, inplace, limit)
if not is_dict_like(to_replace):
if not is_dict_like(regex):
raise TypeError(
'If "to_replace" and "value" are both None '
'and "to_replace" is not a list, then '
"regex must be a mapping"
)
to_replace = regex
regex = True
items = list(to_replace.items())
keys, values = zip(*items) if items else ([], [])
are_mappings = [is_dict_like(v) for v in values]
if any(are_mappings):
if not all(are_mappings):
raise TypeError(
"If a nested mapping is passed, all values "
"of the top level mapping must be mappings"
)
# passed a nested dict/Series
to_rep_dict = {}
value_dict = {}
for k, v in items:
keys, values = list(zip(*v.items())) or ([], [])
to_rep_dict[k] = list(keys)
value_dict[k] = list(values)
to_replace, value = to_rep_dict, value_dict
else:
to_replace, value = keys, values
return self.replace(
to_replace, value, inplace=inplace, limit=limit, regex=regex
)
else:
# need a non-zero len on all axes
if not self.size:
return self
new_data = self._data
if is_dict_like(to_replace):
if is_dict_like(value): # {'A' : NA} -> {'A' : 0}
res = self if inplace else self.copy()
for c, src in to_replace.items():
if c in value and c in self:
# object conversion is handled in
# series.replace which is called recursively
res[c] = res[c].replace(
to_replace=src,
value=value[c],
inplace=False,
regex=regex,
)
return None if inplace else res
# {'A': NA} -> 0
elif not is_list_like(value):
keys = [(k, src) for k, src in to_replace.items() if k in self]
keys_len = len(keys) - 1
for i, (k, src) in enumerate(keys):
convert = i == keys_len
new_data = new_data.replace(
to_replace=src,
value=value,
filter=[k],
inplace=inplace,
regex=regex,
convert=convert,
)
else:
raise TypeError("value argument must be scalar, dict, or Series")
elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']
if is_list_like(value):
if len(to_replace) != len(value):
                        raise ValueError(
                            "Replacement lists must match in length. "
                            f"Expecting {len(to_replace)}, got {len(value)}."
                        )
new_data = self._data.replace_list(
src_list=to_replace,
dest_list=value,
inplace=inplace,
regex=regex,
)
else: # [NA, ''] -> 0
new_data = self._data.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
elif to_replace is None:
if not (
is_re_compilable(regex)
or is_list_like(regex)
or is_dict_like(regex)
):
raise TypeError(
f"'regex' must be a string or a compiled regular expression "
f"or a list or dict of strings or regular expressions, "
f"you passed a {repr(type(regex).__name__)}"
)
return self.replace(
regex, value, inplace=inplace, limit=limit, regex=True
)
else:
# dest iterable dict-like
if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}
new_data = self._data
for k, v in value.items():
if k in self:
new_data = new_data.replace(
to_replace=to_replace,
value=v,
filter=[k],
inplace=inplace,
regex=regex,
)
elif not is_list_like(value): # NA -> 0
new_data = self._data.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
else:
raise TypeError(
f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}'
)
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
_shared_docs[
"interpolate"
] = """
    Interpolate values according to different methods.
    Please note that only ``method='linear'`` is supported for
    DataFrame/Series with a MultiIndex.
Parameters
----------
method : str, default 'linear'
Interpolation technique to use. One of:
* 'linear': Ignore the index and treat the values as equally
spaced. This is the only method supported on MultiIndexes.
* 'time': Works on daily and higher resolution data to interpolate
given length of interval.
* 'index', 'values': use the actual numerical values of the index.
* 'pad': Fill in NaNs using existing values.
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',
'barycentric', 'polynomial': Passed to
`scipy.interpolate.interp1d`. These methods use the numerical
values of the index. Both 'polynomial' and 'spline' require that
you also specify an `order` (int), e.g.
``df.interpolate(method='polynomial', order=5)``.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':
Wrappers around the SciPy interpolation methods of similar
names. See `Notes`.
* 'from_derivatives': Refers to
`scipy.interpolate.BPoly.from_derivatives` which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18.
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to interpolate along.
limit : int, optional
Maximum number of consecutive NaNs to fill. Must be greater than
0.
inplace : bool, default False
Update the data in place if possible.
limit_direction : {'forward', 'backward', 'both'}, default 'forward'
If limit is specified, consecutive NaNs will be filled in this
direction.
limit_area : {`None`, 'inside', 'outside'}, default None
If limit is specified, consecutive NaNs will be filled with this
restriction.
* ``None``: No fill restriction.
* 'inside': Only fill NaNs surrounded by valid values
(interpolate).
* 'outside': Only fill NaNs outside valid values (extrapolate).
.. versionadded:: 0.23.0
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
**kwargs
Keyword arguments to pass on to the interpolating function.
Returns
-------
Series or DataFrame
Returns the same object type as the caller, interpolated at
some or all ``NaN`` values.
See Also
--------
fillna : Fill missing values using different methods.
scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials
(Akima interpolator).
scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the
Bernstein basis.
scipy.interpolate.interp1d : Interpolate a 1-D function.
scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh
interpolator).
scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic
interpolation.
scipy.interpolate.CubicSpline : Cubic spline data interpolator.
Notes
-----
The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
methods are wrappers around the respective SciPy implementations of
similar names. These use the actual numerical values of the index.
For more information on their behavior, see the
`SciPy documentation
<https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
and `SciPy tutorial
<https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.
Examples
--------
Filling in ``NaN`` in a :class:`~pandas.Series` via linear
interpolation.
>>> s = pd.Series([0, 1, np.nan, 3])
>>> s
0 0.0
1 1.0
2 NaN
3 3.0
dtype: float64
>>> s.interpolate()
0 0.0
1 1.0
2 2.0
3 3.0
dtype: float64
Filling in ``NaN`` in a Series by padding, but filling at most two
consecutive ``NaN`` at a time.
>>> s = pd.Series([np.nan, "single_one", np.nan,
... "fill_two_more", np.nan, np.nan, np.nan,
... 4.71, np.nan])
>>> s
0 NaN
1 single_one
2 NaN
3 fill_two_more
4 NaN
5 NaN
6 NaN
7 4.71
8 NaN
dtype: object
>>> s.interpolate(method='pad', limit=2)
0 NaN
1 single_one
2 single_one
3 fill_two_more
4 fill_two_more
5 fill_two_more
6 NaN
7 4.71
8 4.71
dtype: object
Filling in ``NaN`` in a Series via polynomial interpolation or splines:
Both 'polynomial' and 'spline' methods require that you also specify
an ``order`` (int).
>>> s = pd.Series([0, 2, np.nan, 8])
>>> s.interpolate(method='polynomial', order=2)
0 0.000000
1 2.000000
2 4.666667
3 8.000000
dtype: float64
Fill the DataFrame forward (that is, going down) along each column
using linear interpolation.
Note how the last entry in column 'a' is interpolated differently,
because there is no entry after it to use for interpolation.
Note how the first entry in column 'b' remains ``NaN``, because there
is no entry before it to use for interpolation.
>>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
... (np.nan, 2.0, np.nan, np.nan),
... (2.0, 3.0, np.nan, 9.0),
... (np.nan, 4.0, -4.0, 16.0)],
... columns=list('abcd'))
>>> df
a b c d
0 0.0 NaN -1.0 1.0
1 NaN 2.0 NaN NaN
2 2.0 3.0 NaN 9.0
3 NaN 4.0 -4.0 16.0
>>> df.interpolate(method='linear', limit_direction='forward', axis=0)
a b c d
0 0.0 NaN -1.0 1.0
1 1.0 2.0 -2.0 5.0
2 2.0 3.0 -3.0 9.0
3 2.0 4.0 -4.0 16.0
Using polynomial interpolation.
>>> df['d'].interpolate(method='polynomial', order=2)
0 1.0
1 4.0
2 9.0
3 16.0
Name: d, dtype: float64
"""
@Appender(_shared_docs["interpolate"] % _shared_doc_kwargs)
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction="forward",
limit_area=None,
downcast=None,
**kwargs,
):
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
if axis == 0:
ax = self._info_axis_name
_maybe_transposed_self = self
elif axis == 1:
_maybe_transposed_self = self.T
ax = 1
ax = _maybe_transposed_self._get_axis_number(ax)
if _maybe_transposed_self.ndim == 2:
alt_ax = 1 - ax
else:
alt_ax = ax
if isinstance(_maybe_transposed_self.index, MultiIndex) and method != "linear":
raise ValueError(
"Only `method=linear` interpolation is supported on MultiIndexes."
)
if _maybe_transposed_self._data.get_dtype_counts().get("object") == len(
_maybe_transposed_self.T
):
raise TypeError(
"Cannot interpolate with all object-dtype columns "
"in the DataFrame. Try setting at least one "
"column to a numeric dtype."
)
# create/use the index
if method == "linear":
# prior default
index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))
else:
index = _maybe_transposed_self._get_axis(alt_ax)
methods = {"index", "values", "nearest", "time"}
is_numeric_or_datetime = (
is_numeric_dtype(index)
or is_datetime64_any_dtype(index)
or is_timedelta64_dtype(index)
)
if method not in methods and not is_numeric_or_datetime:
raise ValueError(
"Index column must be numeric or datetime type when "
f"using {method} method other than linear. "
"Try setting a numeric or datetime index column before "
"interpolating."
)
if isna(index).any():
raise NotImplementedError(
"Interpolation with NaNs in the index "
"has not been implemented. Try filling "
"those NaNs before interpolating."
)
data = _maybe_transposed_self._data
new_data = data.interpolate(
method=method,
axis=ax,
index=index,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
inplace=inplace,
downcast=downcast,
**kwargs,
)
if inplace:
if axis == 1:
new_data = self._constructor(new_data).T._data
self._update_inplace(new_data)
else:
res = self._constructor(new_data).__finalize__(self)
if axis == 1:
res = res.T
return res
# ----------------------------------------------------------------------
    # Timeseries methods
def asof(self, where, subset=None):
if isinstance(where, str):
where = Timestamp(where)
if not self.index.is_monotonic:
raise ValueError("asof requires a sorted index")
is_series = isinstance(self, ABCSeries)
if is_series:
if subset is not None:
raise ValueError("subset is not valid for Series")
else:
if subset is None:
subset = self.columns
if not is_list_like(subset):
subset = [subset]
is_list = is_list_like(where)
if not is_list:
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq)
if where < start:
if not is_series:
from pandas import Series
return Series(index=self.columns, name=where, dtype=np.float64)
return np.nan
# It's always much faster to use a *while* loop here for
# Series than pre-computing all the NAs. However a
# *while* loop is extremely expensive for DataFrame
# so we later pre-compute all the NAs and use the same
# code path whether *where* is a scalar or list.
# See PR: https://github.com/pandas-dev/pandas/pull/14476
if is_series:
loc = self.index.searchsorted(where, side="right")
if loc > 0:
loc -= 1
values = self._values
while loc > 0 and isna(values[loc]):
loc -= 1
return values[loc]
if not isinstance(where, Index):
where = Index(where) if is_list else Index([where])
nulls = self.isna() if is_series else self[subset].isna().any(1)
if nulls.all():
if is_series:
return self._constructor(np.nan, index=where, name=self.name)
elif is_list:
from pandas import DataFrame
return DataFrame(np.nan, index=where, columns=self.columns)
else:
from pandas import Series
return Series(np.nan, index=self.columns, name=where[0])
locs = self.index.asof_locs(where, ~(nulls.values))
# mask the missing
missing = locs == -1
data = self.take(locs)
data.index = where
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
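    # Illustrative usage (a minimal sketch, not part of the original source;
    # requires a sorted index). ``asof`` returns the last non-NA value whose
    # index label is at or before ``where``:
    #
    # >>> s = pd.Series([10, 20, 30], index=[1, 5, 9])
    # >>> s.asof(7)
    # 20
    # >>> s.asof(0)    # before the first label -> NaN
    # nan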
# ----------------------------------------------------------------------
# Action Methods
_shared_docs[
"isna"
] = """
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
    NA values, such as None or :attr:`numpy.NaN`, get mapped to True
    values.
Everything else gets mapped to False values. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.isnull : Alias of isna.
%(klass)s.notna : Boolean inverse of isna.
%(klass)s.dropna : Omit axes labels with missing values.
isna : Top-level isna.
Examples
--------
Show which entries in a DataFrame are NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.isna()
age born name toy
0 False True False True
1 False False False False
2 True False False False
Show which entries in a Series are NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.isna()
0 False
1 False
2 True
dtype: bool
"""
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isna(self: FrameOrSeries) -> FrameOrSeries:
return isna(self).__finalize__(self)
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isnull(self: FrameOrSeries) -> FrameOrSeries:
return isna(self).__finalize__(self)
_shared_docs[
"notna"
] = """
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.notnull : Alias of notna.
%(klass)s.isna : Boolean inverse of notna.
%(klass)s.dropna : Omit axes labels with missing values.
notna : Top-level notna.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
"""
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notna(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self)
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notnull(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self)
def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):
if (lower is not None and np.any(isna(lower))) or (
upper is not None and np.any(isna(upper))
):
raise ValueError("Cannot use an NA value as a clip threshold")
result = self
mask = isna(self.values)
with np.errstate(all="ignore"):
if upper is not None:
subset = self.to_numpy() <= upper
result = result.where(subset, upper, axis=None, inplace=False)
if lower is not None:
subset = self.to_numpy() >= lower
result = result.where(subset, lower, axis=None, inplace=False)
if np.any(mask):
result[mask] = np.nan
if inplace:
self._update_inplace(result)
else:
return result
def _clip_with_one_bound(self, threshold, method, axis, inplace):
if axis is not None:
axis = self._get_axis_number(axis)
# method is self.le for upper bound and self.ge for lower bound
if is_scalar(threshold) and is_number(threshold):
if method.__name__ == "le":
return self._clip_with_scalar(None, threshold, inplace=inplace)
return self._clip_with_scalar(threshold, None, inplace=inplace)
subset = method(threshold, axis=axis) | isna(self)
# GH #15390
# In order for where method to work, the threshold must
# be transformed to NDFrame from other array like structure.
if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
if isinstance(self, ABCSeries):
threshold = self._constructor(threshold, index=self.index)
else:
threshold = _align_method_FRAME(self, threshold, axis, flex=None)[1]
return self.where(subset, threshold, axis=axis, inplace=inplace)
def clip(
self: FrameOrSeries,
lower=None,
upper=None,
axis=None,
inplace: bool_t = False,
*args,
**kwargs,
) -> FrameOrSeries:
inplace = validate_bool_kwarg(inplace, "inplace")
axis = nv.validate_clip_with_axis(axis, args, kwargs)
if axis is not None:
axis = self._get_axis_number(axis)
# GH 17276
# numpy doesn't like NaN as a clip value
# so ignore
# GH 19992
# numpy doesn't drop a list-like bound containing NaN
if not is_list_like(lower) and np.any(isna(lower)):
lower = None
if not is_list_like(upper) and np.any(isna(upper)):
upper = None
# GH 2747 (arguments were reversed)
if lower is not None and upper is not None:
if is_scalar(lower) and is_scalar(upper):
lower, upper = min(lower, upper), max(lower, upper)
# fast-path for scalars
if (lower is None or (is_scalar(lower) and is_number(lower))) and (
upper is None or (is_scalar(upper) and is_number(upper))
):
return self._clip_with_scalar(lower, upper, inplace=inplace)
result = self
if lower is not None:
result = result._clip_with_one_bound(
lower, method=self.ge, axis=axis, inplace=inplace
)
if upper is not None:
if inplace:
result = self
result = result._clip_with_one_bound(
upper, method=self.le, axis=axis, inplace=inplace
)
return result
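# Illustrative usage sketch (not part of the pandas source): how ``clip``
# bounds values, including the GH 2747 swap of reversed scalar bounds handled
# above. The helper name ``_demo_clip`` is hypothetical.
def _demo_clip():
    import pandas as pd

    s = pd.Series([-3, -1, 0, 2, 5])
    assert s.clip(lower=-1, upper=2).tolist() == [-1, -1, 0, 2, 2]
    # reversed scalar bounds are swapped rather than raising
    assert s.clip(lower=2, upper=-1).tolist() == [-1, -1, 0, 2, 2]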
_shared_docs[
"groupby"
] = """
Group %(klass)s using a mapper or by a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : mapping, function, label, or list of labels
Used to determine the groups for the groupby.
If ``by`` is a function, it's called on each value of the object's
index. If a dict or Series is passed, the Series or dict VALUES
will be used to determine the groups (the Series' values are first
aligned; see ``.align()`` method). If an ndarray is passed, the
values are used as-is to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``. Notice
that a tuple is interpreted as a (single) key.
axis : {0 or 'index', 1 or 'columns'}, default 0
Split along rows (0) or columns (1).
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
sort : bool, default True
Sort group keys. Get better performance by turning this off.
Note this does not influence the order of observations within each
group. Groupby preserves the order of rows within each group.
group_keys : bool, default True
When calling apply, add group keys to index to identify pieces.
squeeze : bool, default False
Reduce the dimensionality of the return type if possible,
otherwise return a consistent type.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionadded:: 0.23.0
Returns
-------
%(klass)sGroupBy
Returns a groupby object that contains information about the groups.
See Also
--------
resample : Convenience method for frequency conversion and resampling
of time series.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.
"""
def asfreq(
self: FrameOrSeries,
freq,
method=None,
how: Optional[str] = None,
normalize: bool_t = False,
fill_value=None,
) -> FrameOrSeries:
from pandas.core.resample import asfreq
return asfreq(
self,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
def at_time(
self: FrameOrSeries, time, asof: bool_t = False, axis=None
) -> FrameOrSeries:
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_at_time(time, asof=asof)
except AttributeError as err:
raise TypeError("Index must be DatetimeIndex") from err
return self._take_with_is_copy(indexer, axis=axis)
def between_time(
self: FrameOrSeries,
start_time,
end_time,
include_start: bool_t = True,
include_end: bool_t = True,
axis=None,
) -> FrameOrSeries:
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_between_time(
start_time,
end_time,
include_start=include_start,
include_end=include_end,
)
except AttributeError as err:
raise TypeError("Index must be DatetimeIndex") from err
return self._take_with_is_copy(indexer, axis=axis)
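# Illustrative sketch for ``at_time``/``between_time`` (not part of pandas);
# both require a DatetimeIndex, as the TypeError paths above enforce.
def _demo_time_selection():
    import pandas as pd

    idx = pd.date_range("2020-01-01", periods=5, freq="6H")
    s = pd.Series(range(5), index=idx)  # 00:00, 06:00, 12:00, 18:00, 00:00
    assert s.at_time("06:00").tolist() == [1]
    # both endpoints are included by default
    assert s.between_time("00:00", "12:00").tolist() == [0, 1, 2, 4]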
def resample(
self,
rule,
axis=0,
closed: Optional[str] = None,
label: Optional[str] = None,
convention: str = "start",
kind: Optional[str] = None,
loffset=None,
base: int = 0,
on=None,
level=None,
) -> "Resampler":
from pandas.core.resample import get_resampler
axis = self._get_axis_number(axis)
return get_resampler(
self,
freq=rule,
label=label,
closed=closed,
axis=axis,
kind=kind,
loffset=loffset,
convention=convention,
base=base,
key=on,
level=level,
)
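# Illustrative sketch of ``resample`` as a time-based groupby (not part of
# pandas); ``_demo_resample`` is a hypothetical helper name.
def _demo_resample():
    import pandas as pd

    idx = pd.date_range("2020-01-01", periods=6, freq="H")
    s = pd.Series([1, 1, 1, 2, 2, 2], index=idx)
    # downsample hourly data into 3-hourly bins, summing each bin
    assert s.resample("3H").sum().tolist() == [3, 6]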
def first(self: FrameOrSeries, offset) -> FrameOrSeries:
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'first' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
end_date = end = self.index[0] + offset
# Tick-like, e.g. 3 weeks
if not offset.is_anchored() and hasattr(offset, "_inc"):
if end_date in self.index:
end = self.index.searchsorted(end_date, side="left")
return self.iloc[:end]
return self.loc[:end]
def last(self: FrameOrSeries, offset) -> FrameOrSeries:
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'last' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
start_date = self.index[-1] - offset
start = self.index.searchsorted(start_date, side="right")
return self.iloc[start:]
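# Illustrative sketch of ``first``/``last`` offset-based selection (not part
# of pandas); both raise a TypeError without a DatetimeIndex, per the checks above.
def _demo_first_last():
    import pandas as pd

    idx = pd.date_range("2020-01-01", periods=4, freq="D")
    s = pd.Series([1, 2, 3, 4], index=idx)
    assert s.first("2D").tolist() == [1, 2]  # first two calendar days
    assert s.last("2D").tolist() == [3, 4]   # last two calendar days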
def rank(
self: FrameOrSeries,
axis=0,
method: str = "average",
numeric_only: Optional[bool_t] = None,
na_option: str = "keep",
ascending: bool_t = True,
pct: bool_t = False,
) -> FrameOrSeries:
axis = self._get_axis_number(axis)
if na_option not in {"keep", "top", "bottom"}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
def ranker(data):
ranks = algos.rank(
data.values,
axis=axis,
method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
ranks = self._constructor(ranks, **data._construct_axes_dict())
return ranks.__finalize__(self)
# if numeric_only is None, and we can't get anything, we try with
# numeric_only=True
if numeric_only is None:
try:
return ranker(self)
except TypeError:
numeric_only = True
if numeric_only:
data = self._get_numeric_data()
else:
data = self
return ranker(data)
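# Illustrative sketch of ``rank`` and its ``na_option`` handling (not part of
# pandas); ``_demo_rank`` is a hypothetical helper name.
def _demo_rank():
    import numpy as np
    import pandas as pd

    s = pd.Series([3, 1, np.nan, 2])
    # 'keep' (the default) leaves NaN unranked
    assert s.rank().isna().tolist() == [False, False, True, False]
    # 'bottom' assigns NaN the lowest rank
    assert s.rank(na_option="bottom").tolist() == [3.0, 1.0, 4.0, 2.0]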
_shared_docs[
"align"
] = """
Align two objects on their axes with the specified join method.
Join method is specified for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {'outer', 'inner', 'left', 'right'}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None).
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level.
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series:
- pad / ffill: propagate last valid observation forward to next valid.
- backfill / bfill: use NEXT valid observation to fill gap.
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
fill_axis : %(axes_single_arg)s, default 0
Axis along which to fill when ``method`` and ``limit`` are applied.
broadcast_axis : %(axes_single_arg)s, default None
Broadcast values along this axis, if aligning two objects of
different dimensions.
Returns
-------
(left, right) : (%(klass)s, type of other)
Aligned objects.
"""
@Appender(_shared_docs["align"] % _shared_doc_kwargs)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
method = missing.clean_fill_method(method)
if broadcast_axis == 1 and self.ndim != other.ndim:
if isinstance(self, ABCSeries):
# this means other is a DataFrame, and we need to broadcast
# self
cons = self._constructor_expanddim
df = cons(
{c: self for c in other.columns}, **other._construct_axes_dict()
)
return df._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
# this means self is a DataFrame, and we need to broadcast
# other
cons = other._constructor_expanddim
df = cons(
{c: other for c in self.columns}, **self._construct_axes_dict()
)
return self._align_frame(
df,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
if axis is not None:
axis = self._get_axis_number(axis)
if isinstance(other, ABCDataFrame):
return self._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
return self._align_series(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
def _align_frame(
self,
other,
join="outer",
axis=None,
level=None,
copy: bool_t = True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
# defaults
join_index, join_columns = None, None
ilidx, iridx = None, None
clidx, cridx = None, None
is_series = isinstance(self, ABCSeries)
if axis is None or axis == 0:
if not self.index.equals(other.index):
join_index, ilidx, iridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if axis is None or axis == 1:
if not is_series and not self.columns.equals(other.columns):
join_columns, clidx, cridx = self.columns.join(
other.columns, how=join, level=level, return_indexers=True
)
if is_series:
reindexers = {0: [join_index, ilidx]}
else:
reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}
left = self._reindex_with_indexers(
reindexers, copy=copy, fill_value=fill_value, allow_dups=True
)
# other must be always DataFrame
right = other._reindex_with_indexers(
{0: [join_index, iridx], 1: [join_columns, cridx]},
copy=copy,
fill_value=fill_value,
allow_dups=True,
)
if method is not None:
left = self._ensure_type(
left.fillna(method=method, axis=fill_axis, limit=limit)
)
right = right.fillna(method=method, axis=fill_axis, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _align_series(
self,
other,
join="outer",
axis=None,
level=None,
copy: bool_t = True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
is_series = isinstance(self, ABCSeries)
# series/series compat, other must always be a Series
if is_series:
if axis:
raise ValueError("cannot align series to a series other than axis 0")
# equal
if self.index.equals(other.index):
join_index, lidx, ridx = None, None, None
else:
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
left = self._reindex_indexer(join_index, lidx, copy)
right = other._reindex_indexer(join_index, ridx, copy)
else:
# one has > 1 ndim
fdata = self._data
if axis == 0:
join_index = self.index
lidx, ridx = None, None
if not self.index.equals(other.index):
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=1)
elif axis == 1:
join_index = self.columns
lidx, ridx = None, None
if not self.columns.equals(other.index):
join_index, lidx, ridx = self.columns.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=0)
else:
raise ValueError("Must specify axis=0 or 1")
if copy and fdata is self._data:
fdata = fdata.copy()
left = self._constructor(fdata)
if ridx is None:
right = other
else:
right = other.reindex(join_index, level=level)
# fill
fill_na = notna(fill_value) or (method is not None)
if fill_na:
left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)
right = right.fillna(fill_value, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_series or (not is_series and axis == 0):
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
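# Illustrative sketch of the public ``align`` behavior implemented above (not
# part of pandas); ``_demo_align`` is a hypothetical helper name.
def _demo_align():
    import pandas as pd

    a = pd.Series([1, 2], index=["x", "y"])
    b = pd.Series([10, 20], index=["y", "z"])
    # outer join of the two indexes, with holes filled by fill_value
    left, right = a.align(b, join="outer", fill_value=0)
    assert left.tolist() == [1, 2, 0]
    assert right.tolist() == [0, 10, 20]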
def _where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
inplace = validate_bool_kwarg(inplace, "inplace")
# align the cond to same shape as myself
cond = com.apply_if_callable(cond, self)
if isinstance(cond, NDFrame):
cond, _ = cond.align(self, join="right", broadcast_axis=1)
else:
if not hasattr(cond, "shape"):
cond = np.asanyarray(cond)
if cond.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
cond = self._constructor(cond, **self._construct_axes_dict())
# make sure we are boolean
fill_value = bool(inplace)
cond = cond.fillna(fill_value)
msg = "Boolean array expected for the condition, not {dtype}"
if not isinstance(cond, ABCDataFrame):
# This is a single-dimensional object.
if not is_bool_dtype(cond):
raise ValueError(msg.format(dtype=cond.dtype))
elif not cond.empty:
for dt in cond.dtypes:
if not is_bool_dtype(dt):
raise ValueError(msg.format(dtype=dt))
cond = -cond if inplace else cond
# try to align with other
try_quick = True
if hasattr(other, "align"):
# align with me
if other.ndim <= self.ndim:
_, other = self.align(
other, join="left", axis=axis, level=level, fill_value=np.nan
)
# if we are NOT aligned, raise as we cannot where index
if axis is None and not all(
other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes)
):
raise InvalidIndexError
# slice me out of the other
else:
raise NotImplementedError(
"cannot align with a higher dimensional NDFrame"
)
if isinstance(other, np.ndarray):
if other.shape != self.shape:
if self.ndim == 1:
icond = cond.values
# GH 2745 / GH 4192
# treat like a scalar
if len(other) == 1:
other = np.array(other[0])
# GH 3235
# match True cond to other
elif len(cond[icond]) == len(other):
# try to not change dtype at first (if try_quick)
if try_quick:
new_other = np.asarray(self)
new_other = new_other.copy()
new_other[icond] = other
other = new_other
else:
raise ValueError(
"Length of replacements must equal series length"
)
else:
raise ValueError(
"other must be the same shape as self when an ndarray"
)
# we are the same shape, so create an actual object for alignment
else:
other = self._constructor(other, **self._construct_axes_dict())
if axis is None:
axis = 0
if self.ndim == getattr(other, "ndim", 0):
align = True
else:
align = self._get_axis_number(axis) == 1
block_axis = self._get_block_manager_axis(axis)
if inplace:
# we may have different type blocks come out of putmask, so
# reconstruct the block manager
self._check_inplace_setting(other)
new_data = self._data.putmask(
mask=cond,
new=other,
align=align,
inplace=True,
axis=block_axis,
transpose=self._AXIS_REVERSED,
)
self._update_inplace(new_data)
else:
new_data = self._data.where(
other=other,
cond=cond,
align=align,
errors=errors,
try_cast=try_cast,
axis=block_axis,
)
return self._constructor(new_data).__finalize__(self)
_shared_docs[
"where"
] = """
Replace values where the condition is %(cond_rev)s.
Parameters
----------
cond : bool %(klass)s, array-like, or callable
Where `cond` is %(cond)s, keep the original value. Where
%(cond_rev)s, replace with corresponding value from `other`.
If `cond` is callable, it is computed on the %(klass)s and
should return boolean %(klass)s or array. The callable must
not change input %(klass)s (though pandas doesn't check it).
other : scalar, %(klass)s, or callable
Entries where `cond` is %(cond_rev)s are replaced with
corresponding value from `other`.
If other is callable, it is computed on the %(klass)s and
should return scalar or %(klass)s. The callable must not
change input %(klass)s (though pandas doesn't check it).
inplace : bool, default False
Whether to perform the operation in place on the data.
axis : int, default None
Alignment axis if needed.
level : int, default None
Alignment level if needed.
errors : str, {'raise', 'ignore'}, default 'raise'
Note that currently this parameter won't affect
the results and will always coerce to a suitable dtype.
- 'raise' : allow exceptions to be raised.
- 'ignore' : suppress exceptions. On error return original object.
try_cast : bool, default False
Try to cast the result back to the input type (if possible).
Returns
-------
Same type as caller
See Also
--------
:func:`DataFrame.%(name_other)s` : Return an object of same shape as
self.
Notes
-----
The %(name)s method is an application of the if-then idiom. For each
element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the
element is used; otherwise the corresponding element from the DataFrame
``other`` is used.
The signature for :func:`DataFrame.where` differs from
:func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
``np.where(m, df1, df2)``.
For further details and examples see the ``%(name)s`` documentation in
:ref:`indexing <indexing.where_mask>`.
Examples
--------
>>> s = pd.Series(range(5))
>>> s.where(s > 0)
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
dtype: float64
>>> s.mask(s > 0)
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
>>> s.where(s > 1, 10)
0 10
1 10
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
>>> df
A B
0 0 1
1 2 3
2 4 5
3 6 7
4 8 9
>>> m = df %% 3 == 0
>>> df.where(m, -df)
A B
0 0 -1
1 -2 3
2 -4 -5
3 6 -7
4 -8 9
>>> df.where(m, -df) == np.where(m, df, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
>>> df.where(m, -df) == df.mask(~m, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
"""
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="True",
cond_rev="False",
name="where",
name_other="mask",
)
)
def where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
other = com.apply_if_callable(other, self)
return self._where(
cond, other, inplace, axis, level, errors=errors, try_cast=try_cast
)
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="False",
cond_rev="True",
name="mask",
name_other="where",
)
)
def mask(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
inplace = validate_bool_kwarg(inplace, "inplace")
cond = com.apply_if_callable(cond, self)
# see gh-21891
if not hasattr(cond, "__invert__"):
cond = np.array(cond)
return self.where(
~cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
try_cast=try_cast,
errors=errors,
)
_shared_docs[
"shift"
] = """
Shift index by desired number of periods with an optional time `freq`.
When `freq` is not passed, shift the index without realigning the data.
If `freq` is passed (in this case, the index must be date or datetime,
or it will raise a `NotImplementedError`), the index will be
increased using the periods and the `freq`.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
freq : DateOffset, tseries.offsets, timedelta, or str, optional
Offset to use from the tseries module or time rule (e.g. 'EOM').
If `freq` is specified then the index values are shifted but the
data is not realigned. That is, use `freq` if you would like to
extend the index when shifting and preserve the original data.
axis : {0 or 'index', 1 or 'columns', None}, default None
Shift direction.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of `self`.
For numeric data, ``np.nan`` is used.
For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
For extension dtypes, ``self.dtype.na_value`` is used.
.. versionchanged:: 0.24.0
Returns
-------
%(klass)s
Copy of input object, shifted.
See Also
--------
Index.shift : Shift values of Index.
DatetimeIndex.shift : Shift values of DatetimeIndex.
PeriodIndex.shift : Shift values of PeriodIndex.
tshift : Shift the time index, using the index's frequency if
available.
Examples
--------
>>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]})
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=1, axis='columns')
Col1 Col2 Col3
0 NaN 10.0 13.0
1 NaN 20.0 23.0
2 NaN 15.0 18.0
3 NaN 30.0 33.0
4 NaN 45.0 48.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
"""
@Appender(_shared_docs["shift"] % _shared_doc_kwargs)
def shift(
self: FrameOrSeries, periods=1, freq=None, axis=0, fill_value=None
) -> FrameOrSeries:
if periods == 0:
return self.copy()
block_axis = self._get_block_manager_axis(axis)
if freq is None:
new_data = self._data.shift(
periods=periods, axis=block_axis, fill_value=fill_value
)
else:
return self.tshift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:
if periods == 0:
return self
if periods > 0:
vslicer = slice(None, -periods)
islicer = slice(periods, None)
else:
vslicer = slice(-periods, None)
islicer = slice(None, periods)
new_obj = self._slice(vslicer, axis=axis)
shifted_axis = self._get_axis(axis)[islicer]
new_obj.set_axis(shifted_axis, axis=axis, inplace=True)
return new_obj.__finalize__(self)
def tshift(
self: FrameOrSeries, periods: int = 1, freq=None, axis=0
) -> FrameOrSeries:
index = self._get_axis(axis)
if freq is None:
freq = getattr(index, "freq", None)
if freq is None:
freq = getattr(index, "inferred_freq", None)
if freq is None:
msg = "Freq was not given and was not set in the index"
raise ValueError(msg)
if periods == 0:
return self
if isinstance(freq, str):
freq = to_offset(freq)
block_axis = self._get_block_manager_axis(axis)
if isinstance(index, PeriodIndex):
orig_freq = to_offset(index.freq)
if freq == orig_freq:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
elif orig_freq is not None:
raise ValueError(
f"Given freq {freq.rule_code} does not match "
f"PeriodIndex freq {orig_freq.rule_code}"
)
else:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods, freq)
return self._constructor(new_data).__finalize__(self)
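# Illustrative sketch contrasting ``shift`` (moves the data, keeps the index)
# with ``tshift`` (moves the index by its frequency); not part of pandas.
def _demo_shift_vs_tshift():
    import pandas as pd

    idx = pd.date_range("2020-01-01", periods=3, freq="D")
    s = pd.Series([1, 2, 3], index=idx)
    assert s.shift(1).tolist()[1:] == [1, 2]  # a NaN is introduced at the front
    assert s.tshift(1).index[0] == pd.Timestamp("2020-01-02")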
def truncate(
self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True
) -> FrameOrSeries:
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
# GH 17935
# Check that index is sorted
if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
# if we have a date index, convert to dates, otherwise
# treat like a slice
if ax.is_all_dates:
from pandas.core.tools.datetimes import to_datetime
before = to_datetime(before)
after = to_datetime(after)
if before is not None and after is not None:
if before > after:
raise ValueError(f"Truncate: {after} must be after {before}")
slicer = [slice(None, None)] * self._AXIS_LEN
slicer[axis] = slice(before, after)
result = self.loc[tuple(slicer)]
if isinstance(ax, MultiIndex):
setattr(result, self._get_axis_name(axis), ax.truncate(before, after))
if copy:
result = result.copy()
return result
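# Illustrative sketch of ``truncate`` (not part of pandas): unlike positional
# slicing, both endpoints are kept, and the index must be sorted (GH 17935).
def _demo_truncate():
    import pandas as pd

    s = pd.Series(range(5), index=[1, 2, 3, 4, 5])
    assert s.truncate(before=2, after=4).tolist() == [1, 2, 3]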
def tz_convert(
self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True
) -> FrameOrSeries:
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_convert(ax, tz):
if not hasattr(ax, "tz_convert"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_convert(tz)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_convert(ax.levels[level], tz)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_convert(ax, tz)
result = self._constructor(self._data, copy=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self)
def tz_localize(
self: FrameOrSeries,
tz,
axis=0,
level=None,
copy: bool_t = True,
ambiguous="raise",
nonexistent: str = "raise",
) -> FrameOrSeries:
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise', "
"'NaT', 'shift_forward', 'shift_backward' or "
"a timedelta object"
)
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_localize(ax, tz, ambiguous, nonexistent):
if not hasattr(ax, "tz_localize"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_localize(ax, tz, ambiguous, nonexistent)
result = self._constructor(self._data, copy=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self)
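# Illustrative sketch of localize-then-convert (not part of pandas);
# converting changes the wall clock but not the underlying instants.
def _demo_tz():
    import pandas as pd

    idx = pd.date_range("2020-01-01", periods=2, freq="H")
    localized = pd.Series([1, 2], index=idx).tz_localize("UTC")
    converted = localized.tz_convert("US/Eastern")
    # equality compares instants, so the two indexes still match
    assert (localized.index == converted.index).all()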
# ----------------------------------------------------------------------
# Numeric Methods
def abs(self: FrameOrSeries) -> FrameOrSeries:
return np.abs(self)
def describe(
self: FrameOrSeries, percentiles=None, include=None, exclude=None
) -> FrameOrSeries:
if self.ndim == 2 and self.columns.size == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
# explicit conversion of `percentiles` to list
percentiles = list(percentiles)
# get them all to be in [0, 1]
validate_percentile(percentiles)
# median should always be included
if 0.5 not in percentiles:
percentiles.append(0.5)
percentiles = np.asarray(percentiles)
else:
percentiles = np.array([0.25, 0.5, 0.75])
# sort and check for duplicates
unique_pcts = np.unique(percentiles)
if len(unique_pcts) < len(percentiles):
raise ValueError("percentiles cannot contain duplicates")
percentiles = unique_pcts
formatted_percentiles = format_percentiles(percentiles)
def describe_numeric_1d(series):
stat_index = (
["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
)
d = (
[series.count(), series.mean(), series.std(), series.min()]
+ series.quantile(percentiles).tolist()
+ [series.max()]
)
return pd.Series(d, index=stat_index, name=series.name)
def describe_categorical_1d(data):
names = ["count", "unique"]
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
result = [data.count(), count_unique]
dtype = None
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
names += ["top", "freq"]
result += [top, freq]
# If the DataFrame is empty, set 'top' and 'freq' to None
# to maintain output shape consistency
else:
names += ["top", "freq"]
result += [np.nan, np.nan]
dtype = "object"
return pd.Series(result, index=names, name=data.name, dtype=dtype)
def describe_timestamp_1d(data):
# GH-30164
stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"]
d = (
[data.count(), data.mean(), data.min()]
+ data.quantile(percentiles).tolist()
+ [data.max()]
)
return pd.Series(d, index=stat_index, name=data.name)
def describe_1d(data):
if is_bool_dtype(data):
return describe_categorical_1d(data)
elif is_numeric_dtype(data):
return describe_numeric_1d(data)
elif is_datetime64_any_dtype(data):
return describe_timestamp_1d(data)
elif is_timedelta64_dtype(data):
return describe_numeric_1d(data)
else:
return describe_categorical_1d(data)
if self.ndim == 1:
return describe_1d(self)
elif (include is None) and (exclude is None):
# when some numerics are found, keep only numerics
data = self.select_dtypes(include=[np.number])
if len(data.columns) == 0:
data = self
elif include == "all":
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
else:
data = self.select_dtypes(include=include, exclude=exclude)
ldesc = [describe_1d(s) for _, s in data.items()]
# set a convenient order for rows
names: List[Label] = []
ldesc_indexes = sorted((x.index for x in ldesc), key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
d = pd.concat([x.reindex(names, copy=False) for x in ldesc], axis=1, sort=False)
d.columns = data.columns.copy()
return d
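# Illustrative sketch of ``describe`` (not part of pandas): the median is
# always included even when not requested, per the percentile handling above.
def _demo_describe():
    import pandas as pd

    d = pd.Series([1, 2, 3, 4]).describe(percentiles=[0.25, 0.75])
    assert "50%" in d.index
    assert d["count"] == 4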
_shared_docs[
"pct_change"
] = """
Percentage change between the current and a prior element.
Computes the percentage change from the immediately previous row by
default. This is useful in comparing the percentage of change in a time
series of elements.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : str, default 'pad'
How to handle NAs before computing percent changes.
limit : int, default None
The number of consecutive NAs to fill before stopping.
freq : DateOffset, timedelta, or str, optional
Increment to use from time series API (e.g. 'M' or BDay()).
**kwargs
Additional keyword arguments are passed into
`DataFrame.shift` or `Series.shift`.
Returns
-------
chg : Series or DataFrame
The same type as the calling object.
See Also
--------
Series.diff : Compute the difference of two elements in a Series.
DataFrame.diff : Compute the difference of two elements in a DataFrame.
Series.shift : Shift the index by some number of periods.
DataFrame.shift : Shift the index by some number of periods.
Examples
--------
**Series**
>>> s = pd.Series([90, 91, 85])
>>> s
0 90
1 91
2 85
dtype: int64
>>> s.pct_change()
0 NaN
1 0.011111
2 -0.065934
dtype: float64
>>> s.pct_change(periods=2)
0 NaN
1 NaN
2 -0.055556
dtype: float64
See the percentage change in a Series where NAs are filled with the
last valid observation carried forward to the next valid one.
>>> s = pd.Series([90, 91, None, 85])
>>> s
0 90.0
1 91.0
2 NaN
3 85.0
dtype: float64
>>> s.pct_change(fill_method='ffill')
0 NaN
1 0.011111
2 0.000000
3 -0.065934
dtype: float64
**DataFrame**
Percentage change in French franc, Deutsche Mark, and Italian lira from
1980-01-01 to 1980-03-01.
>>> df = pd.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
Percentage change in GOOG and APPL stock volume. This shows computing
the percentage change between columns.
>>> df = pd.DataFrame({
... '2016': [1769950, 30586265],
... '2015': [1500923, 40912316],
... '2014': [1371819, 41403351]},
... index=['GOOG', 'APPL'])
>>> df
2016 2015 2014
GOOG 1769950 1500923 1371819
APPL 30586265 40912316 41403351
>>> df.pct_change(axis='columns')
2016 2015 2014
GOOG NaN -0.151997 -0.086016
APPL NaN 0.337604 0.012002
"""
@Appender(_shared_docs["pct_change"] % _shared_doc_kwargs)
def pct_change(
self: FrameOrSeries,
periods=1,
fill_method="pad",
limit=None,
freq=None,
**kwargs,
) -> FrameOrSeries:
# TODO: Not sure if above is correct - need someone to confirm.
axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name))
if fill_method is None:
data = self
else:
data = self._ensure_type(
self.fillna(method=fill_method, axis=axis, limit=limit)
)
rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1
if freq is not None:
# Shift method is implemented differently when freq is not None
# We want to restore the original index
rs = rs.loc[~rs.index.duplicated()]
rs = rs.reindex_like(data)
return rs
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
if axis is None:
raise ValueError("Must specify 'axis' when aggregating by level.")
grouped = self.groupby(level=level, axis=axis, sort=False)
if hasattr(grouped, name) and skipna:
return getattr(grouped, name)(**kwargs)
axis = self._get_axis_number(axis)
method = getattr(type(self), name)
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
return grouped.aggregate(applyf)
@classmethod
def _add_numeric_operations(cls):
axis_descr, name1, name2 = _doc_parms(cls)
cls.any = _make_logical_function(
cls,
"any",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc=_any_desc,
func=nanops.nanany,
see_also=_any_see_also,
examples=_any_examples,
empty_value=False,
)
cls.all = _make_logical_function(
cls,
"all",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc=_all_desc,
func=nanops.nanall,
see_also=_all_see_also,
examples=_all_examples,
empty_value=True,
)
@Substitution(
desc="Return the mean absolute deviation of the values "
"for the requested axis.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also="",
examples="",
)
@Appender(_num_doc_mad)
def mad(self, axis=None, skipna=None, level=None):
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna)
data = self._get_numeric_data()
if axis == 0:
demeaned = data - data.mean(axis=0)
else:
demeaned = data.sub(data.mean(axis=1), axis=0)
return np.abs(demeaned).mean(axis=axis, skipna=skipna)
cls.mad = mad
cls.sem = _make_stat_function_ddof(
cls,
"sem",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased standard error of the mean over requested "
"axis.\n\nNormalized by N-1 by default. This can be changed "
"using the ddof argument",
func=nanops.nansem,
)
cls.var = _make_stat_function_ddof(
cls,
"var",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased variance over requested axis.\n\nNormalized by "
"N-1 by default. This can be changed using the ddof argument",
func=nanops.nanvar,
)
cls.std = _make_stat_function_ddof(
cls,
"std",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
"ddof argument",
func=nanops.nanstd,
)
cls.cummin = _make_cum_function(
cls,
"cummin",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="minimum",
accum_func=np.minimum.accumulate,
accum_func_name="min",
mask_a=np.inf,
mask_b=np.nan,
examples=_cummin_examples,
)
cls.cumsum = _make_cum_function(
cls,
"cumsum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="sum",
accum_func=np.cumsum,
accum_func_name="sum",
mask_a=0.0,
mask_b=np.nan,
examples=_cumsum_examples,
)
cls.cumprod = _make_cum_function(
cls,
"cumprod",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="product",
accum_func=np.cumprod,
accum_func_name="prod",
mask_a=1.0,
mask_b=np.nan,
examples=_cumprod_examples,
)
cls.cummax = _make_cum_function(
cls,
"cummax",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="maximum",
accum_func=np.maximum.accumulate,
accum_func_name="max",
mask_a=-np.inf,
mask_b=np.nan,
examples=_cummax_examples,
)
cls.sum = _make_min_count_stat_function(
cls,
"sum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the sum of the values for the requested axis.\n\n"
"This is equivalent to the method ``numpy.sum``.",
func=nanops.nansum,
see_also=_stat_func_see_also,
examples=_sum_examples,
)
cls.mean = _make_stat_function(
cls,
"mean",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the mean of the values for the requested axis.",
func=nanops.nanmean,
)
cls.skew = _make_stat_function(
cls,
"skew",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.",
func=nanops.nanskew,
)
cls.kurt = _make_stat_function(
cls,
"kurt",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased kurtosis over requested axis.\n\n"
"Kurtosis obtained using Fisher's definition of\n"
"kurtosis (kurtosis of normal == 0.0). Normalized "
"by N-1.",
func=nanops.nankurt,
)
cls.kurtosis = cls.kurt
cls.prod = _make_min_count_stat_function(
cls,
"prod",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the product of the values for the requested axis.",
func=nanops.nanprod,
examples=_prod_examples,
)
cls.product = cls.prod
cls.median = _make_stat_function(
cls,
"median",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the median of the values for the requested axis.",
func=nanops.nanmedian,
)
cls.max = _make_stat_function(
cls,
"max",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the maximum of the values for the requested axis.\n\n"
"If you want the *index* of the maximum, use ``idxmax``. This is"
"the equivalent of the ``numpy.ndarray`` method ``argmax``.",
func=nanops.nanmax,
see_also=_stat_func_see_also,
examples=_max_examples,
)
cls.min = _make_stat_function(
cls,
"min",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the minimum of the values for the requested axis.\n\n"
"If you want the *index* of the minimum, use ``idxmin``. This is"
"the equivalent of the ``numpy.ndarray`` method ``argmin``.",
func=nanops.nanmin,
see_also=_stat_func_see_also,
examples=_min_examples,
)
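# Illustrative sketch (not part of pandas) checking ``mad`` against its
# definition in the block above: mean absolute deviation from the mean.
def _demo_mad():
    import pandas as pd

    s = pd.Series([1.0, 2.0, 3.0, 4.0])
    assert s.mad() == (s - s.mean()).abs().mean() == 1.0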
@classmethod
def _add_series_or_dataframe_operations(cls):
from pandas.core.window import EWM, Expanding, Rolling, Window
@Appender(Rolling.__doc__)
def rolling(
self,
window,
min_periods=None,
center=False,
win_type=None,
on=None,
axis=0,
closed=None,
):
axis = self._get_axis_number(axis)
if win_type is not None:
return Window(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
return Rolling(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
cls.rolling = rolling
@Appender(Expanding.__doc__)
def expanding(self, min_periods=1, center=False, axis=0):
axis = self._get_axis_number(axis)
return Expanding(self, min_periods=min_periods, center=center, axis=axis)
cls.expanding = expanding
@Appender(EWM.__doc__)
def ewm(
self,
com=None,
span=None,
halflife=None,
alpha=None,
min_periods=0,
adjust=True,
ignore_na=False,
axis=0,
):
axis = self._get_axis_number(axis)
return EWM(
self,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
)
cls.ewm = ewm
@Appender(_shared_docs["transform"] % dict(axis="", **_shared_doc_kwargs))
def transform(self, func, *args, **kwargs):
result = self.agg(func, *args, **kwargs)
if is_scalar(result) or len(result) != len(self):
raise ValueError("transforms cannot produce aggregated results")
return result
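# Illustrative sketch of the ``transform`` contract enforced above (not part
# of pandas): the result must keep the input's length, so aggregations raise.
def _demo_transform():
    import pandas as pd

    s = pd.Series([1, 2, 3])
    assert s.transform(lambda x: x * 2).tolist() == [2, 4, 6]
    try:
        s.transform("sum")  # aggregates to a scalar
    except ValueError:
        pass  # "transforms cannot produce aggregated results"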
# ----------------------------------------------------------------------
# Misc methods
_shared_docs[
"valid_index"
] = """
Return index for %(position)s non-NA/null value.
Returns
-------
scalar : type of index
Notes
-----
If all elements are non-NA/null, returns None.
Also returns None for empty %(klass)s.
"""
def _find_valid_index(self, how: str):
idxpos = find_valid_index(self._values, how)
if idxpos is None:
return None
return self.index[idxpos]
@Appender(
_shared_docs["valid_index"] % {"position": "first", "klass": "Series/DataFrame"}
)
def first_valid_index(self):
return self._find_valid_index("first")
@Appender(
_shared_docs["valid_index"] % {"position": "last", "klass": "Series/DataFrame"}
)
def last_valid_index(self):
return self._find_valid_index("last")
def _doc_parms(cls):
axis_descr = (
f"{{{', '.join(f'{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS))}}}"
)
name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else "scalar"
name2 = cls.__name__
return axis_descr, name, name2
_num_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
%(min_count)s\
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
%(name1)s or %(name2)s (if level specified)\
%(see_also)s\
%(examples)s
"""
_num_doc_mad = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default None
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
Returns
-------
%(name1)s or %(name2)s (if level specified)\
%(see_also)s\
%(examples)s
"""
_num_ddof_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
%(name1)s or %(name2)s (if level specified)\n"""
_bool_doc = """
%(desc)s
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
* 1 / 'columns' : reduce the columns, return a Series whose index is the
original index.
* None : reduce all axes, return a scalar.
bool_only : bool, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data. Not implemented for Series.
skipna : bool, default True
Exclude NA/null values. If the entire row/column is NA and skipna is
True, then the result will be %(empty_value)s, as for an empty row/column.
If skipna is False, then NA are treated as True, because these are not
equal to zero.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
If level is specified, then, %(name2)s is returned; otherwise, %(name1)s
is returned.
%(see_also)s
%(examples)s"""
_all_desc = """\
Return whether all elements are True, potentially over an axis.
Returns True unless there is at least one element within a series or
along a DataFrame axis that is False or equivalent (e.g. zero or
empty)."""
_all_examples = """\
Examples
--------
**Series**
>>> pd.Series([True, True]).all()
True
>>> pd.Series([True, False]).all()
False
>>> pd.Series([]).all()
True
>>> pd.Series([np.nan]).all()
True
>>> pd.Series([np.nan]).all(skipna=False)
True
**DataFrames**
Create a dataframe from a dictionary.
>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
col1 col2
0 True True
1 True False
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
dtype: bool
Specify ``axis='columns'`` to check if row-wise values all return True.
>>> df.all(axis='columns')
0 True
1 False
dtype: bool
Or ``axis=None`` for whether every value is True.
>>> df.all(axis=None)
False
"""
_all_see_also = """\
See Also
--------
Series.all : Return True if all elements are True.
DataFrame.any : Return True if one (or more) elements are True.
"""
_cnum_doc = """
Return cumulative %(desc)s over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
%(desc)s.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs :
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
See Also
--------
core.window.Expanding.%(accum_func_name)s : Similar functionality
but ignores ``NaN`` values.
%(name2)s.%(accum_func_name)s : Return the %(desc)s over
%(name2)s axis.
%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.
%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.
%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.
%(name2)s.cumprod : Return cumulative product over %(name2)s axis.
%(examples)s"""
_cummin_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummin()
0 2.0
1 NaN
2 2.0
3 -1.0
4 -1.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummin(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
To iterate over columns and find the minimum in each row,
use ``axis=1``
>>> df.cummin(axis=1)
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
"""
_cumsum_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumsum()
0 2.0
1 NaN
2 7.0
3 6.0
4 6.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumsum(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
To iterate over columns and find the sum in each row,
use ``axis=1``
>>> df.cumsum(axis=1)
A B
0 2.0 3.0
1 3.0 NaN
2 1.0 1.0
"""
_cumprod_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumprod()
0 2.0
1 NaN
2 10.0
3 -10.0
4 -0.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumprod(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the product
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 6.0 0.0
To iterate over columns and find the product in each row,
use ``axis=1``
>>> df.cumprod(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 0.0
"""
_cummax_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummax()
0 2.0
1 NaN
2 5.0
3 5.0
4 5.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummax(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
To iterate over columns and find the maximum in each row,
use ``axis=1``
>>> df.cummax(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 1.0
"""
_any_see_also = """\
See Also
--------
numpy.any : Numpy version of this method.
Series.any : Return whether any element is True.
Series.all : Return whether all elements are True.
DataFrame.any : Return whether any element is True over requested axis.
DataFrame.all : Return whether all elements are True over requested axis.
"""
_any_desc = """\
Return whether any element is True, potentially over an axis.
Returns False unless there is at least one element within a series or
along a DataFrame axis that is True or equivalent (e.g. non-zero or
non-empty)."""
_any_examples = """\
Examples
--------
**Series**
For Series input, the output is a scalar indicating whether any element
is True.
>>> pd.Series([False, False]).any()
False
>>> pd.Series([True, False]).any()
True
>>> pd.Series([]).any()
False
>>> pd.Series([np.nan]).any()
False
>>> pd.Series([np.nan]).any(skipna=False)
True
**DataFrame**
Whether each column contains at least one True element (the default).
>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]})
>>> df
A B C
0 1 0 0
1 2 2 0
>>> df.any()
A True
B True
C False
dtype: bool
Aggregating over the columns.
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]})
>>> df
A B
0 True 1
1 False 2
>>> df.any(axis='columns')
0 True
1 True
dtype: bool
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]})
>>> df
A B
0 True 1
1 False 0
>>> df.any(axis='columns')
0 True
1 False
dtype: bool
Aggregating over the entire DataFrame with ``axis=None``.
>>> df.any(axis=None)
True
`any` for an empty DataFrame is an empty Series.
>>> pd.DataFrame([]).any()
Series([], dtype: bool)
"""
_shared_docs[
"stat_func_example"
] = """
Examples
--------
>>> idx = pd.MultiIndex.from_arrays([
... ['warm', 'warm', 'cold', 'cold'],
... ['dog', 'falcon', 'fish', 'spider']],
... names=['blooded', 'animal'])
>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)
>>> s
blooded animal
warm dog 4
falcon 2
cold fish 0
spider 8
Name: legs, dtype: int64
>>> s.{stat_func}()
{default_output}
{verb} using level names, as well as indices.
>>> s.{stat_func}(level='blooded')
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64
>>> s.{stat_func}(level=0)
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64"""
_sum_examples = _shared_docs["stat_func_example"].format(
stat_func="sum", verb="Sum", default_output=14, level_output_0=6, level_output_1=8
)
_sum_examples += """
By default, the sum of an empty or all-NA Series is ``0``.
>>> pd.Series([]).sum() # min_count=0 is the default
0.0
This can be controlled with the ``min_count`` parameter. For example, if
you'd like the sum of an empty series to be NaN, pass ``min_count=1``.
>>> pd.Series([]).sum(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).sum()
0.0
>>> pd.Series([np.nan]).sum(min_count=1)
nan"""
_max_examples = _shared_docs["stat_func_example"].format(
stat_func="max", verb="Max", default_output=8, level_output_0=4, level_output_1=8
)
_min_examples = _shared_docs["stat_func_example"].format(
stat_func="min", verb="Min", default_output=0, level_output_0=2, level_output_1=0
)
_stat_func_see_also = """
See Also
--------
Series.sum : Return the sum.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis."""
_prod_examples = """
Examples
--------
By default, the product of an empty or all-NA Series is ``1``.
>>> pd.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter.
>>> pd.Series([]).prod(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).prod()
1.0
>>> pd.Series([np.nan]).prod(min_count=1)
nan"""
_min_count_stub = """\
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
.. versionadded:: 0.22.0
Added with the default being 0. This means the sum of an all-NA
or empty Series is 0, and the product of an all-NA or empty
Series is 1.
"""
def _make_min_count_stat_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
func: Callable,
see_also: str = "",
examples: str = "",
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count=_min_count_stub,
see_also=see_also,
examples=examples,
)
@Appender(_num_doc)
def stat_func(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
if name == "sum":
nv.validate_sum(tuple(), kwargs)
elif name == "prod":
nv.validate_prod(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, min_count=min_count
)
return self._reduce(
func,
name=name,
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
return set_function_name(stat_func, name, cls)
def _make_stat_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
func: Callable,
see_also: str = "",
examples: str = "",
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also=see_also,
examples=examples,
)
@Appender(_num_doc)
def stat_func(
self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
):
if name == "median":
nv.validate_median(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only
)
return set_function_name(stat_func, name, cls)
def _make_stat_function_ddof(
cls, name: str, name1: str, name2: str, axis_descr: str, desc: str, func: Callable
) -> Callable:
@Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)
@Appender(_num_ddof_doc)
def stat_func(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, ddof=ddof
)
return self._reduce(
func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof
)
return set_function_name(stat_func, name, cls)
def _make_cum_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
accum_func: Callable,
accum_func_name: str,
mask_a: float,
mask_b: float,
examples: str,
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
accum_func_name=accum_func_name,
examples=examples,
)
@Appender(_cnum_doc)
def cum_func(self, axis=None, skipna=True, *args, **kwargs):
skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
if axis is None:
axis = self._stat_axis_number
else:
axis = self._get_axis_number(axis)
if axis == 1:
return cum_func(self.T, axis=0, skipna=skipna, *args, **kwargs).T
def na_accum_func(blk_values):
# We will be applying this function to block values
if blk_values.dtype.kind in ["m", "M"]:
# GH#30460, GH#29058
# numpy 1.18 started sorting NaTs at the end instead of beginning,
# so we need to work around to maintain backwards-consistency.
orig_dtype = blk_values.dtype
# We need to define mask before masking NaTs
mask = isna(blk_values)
if accum_func == np.minimum.accumulate:
# Note: the accum_func comparison fails as an "is" comparison
y = blk_values.view("i8")
y[mask] = np.iinfo(np.int64).max
changed = True
else:
y = blk_values
changed = False
result = accum_func(y.view("i8"), axis)
if skipna:
np.putmask(result, mask, iNaT)
elif accum_func == np.minimum.accumulate:
# Restore NaTs that we masked previously
nz = (~np.asarray(mask)).nonzero()[0]
if len(nz):
# everything up to the first non-na entry stays NaT
result[: nz[0]] = iNaT
if changed:
# restore NaT elements
y[mask] = iNaT # TODO: could try/finally for this?
if isinstance(blk_values, np.ndarray):
result = result.view(orig_dtype)
else:
# DatetimeArray
result = type(blk_values)._from_sequence(result, dtype=orig_dtype)
elif skipna and not issubclass(
blk_values.dtype.type, (np.integer, np.bool_)
):
vals = blk_values.copy().T
mask = isna(vals)
np.putmask(vals, mask, mask_a)
result = accum_func(vals, axis)
np.putmask(result, mask, mask_b)
else:
result = accum_func(blk_values.T, axis)
# transpose back for ndarray, not for EA
return result.T if hasattr(result, "T") else result
result = self._data.apply(na_accum_func)
d = self._construct_axes_dict()
d["copy"] = False
return self._constructor(result, **d).__finalize__(self)
return set_function_name(cum_func, name, cls)
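# --- Hedged illustration (not part of the original module) ---
# A NumPy-only sketch of the NaT trick ``na_accum_func`` uses for ``cummin``
# on datetimes: NaT is stored as the smallest int64, so it is masked with
# int64 max before ``minimum.accumulate`` and written back afterwards.
def _demo_nat_cummin():
    import numpy as np

    nat = np.iinfo(np.int64).min  # the sentinel NumPy stores for NaT
    values = np.array(["2020-01-02", "NaT", "2020-01-01"],
                      dtype="datetime64[ns]")
    i8 = values.view("i8").copy()
    mask = i8 == nat
    i8[mask] = np.iinfo(np.int64).max  # keep NaT from winning the minimum
    out = np.minimum.accumulate(i8)
    out[mask] = nat                    # skipna=True: restore NaT positions
    return out.view("M8[ns]")          # ['2020-01-02', NaT, '2020-01-01']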
def _make_logical_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
func: Callable,
see_also: str,
examples: str,
empty_value: bool,
) -> Callable:
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
see_also=see_also,
examples=examples,
empty_value=empty_value,
)
@Appender(_bool_doc)
def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
nv.validate_logical_func(tuple(), kwargs, fname=name)
if level is not None:
if bool_only is not None:
raise NotImplementedError(
"Option bool_only is not implemented with option level."
)
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
func,
name=name,
axis=axis,
skipna=skipna,
numeric_only=bool_only,
filter_type="bool",
)
return set_function_name(logical_func, name, cls)
| true
| true
|
f719607b2a4e7f5eac2478307ce00dc1f2055669
| 2,541
|
py
|
Python
|
e3sm_to_cmip/cmor_handlers/zos.py
|
xylar/e3sm_to_cmip
|
4fbe8fc91475eae26df839d0cd8062c4b8dc16ae
|
[
"MIT"
] | 7
|
2018-05-03T12:30:06.000Z
|
2022-01-20T23:52:02.000Z
|
e3sm_to_cmip/cmor_handlers/zos.py
|
xylar/e3sm_to_cmip
|
4fbe8fc91475eae26df839d0cd8062c4b8dc16ae
|
[
"MIT"
] | 91
|
2018-05-02T21:11:30.000Z
|
2022-03-30T20:25:07.000Z
|
e3sm_to_cmip/cmor_handlers/zos.py
|
xylar/e3sm_to_cmip
|
4fbe8fc91475eae26df839d0cd8062c4b8dc16ae
|
[
"MIT"
] | 7
|
2018-05-15T02:07:34.000Z
|
2021-06-30T18:20:33.000Z
|
"""
compute Sea Surface Height Above Geoid, zos
"""
from __future__ import absolute_import, division, print_function
import xarray
import logging
from e3sm_to_cmip import mpas
from e3sm_to_cmip.util import print_message
# 'MPAS' as a placeholder for raw variables needed
RAW_VARIABLES = ['MPASO', 'MPAS_mesh', 'MPAS_map']
# output variable name
VAR_NAME = 'zos'
VAR_UNITS = 'm'
TABLE = 'CMIP6_Omon.json'
def handle(infiles, tables, user_input_path, **kwargs):
"""
Transform MPASO timeMonthly_avg_pressureAdjustedSSH into CMIP.zos
Parameters
----------
infiles : dict
a dictionary with namelist, mesh and time series file names
tables : str
path to CMOR tables
user_input_path : str
path to user input json file
Returns
-------
varname : str
the name of the processed variable after processing is complete
"""
if kwargs.get('simple'):
msg = f"{VAR_NAME} is not supported for simple conversion"
print_message(msg)
return
msg = 'Starting {name}'.format(name=__name__)
logging.info(msg)
meshFileName = infiles['MPAS_mesh']
mappingFileName = infiles['MPAS_map']
timeSeriesFiles = infiles['MPASO']
dsMesh = xarray.open_dataset(meshFileName, mask_and_scale=False)
cellMask2D, _ = mpas.get_cell_masks(dsMesh)
areaCell = dsMesh.areaCell.where(cellMask2D)
variableList = ['timeMonthly_avg_pressureAdjustedSSH',
'xtime_startMonthly', 'xtime_endMonthly']
ds = xarray.Dataset()
with mpas.open_mfdataset(timeSeriesFiles, variableList) as dsIn:
ssh = dsIn.timeMonthly_avg_pressureAdjustedSSH.where(cellMask2D)
sshAvg = (ssh*areaCell).sum(dim='nCells')/areaCell.sum(dim='nCells')
ds[VAR_NAME] = ssh - sshAvg
ds = mpas.add_time(ds, dsIn)
ds.compute()
ds = mpas.remap(ds, mappingFileName)
mpas.setup_cmor(VAR_NAME, tables, user_input_path, component='ocean')
# create axes
axes = [{'table_entry': 'time',
'units': ds.time.units},
{'table_entry': 'latitude',
'units': 'degrees_north',
'coord_vals': ds.lat.values,
'cell_bounds': ds.lat_bnds.values},
{'table_entry': 'longitude',
'units': 'degrees_east',
'coord_vals': ds.lon.values,
'cell_bounds': ds.lon_bnds.values}]
try:
mpas.write_cmor(axes, ds, VAR_NAME, VAR_UNITS)
except Exception:
return ""
return VAR_NAME
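# --- Hedged illustration (appended; not part of the original handler) ---
# The core of ``handle`` is an area-weighted global-mean removal:
# zos = ssh - sum(ssh * areaCell) / sum(areaCell). A self-contained sketch
# on synthetic data (numpy assumed available alongside xarray):
if __name__ == "__main__":
    import numpy as np
    demo_ssh = xarray.DataArray(np.array([1.0, 2.0, 3.0]), dims="nCells")
    demo_area = xarray.DataArray(np.array([2.0, 1.0, 1.0]), dims="nCells")
    demo_avg = (demo_ssh * demo_area).sum(dim="nCells") / demo_area.sum(dim="nCells")
    print(float(demo_avg))               # (1*2 + 2*1 + 3*1) / 4 = 1.75
    print((demo_ssh - demo_avg).values)  # the zos-style anomaly per cell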
| 28.233333
| 76
| 0.648564
|
from __future__ import absolute_import, division, print_function
import xarray
import logging
from e3sm_to_cmip import mpas
from e3sm_to_cmip.util import print_message
RAW_VARIABLES = ['MPASO', 'MPAS_mesh', 'MPAS_map']
VAR_NAME = 'zos'
VAR_UNITS = 'm'
TABLE = 'CMIP6_Omon.json'
def handle(infiles, tables, user_input_path, **kwargs):
if kwargs.get('simple'):
msg = f"{VAR_NAME} is not supported for simple conversion"
print_message(msg)
return
msg = 'Starting {name}'.format(name=__name__)
logging.info(msg)
meshFileName = infiles['MPAS_mesh']
mappingFileName = infiles['MPAS_map']
timeSeriesFiles = infiles['MPASO']
dsMesh = xarray.open_dataset(meshFileName, mask_and_scale=False)
cellMask2D, _ = mpas.get_cell_masks(dsMesh)
areaCell = dsMesh.areaCell.where(cellMask2D)
variableList = ['timeMonthly_avg_pressureAdjustedSSH',
'xtime_startMonthly', 'xtime_endMonthly']
ds = xarray.Dataset()
with mpas.open_mfdataset(timeSeriesFiles, variableList) as dsIn:
ssh = dsIn.timeMonthly_avg_pressureAdjustedSSH.where(cellMask2D)
sshAvg = (ssh*areaCell).sum(dim='nCells')/areaCell.sum(dim='nCells')
ds[VAR_NAME] = ssh - sshAvg
ds = mpas.add_time(ds, dsIn)
ds.compute()
ds = mpas.remap(ds, mappingFileName)
mpas.setup_cmor(VAR_NAME, tables, user_input_path, component='ocean')
axes = [{'table_entry': 'time',
'units': ds.time.units},
{'table_entry': 'latitude',
'units': 'degrees_north',
'coord_vals': ds.lat.values,
'cell_bounds': ds.lat_bnds.values},
{'table_entry': 'longitude',
'units': 'degrees_east',
'coord_vals': ds.lon.values,
'cell_bounds': ds.lon_bnds.values}]
try:
mpas.write_cmor(axes, ds, VAR_NAME, VAR_UNITS)
except Exception:
return ""
return VAR_NAME
| true
| true
|
f719619a72f865368354a85f0cae0766341013ef
| 1,087
|
py
|
Python
|
supports/mover-performance-test/run_ssm_mover.py
|
MajorJason/SSM
|
3341585165ac10a47ddeed0e1d5e2467db482b99
|
[
"Apache-2.0"
] | 199
|
2017-04-19T06:37:24.000Z
|
2022-03-31T12:14:22.000Z
|
supports/mover-performance-test/run_ssm_mover.py
|
Dam1029/SSM
|
d459811728980258f4ebd0b81022620b750863fe
|
[
"Apache-2.0"
] | 1,091
|
2017-04-14T07:09:55.000Z
|
2022-01-20T11:15:54.000Z
|
supports/mover-performance-test/run_ssm_mover.py
|
Dam1029/SSM
|
d459811728980258f4ebd0b81022620b750863fe
|
[
"Apache-2.0"
] | 170
|
2017-04-14T03:45:30.000Z
|
2022-03-31T12:14:24.000Z
|
import sys
import time
from util import *
size = sys.argv[1]
num = sys.argv[2]
# The data directory is named by case; see prepare.sh
case = size + "_" + num
log = sys.argv[3]
#Either "allssd" or "alldisk"
action = sys.argv[4]
if action == "allssd":
rid = submit_rule("file: path matches \"/" + case + "/*\"| allssd")
elif action == "alldisk":
rid = submit_rule("file: path matches \"/" + case + "/*\"| alldisk")
start_rule(rid)
start_time = time.time()
rule = get_rule(rid)
last_checked = rule['numChecked']
last_cmdsgen = rule['numCmdsGen']
time.sleep(.1)
# Check whether all expected cmdlets have been generated.
# The overall number of cmdlets should equal the number of test files;
# if not, wait for more cmdlets to be generated.
cids = get_cids_of_rule(rid)
while len(cids) < int(num):
time.sleep(.1)
rule = get_rule(rid)
cids = get_cids_of_rule(rid)
time.sleep(.1)
cids = get_cids_of_rule(rid)
wait_cmdlets(cids)
end_time = time.time()
stop_rule(rid)
# append result to log file
f = open(log, 'a')
f.write(str(int(end_time - start_time)) + "s" + " " + '\n')
f.close()
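# --- Hedged illustration (appended; not part of the original script) ---
# The loop above polls forever if cmdlets are never generated. A generic
# poll-with-timeout helper (hypothetical, standard library only) that the
# same pattern could use instead:
def wait_until(predicate, interval=0.1, timeout=60.0):
    """Poll ``predicate`` until it returns True or ``timeout`` elapses."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False
# e.g. wait_until(lambda: len(get_cids_of_rule(rid)) >= int(num), timeout=300)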
| 24.155556
| 72
| 0.678933
|
import sys
import time
from util import *
size = sys.argv[1]
num = sys.argv[2]
case = size + "_" + num
log = sys.argv[3]
action = sys.argv[4]
if action == "allssd":
rid = submit_rule("file: path matches \"/" + case + "/*\"| allssd")
elif action == "alldisk":
rid = submit_rule("file: path matches \"/" + case + "/*\"| alldisk")
start_rule(rid)
start_time = time.time()
rule = get_rule(rid)
last_checked = rule['numChecked']
last_cmdsgen = rule['numCmdsGen']
time.sleep(.1)
cids = get_cids_of_rule(rid)
while len(cids) < int(num):
time.sleep(.1)
rule = get_rule(rid)
cids = get_cids_of_rule(rid)
time.sleep(.1)
cids = get_cids_of_rule(rid)
wait_cmdlets(cids)
end_time = time.time()
stop_rule(rid)
f = open(log, 'a')
f.write(str(int(end_time - start_time)) + "s" + " " + '\n')
f.close()
| true
| true
|
f71962cca1d8ea20c86aa01378d38c3db9829b67
| 9,032
|
py
|
Python
|
example_conda_pkg/descriptors.py
|
dajtmullaj/example_conda_pkg
|
7c2bf657d14c714608e653d7218fa3cd658a6297
|
[
"MIT"
] | null | null | null |
example_conda_pkg/descriptors.py
|
dajtmullaj/example_conda_pkg
|
7c2bf657d14c714608e653d7218fa3cd658a6297
|
[
"MIT"
] | null | null | null |
example_conda_pkg/descriptors.py
|
dajtmullaj/example_conda_pkg
|
7c2bf657d14c714608e653d7218fa3cd658a6297
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 3 21:21:19 2020
Project: chemplot (Chemical Space Visualization)
Content: Descriptor operation methods
@author: murat cihan sorkun
"""
from rdkit import Chem
from rdkit.Chem import AllChem
import pandas as pd
import math
import mordred
from mordred import Calculator, descriptors  # Don't remove these imports
from sklearn.linear_model import Lasso, LogisticRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.preprocessing import StandardScaler
def get_mordred_descriptors(smiles_list):
"""
Calculates the Mordred descriptors for given smiles list
:param smiles_list: List of smiles
:type smiles_list: list
:returns: The calculated descriptors list for the given smiles
:rtype: Dataframe
"""
return generate_mordred_descriptors(smiles_list, Chem.MolFromSmiles, 'SMILES')
def get_mordred_descriptors_from_inchi(inchi_list):
"""
Calculates the Mordred descriptors for given InChi list
:param inchi_list: List of InChi
:type inchi_list: list
:returns: The calculated descriptors list for the given smiles
:rtype: Dataframe
"""
return generate_mordred_descriptors(inchi_list, Chem.MolFromInchi, 'InChi')
def generate_mordred_descriptors(encoding_list, encoding_function, encoding_name):
"""
Calculates the Mordred descriptors for list of molecules encodings
:param smiles_list: List of molecules encodings
:type smiles_list: list
:returns: The calculated descriptors list for the given molecules encodings
:rtype: Dataframe
"""
calc = mordred.Calculator()
    calc.register(mordred.AtomCount)       # 16
    calc.register(mordred.RingCount)       # 139
    calc.register(mordred.BondCount)       # 9
    calc.register(mordred.HydrogenBond)    # 2
    calc.register(mordred.CarbonTypes)     # 10
    calc.register(mordred.SLogP)           # 2
    calc.register(mordred.Constitutional)  # 16
    calc.register(mordred.TopoPSA)         # 2
    calc.register(mordred.Weight)          # 2
    calc.register(mordred.Polarizability)  # 2
    calc.register(mordred.McGowanVolume)   # 1
name_list=[]
for desc_name in calc.descriptors:
name_list.append(str(desc_name))
descriptors_list=[]
erroneous_encodings=[]
encodings_none_descriptors=[]
for encoding in encoding_list:
mol=encoding_function(encoding)
if mol is None:
descriptors_list.append([None]*len(name_list))
erroneous_encodings.append(encoding)
else:
mol=Chem.AddHs(mol)
calculated_descriptors = calc(mol)
for i in range(len(calculated_descriptors._values)):
if math.isnan(calculated_descriptors._values[i]):
calculated_descriptors._values = [None]*len(name_list)
encodings_none_descriptors.append(encoding)
break
descriptors_list.append(calculated_descriptors._values)
if len(erroneous_encodings)>0:
print("The following erroneous {} have been found in the data:\n{}.\nThe erroneous {} will be removed from the data.".format(encoding_name, '\n'.join(map(str, erroneous_encodings)), encoding_name))
if len(encodings_none_descriptors)>0:
print("For the following {} not all descriptors can be computed:\n{}.\nThese {} will be removed from the data.".format(encoding_name, '\n'.join(map(str, encodings_none_descriptors)), encoding_name))
df_descriptors=pd.DataFrame(descriptors_list,columns=name_list)
df_descriptors = df_descriptors.select_dtypes(exclude=['object'])
return df_descriptors
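# --- Hedged illustration (not part of the original module) ---
# A usage sketch for the generator above; the ``_demo_*`` name is
# hypothetical and assumes rdkit and mordred are installed.
def _demo_mordred_usage():
    df = get_mordred_descriptors(["CCO", "c1ccccc1"])
    # one row per molecule, one column per numeric Mordred descriptor kept
    return df.shape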
def select_descriptors_lasso(df_descriptors,target_list, R_select=0.05, C_select=0.05, kind="R"):
"""
Selects descriptors by LASSO
:param df_descriptors: descriptors of molecules
:type df_descriptors: Dataframe
:param target_list: list of target values
:type target_list: list
:param R_select: alpha value for Lasso
:type R_select: float
:param C_select: C value for LogisticRegression
:type C_select: float
:param kind: kind of target R->Regression C->Classification
:type kind: string
:returns: The selected descriptors
:rtype: Dataframe
"""
# Remove erroneous data
df_descriptors = df_descriptors.assign(target=target_list.values)
df_descriptors = df_descriptors.dropna(how='any')
target_list = df_descriptors['target'].to_list()
df_descriptors = df_descriptors.drop(columns=['target'])
df_descriptors_scaled = StandardScaler().fit_transform(df_descriptors)
if(kind=="C"):
model = LogisticRegression(C=C_select,penalty='l1', solver='liblinear',random_state=1).fit(df_descriptors_scaled, target_list)
else:
model = Lasso(alpha=R_select,max_iter=10000,random_state=1).fit(df_descriptors_scaled, target_list)
selected = SelectFromModel(model, prefit=True)
X_new_lasso = selected.transform(df_descriptors)
# Get back the kept features as a DataFrame with dropped columns as all 0s
selected_features = pd.DataFrame(selected.inverse_transform(X_new_lasso), index=df_descriptors.index, columns=df_descriptors.columns)
# Dropped columns have values of all 0s, keep other columns
selected_columns_lasso = selected_features.columns[selected_features.var() != 0]
selected_data = df_descriptors[selected_columns_lasso]
return selected_data, target_list
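# --- Hedged illustration (not part of the original module) ---
# The same SelectFromModel pattern on synthetic data, independent of any
# molecular descriptors (the function name is hypothetical):
def _demo_lasso_selection():
    import numpy as np
    rng = np.random.RandomState(0)
    X = pd.DataFrame(rng.normal(size=(100, 5)),
                     columns=['d0', 'd1', 'd2', 'd3', 'd4'])
    y = pd.Series(3.0 * X['d0'] + rng.normal(scale=0.1, size=100))
    selected, target = select_descriptors_lasso(X, y, R_select=0.1, kind="R")
    return list(selected.columns)  # typically ['d0']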
def get_ecfp(smiles_list, target_list, radius=2, nBits=2048):
"""
Calculates the ECFP fingerprint for given SMILES list
:param smiles_list: List of SMILES
:type smiles_list: list
:param radius: The ECPF fingerprints radius.
:type radius: int
:param nBits: The number of bits of the fingerprint vector.
:type nBits: int
:returns: The calculated ECPF fingerprints for the given SMILES
:rtype: Dataframe
"""
    return generate_ecfp(smiles_list, Chem.MolFromSmiles, 'SMILES', target_list, radius=radius, nBits=nBits)
def get_ecfp_from_inchi(inchi_list, target_list, radius=2, nBits=2048):
"""
Calculates the ECFP fingerprint for given InChi list
:param inchi_list: List of InChi
:type inchi_list: list
:param radius: The ECPF fingerprints radius.
:type radius: int
:param nBits: The number of bits of the fingerprint vector.
:type nBits: int
:returns: The calculated ECPF fingerprints for the given InChi
:rtype: Dataframe
"""
    return generate_ecfp(inchi_list, Chem.MolFromInchi, 'InChi', target_list, radius=radius, nBits=nBits)
def generate_ecfp(encoding_list, encoding_function, encoding_name, target_list, radius=2, nBits=2048):
"""
Calculates the ECFP fingerprint for given list of molecules encodings
:param encoding_list: List of molecules encodings
:type encoding_list: list
:param encoding_function: Function used to extract the molecules from the encodings
:type encoding_function: fun
:param radius: The ECPF fingerprints radius.
:type radius: int
:param nBits: The number of bits of the fingerprint vector.
:type nBits: int
:returns: The calculated ECPF fingerprints for the given molecules encodings
:rtype: Dataframe
"""
# Generate ECFP fingerprints
ecfp_fingerprints=[]
erroneous_encodings=[]
for encoding in encoding_list:
mol=encoding_function(encoding)
if mol is None:
ecfp_fingerprints.append([None]*nBits)
erroneous_encodings.append(encoding)
else:
mol=Chem.AddHs(mol)
list_bits_fingerprint = []
list_bits_fingerprint[:0] = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits).ToBitString()
ecfp_fingerprints.append(list_bits_fingerprint)
# Create dataframe of fingerprints
df_ecfp_fingerprints = pd.DataFrame(data = ecfp_fingerprints, index = encoding_list)
# Remove erroneous data
if len(erroneous_encodings)>0:
print("The following erroneous {} have been found in the data:\n{}.\nThe erroneous {} will be removed from the data.".format(encoding_name, '\n'.join(map(str, erroneous_encodings)), encoding_name))
if len(target_list)>0:
df_ecfp_fingerprints = df_ecfp_fingerprints.assign(target=target_list.values)
df_ecfp_fingerprints = df_ecfp_fingerprints.dropna(how='any')
if len(target_list)>0:
target_list = df_ecfp_fingerprints['target'].to_list()
df_ecfp_fingerprints = df_ecfp_fingerprints.drop(columns=['target'])
# Remove bit columns with no variablity (all "0" or all "1")
df_ecfp_fingerprints = df_ecfp_fingerprints.loc[:, (df_ecfp_fingerprints != 0).any(axis=0)]
df_ecfp_fingerprints = df_ecfp_fingerprints.loc[:, (df_ecfp_fingerprints != 1).any(axis=0)]
return df_ecfp_fingerprints, target_list
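# --- Hedged illustration (appended; not part of the original module) ---
# ECFP usage sketch; assumes rdkit is installed. Passing an empty target
# list skips the target bookkeeping in ``generate_ecfp``.
if __name__ == "__main__":
    fps, _ = get_ecfp(["CCO", "CCN"], target_list=[])
    print(fps.shape)  # (2, 2048): bit values are kept as '0'/'1' characters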
| 39.441048
| 206
| 0.704052
|
from rdkit import Chem
from rdkit.Chem import AllChem
import pandas as pd
import math
import mordred
from mordred import Calculator, descriptors
from sklearn.linear_model import Lasso, LogisticRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.preprocessing import StandardScaler
def get_mordred_descriptors(smiles_list):
return generate_mordred_descriptors(smiles_list, Chem.MolFromSmiles, 'SMILES')
def get_mordred_descriptors_from_inchi(inchi_list):
return generate_mordred_descriptors(inchi_list, Chem.MolFromInchi, 'InChi')
def generate_mordred_descriptors(encoding_list, encoding_function, encoding_name):
calc = mordred.Calculator()
calc.register(mordred.AtomCount)
calc.register(mordred.RingCount)
calc.register(mordred.BondCount)
calc.register(mordred.HydrogenBond)
calc.register(mordred.CarbonTypes)
calc.register(mordred.SLogP)
calc.register(mordred.Constitutional)
calc.register(mordred.TopoPSA)
calc.register(mordred.Weight)
calc.register(mordred.Polarizability)
calc.register(mordred.McGowanVolume)
name_list=[]
for desc_name in calc.descriptors:
name_list.append(str(desc_name))
descriptors_list=[]
erroneous_encodings=[]
encodings_none_descriptors=[]
for encoding in encoding_list:
mol=encoding_function(encoding)
if mol is None:
descriptors_list.append([None]*len(name_list))
erroneous_encodings.append(encoding)
else:
mol=Chem.AddHs(mol)
calculated_descriptors = calc(mol)
for i in range(len(calculated_descriptors._values)):
if math.isnan(calculated_descriptors._values[i]):
calculated_descriptors._values = [None]*len(name_list)
encodings_none_descriptors.append(encoding)
break
descriptors_list.append(calculated_descriptors._values)
if len(erroneous_encodings)>0:
print("The following erroneous {} have been found in the data:\n{}.\nThe erroneous {} will be removed from the data.".format(encoding_name, '\n'.join(map(str, erroneous_encodings)), encoding_name))
if len(encodings_none_descriptors)>0:
print("For the following {} not all descriptors can be computed:\n{}.\nThese {} will be removed from the data.".format(encoding_name, '\n'.join(map(str, encodings_none_descriptors)), encoding_name))
df_descriptors=pd.DataFrame(descriptors_list,columns=name_list)
df_descriptors = df_descriptors.select_dtypes(exclude=['object'])
return df_descriptors
def select_descriptors_lasso(df_descriptors,target_list, R_select=0.05, C_select=0.05, kind="R"):
df_descriptors = df_descriptors.assign(target=target_list.values)
df_descriptors = df_descriptors.dropna(how='any')
target_list = df_descriptors['target'].to_list()
df_descriptors = df_descriptors.drop(columns=['target'])
df_descriptors_scaled = StandardScaler().fit_transform(df_descriptors)
if(kind=="C"):
model = LogisticRegression(C=C_select,penalty='l1', solver='liblinear',random_state=1).fit(df_descriptors_scaled, target_list)
else:
model = Lasso(alpha=R_select,max_iter=10000,random_state=1).fit(df_descriptors_scaled, target_list)
selected = SelectFromModel(model, prefit=True)
X_new_lasso = selected.transform(df_descriptors)
selected_features = pd.DataFrame(selected.inverse_transform(X_new_lasso), index=df_descriptors.index, columns=df_descriptors.columns)
selected_columns_lasso = selected_features.columns[selected_features.var() != 0]
selected_data = df_descriptors[selected_columns_lasso]
return selected_data, target_list
def get_ecfp(smiles_list, target_list, radius=2, nBits=2048):
    return generate_ecfp(smiles_list, Chem.MolFromSmiles, 'SMILES', target_list, radius=radius, nBits=nBits)
def get_ecfp_from_inchi(inchi_list, target_list, radius=2, nBits=2048):
    return generate_ecfp(inchi_list, Chem.MolFromInchi, 'InChi', target_list, radius=radius, nBits=nBits)
def generate_ecfp(encoding_list, encoding_function, encoding_name, target_list, radius=2, nBits=2048):
ecfp_fingerprints=[]
erroneous_encodings=[]
for encoding in encoding_list:
mol=encoding_function(encoding)
if mol is None:
ecfp_fingerprints.append([None]*nBits)
erroneous_encodings.append(encoding)
else:
mol=Chem.AddHs(mol)
list_bits_fingerprint = []
list_bits_fingerprint[:0] = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits).ToBitString()
ecfp_fingerprints.append(list_bits_fingerprint)
df_ecfp_fingerprints = pd.DataFrame(data = ecfp_fingerprints, index = encoding_list)
if len(erroneous_encodings)>0:
print("The following erroneous {} have been found in the data:\n{}.\nThe erroneous {} will be removed from the data.".format(encoding_name, '\n'.join(map(str, erroneous_encodings)), encoding_name))
if len(target_list)>0:
df_ecfp_fingerprints = df_ecfp_fingerprints.assign(target=target_list.values)
df_ecfp_fingerprints = df_ecfp_fingerprints.dropna(how='any')
if len(target_list)>0:
target_list = df_ecfp_fingerprints['target'].to_list()
df_ecfp_fingerprints = df_ecfp_fingerprints.drop(columns=['target'])
df_ecfp_fingerprints = df_ecfp_fingerprints.loc[:, (df_ecfp_fingerprints != 0).any(axis=0)]
df_ecfp_fingerprints = df_ecfp_fingerprints.loc[:, (df_ecfp_fingerprints != 1).any(axis=0)]
return df_ecfp_fingerprints, target_list
| true
| true
|
f71962fa2355f2d2493a845f387b9126cf69d7d6
| 69,118
|
py
|
Python
|
neutron/tests/unit/api/v2/test_base.py
|
mcadariu/neutron
|
35494af5a25efb8b314941ab85b44923654f6acc
|
[
"Apache-2.0"
] | 1
|
2018-07-04T07:59:31.000Z
|
2018-07-04T07:59:31.000Z
|
neutron/tests/unit/api/v2/test_base.py
|
ljzjohnson/neutron
|
d78664321482c15981a09642985a540195e754e3
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/api/v2/test_base.py
|
ljzjohnson/neutron
|
d78664321482c15981a09642985a540195e754e3
|
[
"Apache-2.0"
] | 1
|
2018-08-28T17:13:16.000Z
|
2018-08-28T17:13:16.000Z
|
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from neutron_lib.api import attributes
from neutron_lib.api import converters
from neutron_lib.callbacks import registry
from neutron_lib import constants
from neutron_lib import context
from neutron_lib import exceptions as n_exc
from neutron_lib import fixture
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_policy import policy as oslo_policy
from oslo_utils import uuidutils
import six
import six.moves.urllib.parse as urlparse
import webob
from webob import exc
import webtest
from neutron.api import api_common
from neutron.api import extensions
from neutron.api.v2 import base as v2_base
from neutron.api.v2 import router
from neutron import policy
from neutron import quota
from neutron.quota import resource_registry
from neutron.tests import base
from neutron.tests import fake_notifier
from neutron.tests import tools
from neutron.tests.unit import dummy_plugin
from neutron.tests.unit import testlib_api
EXTDIR = os.path.join(base.ROOTDIR, 'unit/extensions')
_uuid = uuidutils.generate_uuid
def _get_path(resource, id=None, action=None,
fmt=None, endpoint=None):
path = '/%s' % resource
if id is not None:
path = path + '/%s' % id
if action is not None:
path = path + '/%s' % action
if endpoint is not None:
path = path + '/%s' % endpoint
if fmt is not None:
path = path + '.%s' % fmt
return path
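# --- Hedged illustration (not part of the original test module) ---
# ``_get_path`` just joins the optional pieces in order; the demo name is
# hypothetical:
def _demo_get_path():
    assert _get_path('networks') == '/networks'
    assert _get_path('networks', id='abc', fmt='json') == '/networks/abc.json'
    assert (_get_path('networks', id='abc', action='dhcp-agents') ==
            '/networks/abc/dhcp-agents')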
class APIv2TestBase(base.BaseTestCase):
def setUp(self):
super(APIv2TestBase, self).setUp()
plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
# Create the default configurations
self.config_parse()
# Update the plugin
self.setup_coreplugin(plugin, load_plugins=False)
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
instance = self.plugin.return_value
instance.supported_extension_aliases = ['empty-string-filtering']
instance._NeutronPluginBaseV2__native_pagination_support = True
instance._NeutronPluginBaseV2__native_sorting_support = True
tools.make_mock_plugin_json_encodable(instance)
api = router.APIRouter()
self.api = webtest.TestApp(api)
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
group='QUOTAS')
# APIRouter initialization resets policy module, re-initializing it
policy.init()
class _ArgMatcher(object):
"""An adapter to assist mock assertions, used to custom compare."""
def __init__(self, cmp, obj):
self.cmp = cmp
self.obj = obj
def __eq__(self, other):
return self.cmp(self.obj, other)
def _list_cmp(l1, l2):
return set(l1) == set(l2)
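# --- Hedged illustration (not part of the original test module) ---
# ``_ArgMatcher`` lets mock assertions compare arguments order-insensitively;
# the demo name is hypothetical:
def _demo_arg_matcher():
    m = mock.Mock()
    m(fields=['b', 'a'])
    m.assert_called_once_with(fields=_ArgMatcher(_list_cmp, ['a', 'b']))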
class APIv2TestCase(APIv2TestBase):
@staticmethod
def _get_policy_attrs(attr_info):
policy_attrs = {name for (name, info) in attr_info.items()
if info.get('required_by_policy')}
if 'tenant_id' in policy_attrs:
policy_attrs.add('project_id')
return sorted(policy_attrs)
def _do_field_list(self, resource, base_fields):
attr_info = attributes.RESOURCES[resource]
policy_attrs = self._get_policy_attrs(attr_info)
for name, info in attr_info.items():
if info.get('primary_key'):
policy_attrs.append(name)
fields = base_fields
fields.extend(policy_attrs)
return fields
def _get_collection_kwargs(self, skipargs=None, **kwargs):
skipargs = skipargs or []
args_list = ['filters', 'fields', 'sorts', 'limit', 'marker',
'page_reverse']
args_dict = dict(
(arg, mock.ANY) for arg in set(args_list) - set(skipargs))
args_dict.update(kwargs)
return args_dict
def test_fields(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': 'foo'})
fields = self._do_field_list('networks', ['foo'])
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
fields = self._do_field_list('networks', ['bar', 'foo'])
self.api.get(_get_path('networks'), {'fields': ['foo', 'bar']})
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple_with_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
fields = self._do_field_list('networks', ['foo'])
self.api.get(_get_path('networks'), {'fields': ['foo', '']})
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': ''})
kwargs = self._get_collection_kwargs(fields=[])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': ['', '']})
kwargs = self._get_collection_kwargs(fields=[])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar'})
filters = {'name': ['bar']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ''})
filters = {'name': ['']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['', '']})
filters = {'name': ['', '']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_with_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['bar', '']})
filters = {'name': ['bar', '']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_values(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['bar', 'bar2']})
filters = {'name': ['bar', 'bar2']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar',
'tenant_id': 'bar2'})
filters = {'name': ['bar'], 'tenant_id': ['bar2']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_fields(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar', 'fields': 'foo'})
filters = {'name': ['bar']}
fields = self._do_field_list('networks', ['foo'])
kwargs = self._get_collection_kwargs(filters=filters, fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_convert_to(self):
instance = self.plugin.return_value
instance.get_ports.return_value = []
self.api.get(_get_path('ports'), {'admin_state_up': 'true'})
filters = {'admin_state_up': [True]}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_convert_list_to(self):
instance = self.plugin.return_value
instance.get_ports.return_value = []
self.api.get(_get_path('ports'),
{'fixed_ips': ['ip_address=foo', 'subnet_id=bar']})
filters = {'fixed_ips': {'ip_address': ['foo'], 'subnet_id': ['bar']}}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
def test_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'limit': '10'})
kwargs = self._get_collection_kwargs(limit=10)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_great_than_max_limit(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'limit': '1001'})
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_zero(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'limit': '0'})
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_unspecific(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_negative_value(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'), {'limit': -1},
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_limit_with_non_integer(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'limit': 'abc'}, expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
self.assertIn('abc', res)
def test_limit_with_infinite_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_override('pagination_max_limit', 'Infinite')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_negative_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_default('pagination_max_limit', '-1')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_non_integer_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_default('pagination_max_limit', 'abc')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_marker(self):
cfg.CONF.set_override('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
marker = _uuid()
self.api.get(_get_path('networks'),
{'marker': marker})
kwargs = self._get_collection_kwargs(limit=1000, marker=marker)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_page_reverse(self):
calls = []
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'page_reverse': 'True'})
kwargs = self._get_collection_kwargs(page_reverse=True)
calls.append(mock.call.get_networks(mock.ANY, **kwargs))
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
instance.get_networks.reset_mock()
self.api.get(_get_path('networks'),
{'page_reverse': 'False'})
kwargs = self._get_collection_kwargs(page_reverse=False)
calls.append(mock.call.get_networks(mock.ANY, **kwargs))
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_page_reverse_with_non_bool(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'page_reverse': 'abc'})
kwargs = self._get_collection_kwargs(page_reverse=False)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_page_reverse_with_unspecific(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(page_reverse=False)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up'],
'sort_dir': ['desc', 'asc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', True)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort_with_primary_key(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up', 'id'],
'sort_dir': ['desc', 'asc', 'desc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', False)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort_without_direction(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'), {'sort_key': ['name']},
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_sort_with_invalid_attribute(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'sort_key': 'abc',
'sort_dir': 'asc'},
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_sort_with_invalid_dirs(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'sort_key': 'name',
'sort_dir': 'abc'},
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_emulated_sort(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance._NeutronPluginBaseV2__native_sorting_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
'sort_dir': ['desc', 'asc']})
kwargs = self._get_collection_kwargs(
skipargs=['sorts', 'limit', 'marker', 'page_reverse'])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_emulated_sort_without_sort_field(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance._NeutronPluginBaseV2__native_sorting_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
'sort_dir': ['desc', 'asc'],
'fields': ['subnets']})
kwargs = self._get_collection_kwargs(
skipargs=['sorts', 'limit', 'marker', 'page_reverse'],
fields=_ArgMatcher(_list_cmp, ['name',
'status',
'id',
'subnets',
'shared',
'project_id',
'tenant_id']))
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_emulated_pagination(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'limit': 10,
'marker': 'foo',
'page_reverse': False})
kwargs = self._get_collection_kwargs(skipargs=['limit',
'marker',
'page_reverse'])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_native_pagination_without_native_sorting(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_sorting_support = False
self.assertRaises(n_exc.Invalid, router.APIRouter)
# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
def _test_list(self, req_tenant_id, real_tenant_id):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
input_dict = {'id': uuidutils.generate_uuid(),
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': real_tenant_id,
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
res = self.api.get(_get_path('networks',
fmt=self.fmt), extra_environ=env)
res = self.deserialize(res)
self.assertIn('networks', res)
if not req_tenant_id or req_tenant_id == real_tenant_id:
# expect full list returned
self.assertEqual(1, len(res['networks']))
output_dict = res['networks'][0]
input_dict['shared'] = False
self.assertEqual(len(input_dict), len(output_dict))
for k, v in input_dict.items():
self.assertEqual(v, output_dict[k])
else:
# expect no results
self.assertEqual(0, len(res['networks']))
def test_list_noauth(self):
self._test_list(None, _uuid())
def test_list_keystone(self):
tenant_id = _uuid()
self._test_list(tenant_id, tenant_id)
def test_list_keystone_bad(self):
tenant_id = _uuid()
self._test_list(tenant_id + "bad", tenant_id)
def test_list_pagination(self):
id1 = str(_uuid())
id2 = str(_uuid())
input_dict1 = {'id': id1,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
input_dict2 = {'id': id2,
'name': 'net2',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict1, input_dict2]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'sort_key': ['name'],
'sort_dir': ['asc']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(2, len(res['networks']))
self.assertEqual(sorted([id1, id2]),
sorted([res['networks'][0]['id'],
res['networks'][1]['id']]))
self.assertIn('networks_links', res)
next_links = []
previous_links = []
for r in res['networks_links']:
if r['rel'] == 'next':
next_links.append(r)
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(1, len(next_links))
self.assertEqual(1, len(previous_links))
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
params['marker'] = [id2]
self.assertEqual(params, urlparse.parse_qs(url.query))
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
params['marker'] = [id1]
params['page_reverse'] = ['True']
self.assertEqual(params, urlparse.parse_qs(url.query))
def test_list_pagination_with_last_page(self):
id = str(_uuid())
input_dict = {'id': id,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': str(_uuid())}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(1, len(res['networks']))
self.assertEqual(id, res['networks'][0]['id'])
self.assertIn('networks_links', res)
previous_links = []
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'next')
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(1, len(previous_links))
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
expect_params['marker'] = [id]
expect_params['page_reverse'] = ['True']
self.assertEqual(expect_params, urlparse.parse_qs(url.query))
def test_list_pagination_with_empty_page(self):
return_value = []
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': str(_uuid())}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual([], res['networks'])
previous_links = []
if 'networks_links' in res:
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'next')
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(1, len(previous_links))
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
del expect_params['marker']
expect_params['page_reverse'] = ['True']
self.assertEqual(expect_params, urlparse.parse_qs(url.query))
def test_list_pagination_reverse_with_last_page(self):
id = str(_uuid())
input_dict = {'id': id,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'page_reverse': ['True']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(len(res['networks']), 1)
self.assertEqual(id, res['networks'][0]['id'])
self.assertIn('networks_links', res)
next_links = []
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'previous')
if r['rel'] == 'next':
next_links.append(r)
self.assertEqual(1, len(next_links))
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expected_params = params.copy()
del expected_params['page_reverse']
expected_params['marker'] = [id]
self.assertEqual(expected_params,
urlparse.parse_qs(url.query))
def test_list_pagination_reverse_with_empty_page(self):
return_value = []
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'page_reverse': ['True']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual([], res['networks'])
next_links = []
if 'networks_links' in res:
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'previous')
if r['rel'] == 'next':
next_links.append(r)
self.assertEqual(1, len(next_links))
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
del expect_params['marker']
del expect_params['page_reverse']
self.assertEqual(expect_params, urlparse.parse_qs(url.query))
def test_create(self):
net_id = _uuid()
data = {'network': {'name': 'net1', 'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net_id, net['id'])
self.assertEqual("ACTIVE", net['status'])
def test_create_use_defaults(self):
net_id = _uuid()
tenant_id = _uuid()
initial_input = {'network': {'name': 'net1',
'tenant_id': tenant_id,
'project_id': tenant_id}}
full_input = {'network': {'admin_state_up': True,
'shared': False}}
full_input['network'].update(initial_input['network'])
return_value = {'id': net_id, 'status': "ACTIVE"}
return_value.update(full_input['network'])
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt)
instance.create_network.assert_called_with(mock.ANY,
network=full_input)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net_id, net['id'])
self.assertTrue(net['admin_state_up'])
self.assertEqual("ACTIVE", net['status'])
def test_create_no_keystone_env(self):
data = {'name': 'net1'}
self._test_create_failure_bad_request('networks', data)
def test_create_with_keystone_env(self):
tenant_id = _uuid()
net_id = _uuid()
env = {'neutron.context': context.Context('', tenant_id)}
# tenant_id should be fetched from env
initial_input = {'network': {'name': 'net1'}}
full_input = {'network': {'admin_state_up': True,
'shared': False, 'tenant_id': tenant_id,
'project_id': tenant_id}}
full_input['network'].update(initial_input['network'])
return_value = {'id': net_id, 'status': "ACTIVE"}
return_value.update(full_input['network'])
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt,
extra_environ=env)
instance.create_network.assert_called_with(mock.ANY,
network=full_input)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
def test_create_bad_keystone_tenant(self):
tenant_id = _uuid()
data = {'network': {'name': 'net1', 'tenant_id': tenant_id}}
env = {'neutron.context': context.Context('', tenant_id + "bad")}
self._test_create_failure_bad_request('networks', data,
extra_environ=env)
def test_create_no_body(self):
data = {'whoa': None}
self._test_create_failure_bad_request('networks', data)
def test_create_body_string_not_json(self):
data = 'a string'
self._test_create_failure_bad_request('networks', data)
def test_create_body_boolean_not_json(self):
data = True
self._test_create_failure_bad_request('networks', data)
def test_create_no_resource(self):
data = {}
self._test_create_failure_bad_request('networks', data)
def test_create_missing_attr(self):
data = {'port': {'what': 'who', 'tenant_id': _uuid()}}
self._test_create_failure_bad_request('ports', data)
def test_create_readonly_attr(self):
data = {'network': {'name': 'net1', 'tenant_id': _uuid(),
'status': "ACTIVE"}}
self._test_create_failure_bad_request('networks', data)
def test_create_with_too_long_name(self):
data = {'network': {'name': "12345678" * 32,
'admin_state_up': True,
'tenant_id': _uuid()}}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_create_bulk(self):
data = {'networks': [{'name': 'net1',
'admin_state_up': True,
'tenant_id': _uuid()},
{'name': 'net2',
'admin_state_up': True,
'tenant_id': _uuid()}]}
def side_effect(context, network):
net = network.copy()
net['network'].update({'subnets': []})
return net['network']
instance = self.plugin.return_value
instance.create_network.side_effect = side_effect
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
def _test_create_failure_bad_request(self, resource, data, **kwargs):
res = self.api.post(_get_path(resource, fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True, **kwargs)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_create_bulk_networks_none(self):
self._test_create_failure_bad_request('networks', {'networks': None})
def test_create_bulk_networks_empty_list(self):
self._test_create_failure_bad_request('networks', {'networks': []})
def test_create_bulk_missing_attr(self):
data = {'ports': [{'what': 'who', 'tenant_id': _uuid()}]}
self._test_create_failure_bad_request('ports', data)
def test_create_bulk_partial_body(self):
data = {'ports': [{'device_id': 'device_1',
'tenant_id': _uuid()},
{'tenant_id': _uuid()}]}
self._test_create_failure_bad_request('ports', data)
def test_create_attr_not_specified(self):
net_id = _uuid()
tenant_id = _uuid()
device_id = _uuid()
initial_input = {'port': {'name': '', 'network_id': net_id,
'tenant_id': tenant_id,
'project_id': tenant_id,
'device_id': device_id,
'admin_state_up': True}}
full_input = {'port': {'admin_state_up': True,
'mac_address': constants.ATTR_NOT_SPECIFIED,
'fixed_ips': constants.ATTR_NOT_SPECIFIED,
'device_owner': ''}}
full_input['port'].update(initial_input['port'])
return_value = {'id': _uuid(), 'status': 'ACTIVE',
'admin_state_up': True,
'mac_address': 'ca:fe:de:ad:be:ef',
'device_id': device_id,
'device_owner': ''}
return_value.update(initial_input['port'])
instance = self.plugin.return_value
instance.get_network.return_value = {
'tenant_id': six.text_type(tenant_id)
}
instance.get_ports_count.return_value = 1
instance.create_port.return_value = return_value
res = self.api.post(_get_path('ports', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt)
instance.create_port.assert_called_with(mock.ANY, port=full_input)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('port', res)
port = res['port']
self.assertEqual(net_id, port['network_id'])
self.assertEqual('ca:fe:de:ad:be:ef', port['mac_address'])
def test_create_return_extra_attr(self):
net_id = _uuid()
data = {'network': {'name': 'net1', 'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id, 'v2attrs:something': "123"}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net_id, net['id'])
self.assertEqual("ACTIVE", net['status'])
self.assertNotIn('v2attrs:something', net)
def test_fields(self):
return_value = {'name': 'net1', 'admin_state_up': True,
'subnets': []}
instance = self.plugin.return_value
instance.get_network.return_value = return_value
self.api.get(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt))
def _test_delete(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': real_tenant_id,
'shared': False}
instance.delete_network.return_value = None
res = self.api.delete(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
extra_environ=env,
expect_errors=expect_errors)
self.assertEqual(expected_code, res.status_int)
def test_delete_noauth(self):
self._test_delete(None, _uuid(), exc.HTTPNoContent.code)
def test_delete_keystone(self):
tenant_id = _uuid()
self._test_delete(tenant_id, tenant_id, exc.HTTPNoContent.code)
def test_delete_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_delete(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def _test_get(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
shared = False
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
if req_tenant_id.endswith('another'):
shared = True
env['neutron.context'].roles = ['tenant_admin']
data = {'tenant_id': real_tenant_id, 'shared': shared}
instance = self.plugin.return_value
instance.get_network.return_value = data
res = self.api.get(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
extra_environ=env,
expect_errors=expect_errors)
self.assertEqual(expected_code, res.status_int)
return res
def test_get_noauth(self):
self._test_get(None, _uuid(), 200)
def test_get_keystone(self):
tenant_id = _uuid()
self._test_get(tenant_id, tenant_id, 200)
def test_get_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_get(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def test_get_keystone_shared_network(self):
tenant_id = _uuid()
self._test_get(tenant_id + "another", tenant_id, 200)
def test_get_keystone_strip_admin_only_attribute(self):
tenant_id = _uuid()
# Inject rule in policy engine
rules = oslo_policy.Rules.from_dict(
{'get_network:name': "rule:admin_only"})
policy.set_rules(rules, overwrite=False)
res = self._test_get(tenant_id, tenant_id, 200)
res = self.deserialize(res)
self.assertNotIn('name', res['network'])
def _test_update(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
# leave out 'name' field intentionally
data = {'network': {'admin_state_up': True}}
return_value = {'subnets': []}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': real_tenant_id,
'shared': False}
instance.update_network.return_value = return_value
res = self.api.put(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
self.serialize(data),
extra_environ=env,
expect_errors=expect_errors)
# Ensure id attribute is included in fields returned by GET call
# in update procedure.
self.assertEqual(1, instance.get_network.call_count)
self.assertIn('id', instance.get_network.call_args[1]['fields'])
        self.assertEqual(expected_code, res.status_int)
def test_update_noauth(self):
self._test_update(None, _uuid(), 200)
def test_update_keystone(self):
tenant_id = _uuid()
self._test_update(tenant_id, tenant_id, 200)
def test_update_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_update(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def test_update_keystone_no_tenant(self):
tenant_id = _uuid()
self._test_update(tenant_id, None,
exc.HTTPNotFound.code, expect_errors=True)
def test_update_readonly_field(self):
data = {'network': {'status': "NANANA"}}
res = self.api.put(_get_path('networks', id=_uuid()),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_invalid_attribute_field(self):
data = {'network': {'invalid_key1': "foo1", 'invalid_key2': "foo2"}}
res = self.api.put(_get_path('networks', id=_uuid()),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_retry_on_index(self):
instance = self.plugin.return_value
instance.get_networks.side_effect = [db_exc.RetryRequest(None), []]
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks', fmt=self.fmt))
self.assertTrue(instance.get_networks.called)
def test_retry_on_show(self):
instance = self.plugin.return_value
instance.get_network.side_effect = [db_exc.RetryRequest(None), {}]
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks', _uuid(), fmt=self.fmt))
self.assertTrue(instance.get_network.called)
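# SubresourceTest wires a 'dummies' collection under
# /networks/{network_id}/ and checks that index/show/create/update/delete
# and the 'mactions' member action dispatch to the corresponding
# *_network_dummy hooks on the plugin (see TestSubresourcePlugin below).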
class SubresourceTest(base.BaseTestCase):
def setUp(self):
super(SubresourceTest, self).setUp()
raise self.skipException('this class will be deleted')
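        # Everything below this skip is currently unreachable; it is left in
        # place so the sub-resource route wiring stays visible until the
        # class is removed.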
plugin = 'neutron.tests.unit.api.v2.test_base.TestSubresourcePlugin'
extensions.PluginAwareExtensionManager._instance = None
self.useFixture(fixture.APIDefinitionFixture())
self.config_parse()
self.setup_coreplugin(plugin, load_plugins=False)
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
api = router.APIRouter()
SUB_RESOURCES = {}
RESOURCE_ATTRIBUTE_MAP = {}
SUB_RESOURCES[dummy_plugin.RESOURCE_NAME] = {
'collection_name': 'dummies',
'parent': {'collection_name': 'networks',
'member_name': 'network'}
}
RESOURCE_ATTRIBUTE_MAP['dummies'] = {
'foo': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '', 'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True}
}
collection_name = SUB_RESOURCES[
dummy_plugin.RESOURCE_NAME].get('collection_name')
resource_name = dummy_plugin.RESOURCE_NAME
parent = SUB_RESOURCES[dummy_plugin.RESOURCE_NAME].get('parent')
params = RESOURCE_ATTRIBUTE_MAP['dummies']
member_actions = {'mactions': 'GET'}
_plugin = directory.get_plugin()
controller = v2_base.create_resource(collection_name, resource_name,
_plugin, params,
member_actions=member_actions,
parent=parent,
allow_bulk=True,
allow_pagination=True,
allow_sorting=True)
path_prefix = "/%s/{%s_id}/%s" % (parent['collection_name'],
parent['member_name'],
collection_name)
mapper_kwargs = dict(controller=controller,
path_prefix=path_prefix)
api.map.collection(collection_name, resource_name, **mapper_kwargs)
api.map.resource(collection_name, collection_name,
controller=controller,
parent_resource=parent,
member=member_actions)
self.api = webtest.TestApp(api)
def test_index_sub_resource(self):
instance = self.plugin.return_value
self.api.get('/networks/id1/dummies')
instance.get_network_dummies.assert_called_once_with(mock.ANY,
filters=mock.ANY,
fields=mock.ANY,
network_id='id1')
def test_show_sub_resource(self):
instance = self.plugin.return_value
dummy_id = _uuid()
self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id))
instance.get_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1',
fields=mock.ANY)
def test_create_sub_resource(self):
instance = self.plugin.return_value
tenant_id = _uuid()
body = {
dummy_plugin.RESOURCE_NAME: {
'foo': 'bar', 'tenant_id': tenant_id,
'project_id': tenant_id
}
}
self.api.post_json('/networks/id1/dummies', body)
instance.create_network_dummy.assert_called_once_with(mock.ANY,
network_id='id1',
dummy=body)
def test_update_sub_resource(self):
instance = self.plugin.return_value
dummy_id = _uuid()
body = {dummy_plugin.RESOURCE_NAME: {'foo': 'bar'}}
self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id),
body)
instance.update_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1',
dummy=body)
def test_update_subresource_to_none(self):
instance = self.plugin.return_value
dummy_id = _uuid()
body = {dummy_plugin.RESOURCE_NAME: {}}
self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id),
body)
instance.update_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1',
dummy=body)
def test_delete_sub_resource(self):
instance = self.plugin.return_value
dummy_id = _uuid()
self.api.delete('/networks/id1' + _get_path('dummies', id=dummy_id))
instance.delete_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1')
def test_sub_resource_member_actions(self):
instance = self.plugin.return_value
dummy_id = _uuid()
self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id,
action='mactions'))
instance.mactions.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1')
# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
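# V2Views exercises Controller._view directly: keys not declared for the
# resource (the injected 'fake') are dropped, while every declared key
# survives in the rendered dict.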
class V2Views(base.BaseTestCase):
def _view(self, keys, collection, resource):
        data = {key: 'value' for key in keys}
data['fake'] = 'value'
attr_info = attributes.RESOURCES[collection]
controller = v2_base.Controller(None, collection, resource, attr_info)
res = controller._view(context.get_admin_context(), data)
self.assertNotIn('fake', res)
for key in keys:
self.assertIn(key, res)
def test_network(self):
keys = ('id', 'name', 'subnets', 'admin_state_up', 'status',
'tenant_id')
self._view(keys, 'networks', 'network')
def test_port(self):
keys = ('id', 'network_id', 'mac_address', 'fixed_ips',
'device_id', 'admin_state_up', 'tenant_id', 'status')
self._view(keys, 'ports', 'port')
def test_subnet(self):
keys = ('id', 'network_id', 'tenant_id', 'gateway_ip',
'ip_version', 'cidr', 'enable_dhcp')
self._view(keys, 'subnets', 'subnet')
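# NotificationTest checks the oslo notification path: every CRUD call must
# emit '<resource>.<op>.start' and '<resource>.<op>.end' events, which the
# fake_notifier test double records for inspection.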
class NotificationTest(APIv2TestBase):
def setUp(self):
super(NotificationTest, self).setUp()
fake_notifier.reset()
def _resource_op_notifier(self, opname, resource, expected_errors=False):
initial_input = {resource: {'name': 'myname'}}
instance = self.plugin.return_value
instance.get_networks.return_value = initial_input
instance.get_networks_count.return_value = 0
expected_code = exc.HTTPCreated.code
if opname == 'create':
initial_input[resource]['tenant_id'] = _uuid()
res = self.api.post_json(
_get_path('networks'),
initial_input, expect_errors=expected_errors)
if opname == 'update':
res = self.api.put_json(
_get_path('networks', id=_uuid()),
initial_input, expect_errors=expected_errors)
expected_code = exc.HTTPOk.code
if opname == 'delete':
initial_input[resource]['tenant_id'] = _uuid()
res = self.api.delete(
_get_path('networks', id=_uuid()),
expect_errors=expected_errors)
expected_code = exc.HTTPNoContent.code
expected_events = ('.'.join([resource, opname, "start"]),
'.'.join([resource, opname, "end"]))
self.assertEqual(len(expected_events),
len(fake_notifier.NOTIFICATIONS))
for msg, event in zip(fake_notifier.NOTIFICATIONS, expected_events):
self.assertEqual('INFO', msg['priority'])
self.assertEqual(event, msg['event_type'])
if opname == 'delete' and event == 'network.delete.end':
self.assertIn('payload', msg)
resource = msg['payload']
self.assertIn('network_id', resource)
self.assertIn('network', resource)
self.assertEqual(expected_code, res.status_int)
    def test_network_create_notifier(self):
        self._resource_op_notifier('create', 'network')
    def test_network_delete_notifier(self):
        self._resource_op_notifier('delete', 'network')
    def test_network_update_notifier(self):
        self._resource_op_notifier('update', 'network')
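# RegistryNotificationTest covers the neutron_lib callback registry rather
# than oslo notifications: create/update/delete (including bulk create)
# are expected to go through registry.publish().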
class RegistryNotificationTest(APIv2TestBase):
def setUp(self):
# This test does not have database support so tracking cannot be used
cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS')
super(RegistryNotificationTest, self).setUp()
def _test_registry_notify(self, opname, resource, initial_input=None):
instance = self.plugin.return_value
instance.get_networks.return_value = initial_input
instance.get_networks_count.return_value = 0
expected_code = exc.HTTPCreated.code
with mock.patch.object(registry, 'publish') as notify:
if opname == 'create':
res = self.api.post_json(
_get_path('networks'),
initial_input)
if opname == 'update':
res = self.api.put_json(
_get_path('networks', id=_uuid()),
initial_input)
expected_code = exc.HTTPOk.code
if opname == 'delete':
res = self.api.delete(_get_path('networks', id=_uuid()))
expected_code = exc.HTTPNoContent.code
self.assertTrue(notify.called)
self.assertEqual(expected_code, res.status_int)
    def test_network_create_registry_notify(self):
        net_input = {'network': {'name': 'net',
                                 'tenant_id': _uuid()}}
        self._test_registry_notify('create', 'network', net_input)
def test_network_delete_registry_notify(self):
self._test_registry_notify('delete', 'network')
    def test_network_update_registry_notify(self):
        net_input = {'network': {'name': 'net'}}
        self._test_registry_notify('update', 'network', net_input)
    def test_networks_create_bulk_registry_notify(self):
        net_input = {'networks': [{'name': 'net1',
                                   'tenant_id': _uuid()},
                                  {'name': 'net2',
                                   'tenant_id': _uuid()}]}
        self._test_registry_notify('create', 'network', net_input)
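# QuotaTest verifies limit enforcement with the legacy ConfDriver: the API
# layer calls get_networks_count() on the plugin and rejects the create
# with "Quota exceeded" once the count reaches the configured quota, while
# a quota of -1 means unlimited.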
class QuotaTest(APIv2TestBase):
def setUp(self):
# This test does not have database support so tracking cannot be used
cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS')
super(QuotaTest, self).setUp()
# Use mock to let the API use a different QuotaEngine instance for
# unit test in this class. This will ensure resource are registered
# again and instantiated with neutron.quota.resource.CountableResource
replacement_registry = resource_registry.ResourceRegistry()
registry_patcher = mock.patch('neutron.quota.resource_registry.'
'ResourceRegistry.get_instance')
mock_registry = registry_patcher.start().return_value
mock_registry.get_resource = replacement_registry.get_resource
mock_registry.resources = replacement_registry.resources
# Register a resource
replacement_registry.register_resource_by_name('network')
def test_create_network_quota(self):
cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
full_input = {'network': {'admin_state_up': True, 'subnets': []}}
full_input['network'].update(initial_input['network'])
instance = self.plugin.return_value
instance.get_networks_count.return_value = 1
res = self.api.post_json(
_get_path('networks'), initial_input, expect_errors=True)
instance.get_networks_count.assert_called_with(mock.ANY,
filters=mock.ANY)
self.assertIn("Quota exceeded for resources",
res.json['NeutronError']['message'])
def test_create_network_quota_no_counts(self):
cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
full_input = {'network': {'admin_state_up': True, 'subnets': []}}
full_input['network'].update(initial_input['network'])
instance = self.plugin.return_value
instance.get_networks_count.side_effect = (
NotImplementedError())
instance.get_networks.return_value = ["foo"]
res = self.api.post_json(
_get_path('networks'), initial_input, expect_errors=True)
instance.get_networks_count.assert_called_with(mock.ANY,
filters=mock.ANY)
self.assertIn("Quota exceeded for resources",
res.json['NeutronError']['message'])
def test_create_network_quota_without_limit(self):
cfg.CONF.set_override('quota_network', -1, group='QUOTAS')
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
instance = self.plugin.return_value
instance.get_networks_count.return_value = 3
res = self.api.post_json(
_get_path('networks'), initial_input)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
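# ExtensionTestCase loads the sample 'v2attrs' extension from EXTDIR and
# verifies that extended attributes are forwarded to the plugin on create
# and that the response exposes 'v2attrs:something' while filtering out
# 'v2attrs:something_else'.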
class ExtensionTestCase(base.BaseTestCase):
def setUp(self):
# This test does not have database support so tracking cannot be used
cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS')
super(ExtensionTestCase, self).setUp()
plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
self.useFixture(fixture.APIDefinitionFixture())
# Create the default configurations
self.config_parse()
# Update the plugin and extensions path
self.setup_coreplugin(plugin, load_plugins=False)
cfg.CONF.set_override('api_extensions_path', EXTDIR)
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
# Instantiate mock plugin and enable the V2attributes extension
self.plugin.return_value.supported_extension_aliases = ["v2attrs"]
api = router.APIRouter()
self.api = webtest.TestApp(api)
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
group='QUOTAS')
def test_extended_create(self):
net_id = _uuid()
tenant_id = _uuid()
initial_input = {'network': {'name': 'net1', 'tenant_id': tenant_id,
'project_id': tenant_id,
'v2attrs:something_else': "abc"}}
data = {'network': {'admin_state_up': True, 'shared': False}}
data['network'].update(initial_input['network'])
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id,
'v2attrs:something': "123"}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post_json(_get_path('networks'), initial_input)
instance.create_network.assert_called_with(mock.ANY,
network=data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
self.assertIn('network', res.json)
net = res.json['network']
self.assertEqual(net_id, net['id'])
self.assertEqual("ACTIVE", net['status'])
self.assertEqual("123", net['v2attrs:something'])
self.assertNotIn('v2attrs:something_else', net)
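# Minimal stand-in plugin for SubresourceTest: with autospec=True the mock
# inherits these *_network_dummy signatures, so only the method names and
# argument lists matter here, not the bodies.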
class TestSubresourcePlugin(object):
def get_network_dummies(self, context, network_id,
filters=None, fields=None):
return []
def get_network_dummy(self, context, id, network_id,
fields=None):
return {}
def create_network_dummy(self, context, network_id, dummy):
return {}
def update_network_dummy(self, context, id, network_id, dummy):
return {}
def delete_network_dummy(self, context, id, network_id):
return
def mactions(self, context, id, network_id):
return
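# api_common.list_args collects every occurrence of a repeated query
# parameter, e.g. '?fields=4&foo=3&fields=2' yields ['4', '2'] for
# 'fields'.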
class ListArgsTestCase(base.BaseTestCase):
def test_list_args(self):
path = '/?fields=4&foo=3&fields=2&bar=1'
request = webob.Request.blank(path)
expect_val = ['2', '4']
actual_val = api_common.list_args(request, 'fields')
self.assertEqual(expect_val, sorted(actual_val))
def test_list_args_with_empty(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
self.assertEqual([], api_common.list_args(request, 'fields'))
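# api_common.get_sorts pairs sort_key/sort_dir parameters into
# (key, ascending) tuples: 'desc' maps to False, 'asc' to True, and
# sorting by project_id is accepted when the resource declares tenant_id.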
class SortingTestCase(base.BaseTestCase):
def test_get_sorts(self):
path = '/?sort_key=foo&sort_dir=desc&sort_key=bar&sort_dir=asc'
request = webob.Request.blank(path)
attr_info = {'foo': {'key': 'val'}, 'bar': {'key': 'val'}}
expect_val = [('foo', False), ('bar', True)]
actual_val = api_common.get_sorts(request, attr_info)
self.assertEqual(expect_val, actual_val)
def test_get_sorts_with_project_id(self):
path = '/?sort_key=project_id&sort_dir=desc'
request = webob.Request.blank(path)
attr_info = {'tenant_id': {'key': 'val'}}
expect_val = [('project_id', False)]
actual_val = api_common.get_sorts(request, attr_info)
self.assertEqual(expect_val, actual_val)
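# api_common.get_filters turns the remaining query string into a filters
# dict of value lists (e.g. '/?foo=4' -> {'foo': ['4']}), honoring skip
# lists, per-attribute convert_to/convert_list_to hooks, and, when the
# empty-string-filtering extension is supported, blank values.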
class FiltersTestCase(base.BaseTestCase):
def test_all_skip_args(self):
path = '/?fields=4&fields=3&fields=2&fields=1'
request = webob.Request.blank(path)
self.assertEqual({}, api_common.get_filters(request, {},
["fields"]))
@mock.patch('neutron.api.api_common.is_empty_string_filtering_supported',
return_value=False)
def test_blank_values(self, mock_is_supported):
path = '/?foo=&bar=&baz=&qux='
request = webob.Request.blank(path)
self.assertEqual({}, api_common.get_filters(request, {}))
@mock.patch('neutron.api.api_common.is_empty_string_filtering_supported',
return_value=True)
def test_blank_values_with_filtering_supported(self, mock_is_supported):
path = '/?foo=&bar=&baz=&qux='
request = webob.Request.blank(path)
self.assertEqual({'foo': [''], 'bar': [''], 'baz': [''], 'qux': ['']},
api_common.get_filters(request, {}))
def test_no_attr_info(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, {})
self.assertEqual(expect_val, actual_val)
def test_attr_info_with_project_info_populated(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
attr_info = {'tenant_id': {'key': 'val'}}
expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertEqual(expect_val, actual_val)
expect_attr_info = {'tenant_id': {'key': 'val'},
'project_id': {'key': 'val'}}
self.assertEqual(expect_attr_info, attr_info)
def test_attr_info_without_conversion(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
attr_info = {'foo': {'key': 'val'}}
expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertEqual(expect_val, actual_val)
def test_attr_info_with_convert_list_to(self):
path = '/?foo=key=4&bar=3&foo=key=2&qux=1'
request = webob.Request.blank(path)
attr_info = {
'foo': {
'convert_list_to': converters.convert_kvp_list_to_dict,
}
}
expect_val = {'foo': {'key': ['2', '4']}, 'bar': ['3'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertOrderedEqual(expect_val, actual_val)
def test_attr_info_with_convert_to(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
attr_info = {'foo': {'convert_to': converters.convert_to_int}}
expect_val = {'foo': [4], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertEqual(expect_val, actual_val)
def test_attr_info_with_base_db_attributes(self):
path = '/?__contains__=1&__class__=2'
request = webob.Request.blank(path)
self.assertEqual({}, api_common.get_filters(request, {}))
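# create_resource is only smoke-tested here: it must hand back a
# webob.dec.wsgify-wrapped controller.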
class CreateResourceTestCase(base.BaseTestCase):
def test_resource_creation(self):
resource = v2_base.create_resource('fakes', 'fake', None, {})
self.assertIsInstance(resource, webob.dec.wsgify)
avg_line_length: 42.017021
max_line_length: 79
alphanum_fraction: 0.589036
content_no_comment:
import os
import mock
from neutron_lib.api import attributes
from neutron_lib.api import converters
from neutron_lib.callbacks import registry
from neutron_lib import constants
from neutron_lib import context
from neutron_lib import exceptions as n_exc
from neutron_lib import fixture
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_policy import policy as oslo_policy
from oslo_utils import uuidutils
import six
import six.moves.urllib.parse as urlparse
import webob
from webob import exc
import webtest
from neutron.api import api_common
from neutron.api import extensions
from neutron.api.v2 import base as v2_base
from neutron.api.v2 import router
from neutron import policy
from neutron import quota
from neutron.quota import resource_registry
from neutron.tests import base
from neutron.tests import fake_notifier
from neutron.tests import tools
from neutron.tests.unit import dummy_plugin
from neutron.tests.unit import testlib_api
EXTDIR = os.path.join(base.ROOTDIR, 'unit/extensions')
_uuid = uuidutils.generate_uuid
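# Builds request paths for the tests, e.g.
# _get_path('networks', id='abc', action='do', fmt='json') returns
# '/networks/abc/do.json'.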
def _get_path(resource, id=None, action=None,
fmt=None, endpoint=None):
path = '/%s' % resource
if id is not None:
path = path + '/%s' % id
if action is not None:
path = path + '/%s' % action
if endpoint is not None:
path = path + '/%s' % endpoint
if fmt is not None:
path = path + '.%s' % fmt
return path
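# APIv2TestBase stands up a real router.APIRouter in front of a fully
# mocked core plugin (autospec of NeutronPluginBaseV2) with native sorting
# and pagination enabled, so tests can assert on the exact calls the API
# layer makes into the plugin.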
class APIv2TestBase(base.BaseTestCase):
def setUp(self):
super(APIv2TestBase, self).setUp()
plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
extensions.PluginAwareExtensionManager._instance = None
self.config_parse()
self.setup_coreplugin(plugin, load_plugins=False)
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
instance = self.plugin.return_value
instance.supported_extension_aliases = ['empty-string-filtering']
instance._NeutronPluginBaseV2__native_pagination_support = True
instance._NeutronPluginBaseV2__native_sorting_support = True
tools.make_mock_plugin_json_encodable(instance)
api = router.APIRouter()
self.api = webtest.TestApp(api)
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
group='QUOTAS')
policy.init()
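# _ArgMatcher lets mock assertions compare a call argument with a custom
# predicate; paired with _list_cmp it matches lists irrespective of order.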
class _ArgMatcher(object):
def __init__(self, cmp, obj):
self.cmp = cmp
self.obj = obj
def __eq__(self, other):
return self.cmp(self.obj, other)
def _list_cmp(l1, l2):
return set(l1) == set(l2)
class APIv2TestCase(APIv2TestBase):
@staticmethod
def _get_policy_attrs(attr_info):
policy_attrs = {name for (name, info) in attr_info.items()
if info.get('required_by_policy')}
if 'tenant_id' in policy_attrs:
policy_attrs.add('project_id')
return sorted(policy_attrs)
def _do_field_list(self, resource, base_fields):
attr_info = attributes.RESOURCES[resource]
policy_attrs = self._get_policy_attrs(attr_info)
for name, info in attr_info.items():
if info.get('primary_key'):
policy_attrs.append(name)
fields = base_fields
fields.extend(policy_attrs)
return fields
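    # Builds the expected kwargs of a plugin list call: every
    # filter/pagination/sorting argument defaults to mock.ANY unless it is
    # overridden or named in skipargs.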
def _get_collection_kwargs(self, skipargs=None, **kwargs):
skipargs = skipargs or []
args_list = ['filters', 'fields', 'sorts', 'limit', 'marker',
'page_reverse']
args_dict = dict(
(arg, mock.ANY) for arg in set(args_list) - set(skipargs))
args_dict.update(kwargs)
return args_dict
def test_fields(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': 'foo'})
fields = self._do_field_list('networks', ['foo'])
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
fields = self._do_field_list('networks', ['bar', 'foo'])
self.api.get(_get_path('networks'), {'fields': ['foo', 'bar']})
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple_with_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
fields = self._do_field_list('networks', ['foo'])
self.api.get(_get_path('networks'), {'fields': ['foo', '']})
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': ''})
kwargs = self._get_collection_kwargs(fields=[])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': ['', '']})
kwargs = self._get_collection_kwargs(fields=[])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar'})
filters = {'name': ['bar']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ''})
filters = {'name': ['']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['', '']})
filters = {'name': ['', '']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_with_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['bar', '']})
filters = {'name': ['bar', '']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_values(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['bar', 'bar2']})
filters = {'name': ['bar', 'bar2']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar',
'tenant_id': 'bar2'})
filters = {'name': ['bar'], 'tenant_id': ['bar2']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_fields(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar', 'fields': 'foo'})
filters = {'name': ['bar']}
fields = self._do_field_list('networks', ['foo'])
kwargs = self._get_collection_kwargs(filters=filters, fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_convert_to(self):
instance = self.plugin.return_value
instance.get_ports.return_value = []
self.api.get(_get_path('ports'), {'admin_state_up': 'true'})
filters = {'admin_state_up': [True]}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_convert_list_to(self):
instance = self.plugin.return_value
instance.get_ports.return_value = []
self.api.get(_get_path('ports'),
{'fixed_ips': ['ip_address=foo', 'subnet_id=bar']})
filters = {'fixed_ips': {'ip_address': ['foo'], 'subnet_id': ['bar']}}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
def test_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'limit': '10'})
kwargs = self._get_collection_kwargs(limit=10)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_limit_with_greater_than_max_limit(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'limit': '1001'})
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_zero(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'limit': '0'})
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_unspecific(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_negative_value(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'), {'limit': -1},
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_limit_with_non_integer(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'limit': 'abc'}, expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
self.assertIn('abc', res)
def test_limit_with_infinite_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_override('pagination_max_limit', 'Infinite')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_negative_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_default('pagination_max_limit', '-1')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_non_integer_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_default('pagination_max_limit', 'abc')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_marker(self):
cfg.CONF.set_override('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
marker = _uuid()
self.api.get(_get_path('networks'),
{'marker': marker})
kwargs = self._get_collection_kwargs(limit=1000, marker=marker)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_page_reverse(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'),
                     {'page_reverse': 'True'})
        kwargs = self._get_collection_kwargs(page_reverse=True)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
        instance.get_networks.reset_mock()
        self.api.get(_get_path('networks'),
                     {'page_reverse': 'False'})
        kwargs = self._get_collection_kwargs(page_reverse=False)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_page_reverse_with_non_bool(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'page_reverse': 'abc'})
kwargs = self._get_collection_kwargs(page_reverse=False)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_page_reverse_with_unspecific(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(page_reverse=False)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up'],
'sort_dir': ['desc', 'asc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', True)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort_with_primary_key(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up', 'id'],
'sort_dir': ['desc', 'asc', 'desc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', False)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort_without_direction(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'), {'sort_key': ['name']},
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_sort_with_invalid_attribute(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'sort_key': 'abc',
'sort_dir': 'asc'},
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_sort_with_invalid_dirs(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'sort_key': 'name',
'sort_dir': 'abc'},
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_emulated_sort(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance._NeutronPluginBaseV2__native_sorting_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
'sort_dir': ['desc', 'asc']})
kwargs = self._get_collection_kwargs(
skipargs=['sorts', 'limit', 'marker', 'page_reverse'])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_emulated_sort_without_sort_field(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance._NeutronPluginBaseV2__native_sorting_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
'sort_dir': ['desc', 'asc'],
'fields': ['subnets']})
kwargs = self._get_collection_kwargs(
skipargs=['sorts', 'limit', 'marker', 'page_reverse'],
fields=_ArgMatcher(_list_cmp, ['name',
'status',
'id',
'subnets',
'shared',
'project_id',
'tenant_id']))
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_emulated_pagination(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'limit': 10,
'marker': 'foo',
'page_reverse': False})
kwargs = self._get_collection_kwargs(skipargs=['limit',
'marker',
'page_reverse'])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_native_pagination_without_native_sorting(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_sorting_support = False
self.assertRaises(n_exc.Invalid, router.APIRouter)
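# JSONV2TestCase drives full JSON request/response cycles through the
# router: tenant-scoped listing, pagination links, create defaults, and
# bad-request validation, with bodies going through the serialize and
# deserialize helpers.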
class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
def _test_list(self, req_tenant_id, real_tenant_id):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
input_dict = {'id': uuidutils.generate_uuid(),
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': real_tenant_id,
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
res = self.api.get(_get_path('networks',
fmt=self.fmt), extra_environ=env)
res = self.deserialize(res)
self.assertIn('networks', res)
if not req_tenant_id or req_tenant_id == real_tenant_id:
self.assertEqual(1, len(res['networks']))
output_dict = res['networks'][0]
input_dict['shared'] = False
self.assertEqual(len(input_dict), len(output_dict))
for k, v in input_dict.items():
self.assertEqual(v, output_dict[k])
else:
self.assertEqual(0, len(res['networks']))
def test_list_noauth(self):
self._test_list(None, _uuid())
def test_list_keystone(self):
tenant_id = _uuid()
self._test_list(tenant_id, tenant_id)
def test_list_keystone_bad(self):
tenant_id = _uuid()
self._test_list(tenant_id + "bad", tenant_id)
def test_list_pagination(self):
id1 = str(_uuid())
id2 = str(_uuid())
input_dict1 = {'id': id1,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
input_dict2 = {'id': id2,
'name': 'net2',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict1, input_dict2]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'sort_key': ['name'],
'sort_dir': ['asc']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(2, len(res['networks']))
self.assertEqual(sorted([id1, id2]),
sorted([res['networks'][0]['id'],
res['networks'][1]['id']]))
self.assertIn('networks_links', res)
next_links = []
previous_links = []
for r in res['networks_links']:
if r['rel'] == 'next':
next_links.append(r)
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(1, len(next_links))
self.assertEqual(1, len(previous_links))
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
params['marker'] = [id2]
self.assertEqual(params, urlparse.parse_qs(url.query))
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
params['marker'] = [id1]
params['page_reverse'] = ['True']
self.assertEqual(params, urlparse.parse_qs(url.query))
def test_list_pagination_with_last_page(self):
id = str(_uuid())
input_dict = {'id': id,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': str(_uuid())}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(1, len(res['networks']))
self.assertEqual(id, res['networks'][0]['id'])
self.assertIn('networks_links', res)
previous_links = []
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'next')
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(1, len(previous_links))
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
expect_params['marker'] = [id]
expect_params['page_reverse'] = ['True']
self.assertEqual(expect_params, urlparse.parse_qs(url.query))
def test_list_pagination_with_empty_page(self):
return_value = []
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': str(_uuid())}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual([], res['networks'])
previous_links = []
if 'networks_links' in res:
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'next')
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(1, len(previous_links))
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
del expect_params['marker']
expect_params['page_reverse'] = ['True']
self.assertEqual(expect_params, urlparse.parse_qs(url.query))
def test_list_pagination_reverse_with_last_page(self):
id = str(_uuid())
input_dict = {'id': id,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'page_reverse': ['True']}
res = self.api.get(_get_path('networks'),
params=params).json
        self.assertEqual(1, len(res['networks']))
self.assertEqual(id, res['networks'][0]['id'])
self.assertIn('networks_links', res)
next_links = []
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'previous')
if r['rel'] == 'next':
next_links.append(r)
self.assertEqual(1, len(next_links))
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expected_params = params.copy()
del expected_params['page_reverse']
expected_params['marker'] = [id]
self.assertEqual(expected_params,
urlparse.parse_qs(url.query))
def test_list_pagination_reverse_with_empty_page(self):
return_value = []
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'page_reverse': ['True']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual([], res['networks'])
next_links = []
if 'networks_links' in res:
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'previous')
if r['rel'] == 'next':
next_links.append(r)
self.assertEqual(1, len(next_links))
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
del expect_params['marker']
del expect_params['page_reverse']
self.assertEqual(expect_params, urlparse.parse_qs(url.query))
def test_create(self):
net_id = _uuid()
data = {'network': {'name': 'net1', 'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net_id, net['id'])
self.assertEqual("ACTIVE", net['status'])
def test_create_use_defaults(self):
net_id = _uuid()
tenant_id = _uuid()
initial_input = {'network': {'name': 'net1',
'tenant_id': tenant_id,
'project_id': tenant_id}}
full_input = {'network': {'admin_state_up': True,
'shared': False}}
full_input['network'].update(initial_input['network'])
return_value = {'id': net_id, 'status': "ACTIVE"}
return_value.update(full_input['network'])
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt)
instance.create_network.assert_called_with(mock.ANY,
network=full_input)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net_id, net['id'])
self.assertTrue(net['admin_state_up'])
self.assertEqual("ACTIVE", net['status'])
def test_create_no_keystone_env(self):
data = {'name': 'net1'}
self._test_create_failure_bad_request('networks', data)
def test_create_with_keystone_env(self):
tenant_id = _uuid()
net_id = _uuid()
env = {'neutron.context': context.Context('', tenant_id)}
initial_input = {'network': {'name': 'net1'}}
full_input = {'network': {'admin_state_up': True,
'shared': False, 'tenant_id': tenant_id,
'project_id': tenant_id}}
full_input['network'].update(initial_input['network'])
return_value = {'id': net_id, 'status': "ACTIVE"}
return_value.update(full_input['network'])
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt,
extra_environ=env)
instance.create_network.assert_called_with(mock.ANY,
network=full_input)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
def test_create_bad_keystone_tenant(self):
tenant_id = _uuid()
data = {'network': {'name': 'net1', 'tenant_id': tenant_id}}
env = {'neutron.context': context.Context('', tenant_id + "bad")}
self._test_create_failure_bad_request('networks', data,
extra_environ=env)
def test_create_no_body(self):
data = {'whoa': None}
self._test_create_failure_bad_request('networks', data)
def test_create_body_string_not_json(self):
data = 'a string'
self._test_create_failure_bad_request('networks', data)
def test_create_body_boolean_not_json(self):
data = True
self._test_create_failure_bad_request('networks', data)
def test_create_no_resource(self):
data = {}
self._test_create_failure_bad_request('networks', data)
def test_create_missing_attr(self):
data = {'port': {'what': 'who', 'tenant_id': _uuid()}}
self._test_create_failure_bad_request('ports', data)
def test_create_readonly_attr(self):
data = {'network': {'name': 'net1', 'tenant_id': _uuid(),
'status': "ACTIVE"}}
self._test_create_failure_bad_request('networks', data)
def test_create_with_too_long_name(self):
data = {'network': {'name': "12345678" * 32,
'admin_state_up': True,
'tenant_id': _uuid()}}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_create_bulk(self):
data = {'networks': [{'name': 'net1',
'admin_state_up': True,
'tenant_id': _uuid()},
{'name': 'net2',
'admin_state_up': True,
'tenant_id': _uuid()}]}
def side_effect(context, network):
net = network.copy()
net['network'].update({'subnets': []})
return net['network']
instance = self.plugin.return_value
instance.create_network.side_effect = side_effect
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
def _test_create_failure_bad_request(self, resource, data, **kwargs):
res = self.api.post(_get_path(resource, fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True, **kwargs)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_create_bulk_networks_none(self):
self._test_create_failure_bad_request('networks', {'networks': None})
def test_create_bulk_networks_empty_list(self):
self._test_create_failure_bad_request('networks', {'networks': []})
def test_create_bulk_missing_attr(self):
data = {'ports': [{'what': 'who', 'tenant_id': _uuid()}]}
self._test_create_failure_bad_request('ports', data)
def test_create_bulk_partial_body(self):
data = {'ports': [{'device_id': 'device_1',
'tenant_id': _uuid()},
{'tenant_id': _uuid()}]}
self._test_create_failure_bad_request('ports', data)
def test_create_attr_not_specified(self):
net_id = _uuid()
tenant_id = _uuid()
device_id = _uuid()
initial_input = {'port': {'name': '', 'network_id': net_id,
'tenant_id': tenant_id,
'project_id': tenant_id,
'device_id': device_id,
'admin_state_up': True}}
full_input = {'port': {'admin_state_up': True,
'mac_address': constants.ATTR_NOT_SPECIFIED,
'fixed_ips': constants.ATTR_NOT_SPECIFIED,
'device_owner': ''}}
full_input['port'].update(initial_input['port'])
return_value = {'id': _uuid(), 'status': 'ACTIVE',
'admin_state_up': True,
'mac_address': 'ca:fe:de:ad:be:ef',
'device_id': device_id,
'device_owner': ''}
return_value.update(initial_input['port'])
instance = self.plugin.return_value
instance.get_network.return_value = {
'tenant_id': six.text_type(tenant_id)
}
instance.get_ports_count.return_value = 1
instance.create_port.return_value = return_value
res = self.api.post(_get_path('ports', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt)
instance.create_port.assert_called_with(mock.ANY, port=full_input)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('port', res)
port = res['port']
self.assertEqual(net_id, port['network_id'])
self.assertEqual('ca:fe:de:ad:be:ef', port['mac_address'])
def test_create_return_extra_attr(self):
net_id = _uuid()
data = {'network': {'name': 'net1', 'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id, 'v2attrs:something': "123"}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net_id, net['id'])
self.assertEqual("ACTIVE", net['status'])
self.assertNotIn('v2attrs:something', net)
def test_fields(self):
return_value = {'name': 'net1', 'admin_state_up': True,
'subnets': []}
instance = self.plugin.return_value
instance.get_network.return_value = return_value
self.api.get(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt))
def _test_delete(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': real_tenant_id,
'shared': False}
instance.delete_network.return_value = None
res = self.api.delete(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
extra_environ=env,
expect_errors=expect_errors)
self.assertEqual(expected_code, res.status_int)
def test_delete_noauth(self):
self._test_delete(None, _uuid(), exc.HTTPNoContent.code)
def test_delete_keystone(self):
tenant_id = _uuid()
self._test_delete(tenant_id, tenant_id, exc.HTTPNoContent.code)
def test_delete_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_delete(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def _test_get(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
shared = False
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
if req_tenant_id.endswith('another'):
shared = True
env['neutron.context'].roles = ['tenant_admin']
data = {'tenant_id': real_tenant_id, 'shared': shared}
instance = self.plugin.return_value
instance.get_network.return_value = data
res = self.api.get(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
extra_environ=env,
expect_errors=expect_errors)
self.assertEqual(expected_code, res.status_int)
return res
def test_get_noauth(self):
self._test_get(None, _uuid(), 200)
def test_get_keystone(self):
tenant_id = _uuid()
self._test_get(tenant_id, tenant_id, 200)
def test_get_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_get(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def test_get_keystone_shared_network(self):
tenant_id = _uuid()
self._test_get(tenant_id + "another", tenant_id, 200)
def test_get_keystone_strip_admin_only_attribute(self):
tenant_id = _uuid()
rules = oslo_policy.Rules.from_dict(
{'get_network:name': "rule:admin_only"})
policy.set_rules(rules, overwrite=False)
res = self._test_get(tenant_id, tenant_id, 200)
res = self.deserialize(res)
self.assertNotIn('name', res['network'])
def _test_update(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
data = {'network': {'admin_state_up': True}}
return_value = {'subnets': []}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': real_tenant_id,
'shared': False}
instance.update_network.return_value = return_value
res = self.api.put(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
self.serialize(data),
extra_environ=env,
expect_errors=expect_errors)
self.assertEqual(1, instance.get_network.call_count)
self.assertIn('id', instance.get_network.call_args[1]['fields'])
        self.assertEqual(expected_code, res.status_int)
def test_update_noauth(self):
self._test_update(None, _uuid(), 200)
def test_update_keystone(self):
tenant_id = _uuid()
self._test_update(tenant_id, tenant_id, 200)
def test_update_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_update(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def test_update_keystone_no_tenant(self):
tenant_id = _uuid()
self._test_update(tenant_id, None,
exc.HTTPNotFound.code, expect_errors=True)
def test_update_readonly_field(self):
data = {'network': {'status': "NANANA"}}
res = self.api.put(_get_path('networks', id=_uuid()),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_invalid_attribute_field(self):
data = {'network': {'invalid_key1': "foo1", 'invalid_key2': "foo2"}}
res = self.api.put(_get_path('networks', id=_uuid()),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_retry_on_index(self):
instance = self.plugin.return_value
instance.get_networks.side_effect = [db_exc.RetryRequest(None), []]
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks', fmt=self.fmt))
self.assertTrue(instance.get_networks.called)
def test_retry_on_show(self):
instance = self.plugin.return_value
instance.get_network.side_effect = [db_exc.RetryRequest(None), {}]
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks', _uuid(), fmt=self.fmt))
self.assertTrue(instance.get_network.called)
class SubresourceTest(base.BaseTestCase):
def setUp(self):
super(SubresourceTest, self).setUp()
raise self.skipException('this class will be deleted')
plugin = 'neutron.tests.unit.api.v2.test_base.TestSubresourcePlugin'
extensions.PluginAwareExtensionManager._instance = None
self.useFixture(fixture.APIDefinitionFixture())
self.config_parse()
self.setup_coreplugin(plugin, load_plugins=False)
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
api = router.APIRouter()
SUB_RESOURCES = {}
RESOURCE_ATTRIBUTE_MAP = {}
SUB_RESOURCES[dummy_plugin.RESOURCE_NAME] = {
'collection_name': 'dummies',
'parent': {'collection_name': 'networks',
'member_name': 'network'}
}
RESOURCE_ATTRIBUTE_MAP['dummies'] = {
'foo': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '', 'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True}
}
collection_name = SUB_RESOURCES[
dummy_plugin.RESOURCE_NAME].get('collection_name')
resource_name = dummy_plugin.RESOURCE_NAME
parent = SUB_RESOURCES[dummy_plugin.RESOURCE_NAME].get('parent')
params = RESOURCE_ATTRIBUTE_MAP['dummies']
member_actions = {'mactions': 'GET'}
_plugin = directory.get_plugin()
controller = v2_base.create_resource(collection_name, resource_name,
_plugin, params,
member_actions=member_actions,
parent=parent,
allow_bulk=True,
allow_pagination=True,
allow_sorting=True)
path_prefix = "/%s/{%s_id}/%s" % (parent['collection_name'],
parent['member_name'],
collection_name)
mapper_kwargs = dict(controller=controller,
path_prefix=path_prefix)
api.map.collection(collection_name, resource_name, **mapper_kwargs)
api.map.resource(collection_name, collection_name,
controller=controller,
parent_resource=parent,
member=member_actions)
self.api = webtest.TestApp(api)
def test_index_sub_resource(self):
instance = self.plugin.return_value
self.api.get('/networks/id1/dummies')
instance.get_network_dummies.assert_called_once_with(mock.ANY,
filters=mock.ANY,
fields=mock.ANY,
network_id='id1')
def test_show_sub_resource(self):
instance = self.plugin.return_value
dummy_id = _uuid()
self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id))
instance.get_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1',
fields=mock.ANY)
def test_create_sub_resource(self):
instance = self.plugin.return_value
tenant_id = _uuid()
body = {
dummy_plugin.RESOURCE_NAME: {
'foo': 'bar', 'tenant_id': tenant_id,
'project_id': tenant_id
}
}
self.api.post_json('/networks/id1/dummies', body)
instance.create_network_dummy.assert_called_once_with(mock.ANY,
network_id='id1',
dummy=body)
def test_update_sub_resource(self):
instance = self.plugin.return_value
dummy_id = _uuid()
body = {dummy_plugin.RESOURCE_NAME: {'foo': 'bar'}}
self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id),
body)
instance.update_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1',
dummy=body)
def test_update_subresource_to_none(self):
instance = self.plugin.return_value
dummy_id = _uuid()
body = {dummy_plugin.RESOURCE_NAME: {}}
self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id),
body)
instance.update_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1',
dummy=body)
def test_delete_sub_resource(self):
instance = self.plugin.return_value
dummy_id = _uuid()
self.api.delete('/networks/id1' + _get_path('dummies', id=dummy_id))
instance.delete_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1')
def test_sub_resource_member_actions(self):
instance = self.plugin.return_value
dummy_id = _uuid()
self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id,
action='mactions'))
instance.mactions.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1')
class V2Views(base.BaseTestCase):
def _view(self, keys, collection, resource):
data = dict((key, 'value') for key in keys)
data['fake'] = 'value'
attr_info = attributes.RESOURCES[collection]
controller = v2_base.Controller(None, collection, resource, attr_info)
res = controller._view(context.get_admin_context(), data)
self.assertNotIn('fake', res)
for key in keys:
self.assertIn(key, res)
def test_network(self):
keys = ('id', 'name', 'subnets', 'admin_state_up', 'status',
'tenant_id')
self._view(keys, 'networks', 'network')
def test_port(self):
keys = ('id', 'network_id', 'mac_address', 'fixed_ips',
'device_id', 'admin_state_up', 'tenant_id', 'status')
self._view(keys, 'ports', 'port')
def test_subnet(self):
keys = ('id', 'network_id', 'tenant_id', 'gateway_ip',
'ip_version', 'cidr', 'enable_dhcp')
self._view(keys, 'subnets', 'subnet')
class NotificationTest(APIv2TestBase):
def setUp(self):
super(NotificationTest, self).setUp()
fake_notifier.reset()
def _resource_op_notifier(self, opname, resource, expected_errors=False):
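        # Drive a single CRUD operation through the API and assert that exactly
        # one <resource>.<op>.start / <resource>.<op>.end notification pair
        # was emitted.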
initial_input = {resource: {'name': 'myname'}}
instance = self.plugin.return_value
instance.get_networks.return_value = initial_input
instance.get_networks_count.return_value = 0
expected_code = exc.HTTPCreated.code
if opname == 'create':
initial_input[resource]['tenant_id'] = _uuid()
res = self.api.post_json(
_get_path('networks'),
initial_input, expect_errors=expected_errors)
if opname == 'update':
res = self.api.put_json(
_get_path('networks', id=_uuid()),
initial_input, expect_errors=expected_errors)
expected_code = exc.HTTPOk.code
if opname == 'delete':
initial_input[resource]['tenant_id'] = _uuid()
res = self.api.delete(
_get_path('networks', id=_uuid()),
expect_errors=expected_errors)
expected_code = exc.HTTPNoContent.code
expected_events = ('.'.join([resource, opname, "start"]),
'.'.join([resource, opname, "end"]))
self.assertEqual(len(expected_events),
len(fake_notifier.NOTIFICATIONS))
for msg, event in zip(fake_notifier.NOTIFICATIONS, expected_events):
self.assertEqual('INFO', msg['priority'])
self.assertEqual(event, msg['event_type'])
if opname == 'delete' and event == 'network.delete.end':
self.assertIn('payload', msg)
resource = msg['payload']
self.assertIn('network_id', resource)
self.assertIn('network', resource)
self.assertEqual(expected_code, res.status_int)
    def test_network_create_notifier(self):
        self._resource_op_notifier('create', 'network')
    def test_network_delete_notifier(self):
        self._resource_op_notifier('delete', 'network')
    def test_network_update_notifier(self):
        self._resource_op_notifier('update', 'network')
class RegistryNotificationTest(APIv2TestBase):
def setUp(self):
cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS')
super(RegistryNotificationTest, self).setUp()
def _test_registry_notify(self, opname, resource, initial_input=None):
instance = self.plugin.return_value
instance.get_networks.return_value = initial_input
instance.get_networks_count.return_value = 0
expected_code = exc.HTTPCreated.code
with mock.patch.object(registry, 'publish') as notify:
if opname == 'create':
res = self.api.post_json(
_get_path('networks'),
initial_input)
if opname == 'update':
res = self.api.put_json(
_get_path('networks', id=_uuid()),
initial_input)
expected_code = exc.HTTPOk.code
if opname == 'delete':
res = self.api.delete(_get_path('networks', id=_uuid()))
expected_code = exc.HTTPNoContent.code
self.assertTrue(notify.called)
self.assertEqual(expected_code, res.status_int)
def test_network_create_registry_notify(self):
input = {'network': {'name': 'net',
'tenant_id': _uuid()}}
self._test_registry_notify('create', 'network', input)
def test_network_delete_registry_notify(self):
self._test_registry_notify('delete', 'network')
def test_network_update_registry_notify(self):
input = {'network': {'name': 'net'}}
self._test_registry_notify('update', 'network', input)
def test_networks_create_bulk_registry_notify(self):
input = {'networks': [{'name': 'net1',
'tenant_id': _uuid()},
{'name': 'net2',
'tenant_id': _uuid()}]}
self._test_registry_notify('create', 'network', input)
class QuotaTest(APIv2TestBase):
def setUp(self):
cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS')
super(QuotaTest, self).setUp()
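        # Swap in a private ResourceRegistry so that only 'network' is
        # registered as a countable resource for these quota tests.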
replacement_registry = resource_registry.ResourceRegistry()
registry_patcher = mock.patch('neutron.quota.resource_registry.'
'ResourceRegistry.get_instance')
mock_registry = registry_patcher.start().return_value
mock_registry.get_resource = replacement_registry.get_resource
mock_registry.resources = replacement_registry.resources
replacement_registry.register_resource_by_name('network')
def test_create_network_quota(self):
cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
full_input = {'network': {'admin_state_up': True, 'subnets': []}}
full_input['network'].update(initial_input['network'])
instance = self.plugin.return_value
instance.get_networks_count.return_value = 1
res = self.api.post_json(
_get_path('networks'), initial_input, expect_errors=True)
instance.get_networks_count.assert_called_with(mock.ANY,
filters=mock.ANY)
self.assertIn("Quota exceeded for resources",
res.json['NeutronError']['message'])
def test_create_network_quota_no_counts(self):
cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
full_input = {'network': {'admin_state_up': True, 'subnets': []}}
full_input['network'].update(initial_input['network'])
instance = self.plugin.return_value
instance.get_networks_count.side_effect = (
NotImplementedError())
instance.get_networks.return_value = ["foo"]
res = self.api.post_json(
_get_path('networks'), initial_input, expect_errors=True)
instance.get_networks_count.assert_called_with(mock.ANY,
filters=mock.ANY)
self.assertIn("Quota exceeded for resources",
res.json['NeutronError']['message'])
def test_create_network_quota_without_limit(self):
cfg.CONF.set_override('quota_network', -1, group='QUOTAS')
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
instance = self.plugin.return_value
instance.get_networks_count.return_value = 3
res = self.api.post_json(
_get_path('networks'), initial_input)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
class ExtensionTestCase(base.BaseTestCase):
def setUp(self):
cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS')
super(ExtensionTestCase, self).setUp()
plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
extensions.PluginAwareExtensionManager._instance = None
self.useFixture(fixture.APIDefinitionFixture())
self.config_parse()
self.setup_coreplugin(plugin, load_plugins=False)
cfg.CONF.set_override('api_extensions_path', EXTDIR)
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
self.plugin.return_value.supported_extension_aliases = ["v2attrs"]
api = router.APIRouter()
self.api = webtest.TestApp(api)
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
group='QUOTAS')
def test_extended_create(self):
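        # The v2attrs extension attributes flow both ways: 'v2attrs:something_else'
        # is accepted on input and handed to the plugin, while the view exposes
        # 'v2attrs:something' from the plugin but filters 'v2attrs:something_else'
        # out of the response.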
net_id = _uuid()
tenant_id = _uuid()
initial_input = {'network': {'name': 'net1', 'tenant_id': tenant_id,
'project_id': tenant_id,
'v2attrs:something_else': "abc"}}
data = {'network': {'admin_state_up': True, 'shared': False}}
data['network'].update(initial_input['network'])
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id,
'v2attrs:something': "123"}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post_json(_get_path('networks'), initial_input)
instance.create_network.assert_called_with(mock.ANY,
network=data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
self.assertIn('network', res.json)
net = res.json['network']
self.assertEqual(net_id, net['id'])
self.assertEqual("ACTIVE", net['status'])
self.assertEqual("123", net['v2attrs:something'])
self.assertNotIn('v2attrs:something_else', net)
class TestSubresourcePlugin(object):
def get_network_dummies(self, context, network_id,
filters=None, fields=None):
return []
def get_network_dummy(self, context, id, network_id,
fields=None):
return {}
def create_network_dummy(self, context, network_id, dummy):
return {}
def update_network_dummy(self, context, id, network_id, dummy):
return {}
def delete_network_dummy(self, context, id, network_id):
return
def mactions(self, context, id, network_id):
return
class ListArgsTestCase(base.BaseTestCase):
def test_list_args(self):
path = '/?fields=4&foo=3&fields=2&bar=1'
request = webob.Request.blank(path)
expect_val = ['2', '4']
actual_val = api_common.list_args(request, 'fields')
self.assertEqual(expect_val, sorted(actual_val))
def test_list_args_with_empty(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
self.assertEqual([], api_common.list_args(request, 'fields'))
class SortingTestCase(base.BaseTestCase):
def test_get_sorts(self):
path = '/?sort_key=foo&sort_dir=desc&sort_key=bar&sort_dir=asc'
request = webob.Request.blank(path)
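        # get_sorts pairs each sort_key with the sort_dir at the same position;
        # the boolean in the result means "ascending".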
attr_info = {'foo': {'key': 'val'}, 'bar': {'key': 'val'}}
expect_val = [('foo', False), ('bar', True)]
actual_val = api_common.get_sorts(request, attr_info)
self.assertEqual(expect_val, actual_val)
def test_get_sorts_with_project_id(self):
path = '/?sort_key=project_id&sort_dir=desc'
request = webob.Request.blank(path)
attr_info = {'tenant_id': {'key': 'val'}}
expect_val = [('project_id', False)]
actual_val = api_common.get_sorts(request, attr_info)
self.assertEqual(expect_val, actual_val)
class FiltersTestCase(base.BaseTestCase):
def test_all_skip_args(self):
path = '/?fields=4&fields=3&fields=2&fields=1'
request = webob.Request.blank(path)
self.assertEqual({}, api_common.get_filters(request, {},
["fields"]))
@mock.patch('neutron.api.api_common.is_empty_string_filtering_supported',
return_value=False)
def test_blank_values(self, mock_is_supported):
path = '/?foo=&bar=&baz=&qux='
request = webob.Request.blank(path)
self.assertEqual({}, api_common.get_filters(request, {}))
@mock.patch('neutron.api.api_common.is_empty_string_filtering_supported',
return_value=True)
def test_blank_values_with_filtering_supported(self, mock_is_supported):
path = '/?foo=&bar=&baz=&qux='
request = webob.Request.blank(path)
self.assertEqual({'foo': [''], 'bar': [''], 'baz': [''], 'qux': ['']},
api_common.get_filters(request, {}))
def test_no_attr_info(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, {})
self.assertEqual(expect_val, actual_val)
def test_attr_info_with_project_info_populated(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
attr_info = {'tenant_id': {'key': 'val'}}
expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertEqual(expect_val, actual_val)
expect_attr_info = {'tenant_id': {'key': 'val'},
'project_id': {'key': 'val'}}
self.assertEqual(expect_attr_info, attr_info)
def test_attr_info_without_conversion(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
attr_info = {'foo': {'key': 'val'}}
expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertEqual(expect_val, actual_val)
def test_attr_info_with_convert_list_to(self):
path = '/?foo=key=4&bar=3&foo=key=2&qux=1'
request = webob.Request.blank(path)
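        # convert_kvp_list_to_dict folds repeated 'key=value' filter entries
        # into a dict of lists: foo=key=4&foo=key=2 -> {'key': ['2', '4']}.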
attr_info = {
'foo': {
'convert_list_to': converters.convert_kvp_list_to_dict,
}
}
expect_val = {'foo': {'key': ['2', '4']}, 'bar': ['3'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertOrderedEqual(expect_val, actual_val)
def test_attr_info_with_convert_to(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
attr_info = {'foo': {'convert_to': converters.convert_to_int}}
expect_val = {'foo': [4], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertEqual(expect_val, actual_val)
def test_attr_info_with_base_db_attributes(self):
path = '/?__contains__=1&__class__=2'
request = webob.Request.blank(path)
self.assertEqual({}, api_common.get_filters(request, {}))
class CreateResourceTestCase(base.BaseTestCase):
def test_resource_creation(self):
resource = v2_base.create_resource('fakes', 'fake', None, {})
self.assertIsInstance(resource, webob.dec.wsgify)
# --- tests/integrate_test/samples/sample_builtin/0_0_5/governance/governance.py
# --- repo: bayeshack2016/icon-service (Apache-2.0)
# -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from iconservice import *
TAG = 'Governance'
DEBUG = False
CURRENT = 'current'
NEXT = 'next'
STATUS = 'status'
DEPLOY_TX_HASH = 'deployTxHash'
AUDIT_TX_HASH = 'auditTxHash'
VALID_STATUS_KEYS = [STATUS, DEPLOY_TX_HASH, AUDIT_TX_HASH]
STATUS_PENDING = 'pending'
STATUS_ACTIVE = 'active'
STATUS_INACTIVE = 'inactive'
STATUS_REJECTED = 'rejected'
STEP_TYPE_DEFAULT = 'default'
STEP_TYPE_CONTRACT_CALL = 'contractCall'
STEP_TYPE_CONTRACT_CREATE = 'contractCreate'
STEP_TYPE_CONTRACT_UPDATE = 'contractUpdate'
STEP_TYPE_CONTRACT_DESTRUCT = 'contractDestruct'
STEP_TYPE_CONTRACT_SET = 'contractSet'
STEP_TYPE_GET = 'get'
STEP_TYPE_SET = 'set'
STEP_TYPE_REPLACE = 'replace'
STEP_TYPE_DELETE = 'delete'
STEP_TYPE_INPUT = 'input'
STEP_TYPE_EVENT_LOG = 'eventLog'
STEP_TYPE_API_CALL = 'apiCall'
INITIAL_STEP_COST_KEYS = [STEP_TYPE_DEFAULT,
STEP_TYPE_CONTRACT_CALL, STEP_TYPE_CONTRACT_CREATE, STEP_TYPE_CONTRACT_UPDATE,
STEP_TYPE_CONTRACT_DESTRUCT, STEP_TYPE_CONTRACT_SET,
STEP_TYPE_GET, STEP_TYPE_SET, STEP_TYPE_REPLACE, STEP_TYPE_DELETE, STEP_TYPE_INPUT,
STEP_TYPE_EVENT_LOG, STEP_TYPE_API_CALL]
CONTEXT_TYPE_INVOKE = 'invoke'
CONTEXT_TYPE_QUERY = 'query'
class StepCosts:
"""
DB for stepCosts management.
It is combined DictDB and ArrayDB in order to iterate items.
"""
_STEP_TYPES = 'step_types'
_STEP_COSTS = 'step_costs'
def __init__(self, db: IconScoreDatabase):
self._step_types = ArrayDB(self._STEP_TYPES, db, value_type=str)
self._step_costs = DictDB(self._STEP_COSTS, db, value_type=int)
def __setitem__(self, step_type: str, cost: int):
if step_type not in self._step_costs:
self._step_types.put(step_type)
self._step_costs[step_type] = cost
def __getitem__(self, step_type: str):
return self._step_costs[step_type]
def __delitem__(self, step_type: str):
        # deletion does not actually remove the entry; it just resets the cost to zero
if step_type in self._step_costs:
self._step_costs[step_type] = 0
def __contains__(self, step_type: str):
return step_type in self._step_costs
def __iter__(self):
return self._step_types.__iter__()
def __len__(self):
return self._step_types.__len__()
def items(self):
for step_type in self._step_types:
yield (step_type, self._step_costs[step_type])
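# A minimal usage sketch (illustrative only; `db` stands for the
# IconScoreDatabase handed to the SCORE):
#     costs = StepCosts(db)
#     costs[STEP_TYPE_DEFAULT] = 100_000      # registers the key and stores the cost
#     for step_type, cost in costs.items():   # iteration is backed by the ArrayDB keys
#         Logger.debug(f'{step_type}: {cost}', TAG)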
class Governance(IconSystemScoreBase):
_SCORE_STATUS = 'score_status' # legacy
_AUDITOR_LIST = 'auditor_list'
_DEPLOYER_LIST = 'deployer_list'
_SCORE_BLACK_LIST = 'score_black_list'
_STEP_PRICE = 'step_price'
_MAX_STEP_LIMITS = 'max_step_limits'
_VERSION = 'version'
_IMPORT_WHITE_LIST = 'import_white_list'
_IMPORT_WHITE_LIST_KEYS = 'import_white_list_keys'
_SERVICE_CONFIG = 'service_config'
_AUDIT_STATUS = 'audit_status'
_REJECT_STATUS = 'reject_status'
_REVISION_CODE = 'revision_code'
_REVISION_NAME = 'revision_name'
@eventlog(indexed=1)
def Accepted(self, txHash: str):
pass
@eventlog(indexed=1)
def Rejected(self, txHash: str, reason: str):
pass
@eventlog(indexed=1)
def StepPriceChanged(self, stepPrice: int):
pass
@eventlog(indexed=1)
def StepCostChanged(self, stepType: str, cost: int):
pass
@eventlog(indexed=1)
def MaxStepLimitChanged(self, contextType: str, value: int):
pass
@eventlog(indexed=0)
def AddImportWhiteListLog(self, addList: str, addCount: int):
pass
@eventlog(indexed=0)
def RemoveImportWhiteListLog(self, removeList: str, removeCount: int):
pass
@eventlog(indexed=0)
def UpdateServiceConfigLog(self, serviceFlag: int):
pass
@property
def import_white_list_cache(self) -> dict:
return self._get_import_white_list()
@property
def service_config(self) -> int:
return self._service_config.get()
@property
def revision_code(self) -> int:
return self._revision_code.get()
def __init__(self, db: IconScoreDatabase) -> None:
super().__init__(db)
# self._score_status = DictDB(self._SCORE_STATUS, db, value_type=bytes, depth=3)
self._auditor_list = ArrayDB(self._AUDITOR_LIST, db, value_type=Address)
self._deployer_list = ArrayDB(self._DEPLOYER_LIST, db, value_type=Address)
self._score_black_list = ArrayDB(self._SCORE_BLACK_LIST, db, value_type=Address)
self._step_price = VarDB(self._STEP_PRICE, db, value_type=int)
self._step_costs = StepCosts(db)
self._max_step_limits = DictDB(self._MAX_STEP_LIMITS, db, value_type=int)
self._version = VarDB(self._VERSION, db, value_type=str)
self._import_white_list = DictDB(self._IMPORT_WHITE_LIST, db, value_type=str)
self._import_white_list_keys = ArrayDB(self._IMPORT_WHITE_LIST_KEYS, db, value_type=str)
self._service_config = VarDB(self._SERVICE_CONFIG, db, value_type=int)
self._audit_status = DictDB(self._AUDIT_STATUS, db, value_type=bytes)
self._reject_status = DictDB(self._REJECT_STATUS, db, value_type=bytes)
self._revision_code = VarDB(self._REVISION_CODE, db, value_type=int)
self._revision_name = VarDB(self._REVISION_NAME, db, value_type=str)
def on_install(self, stepPrice: int = 10 ** 10) -> None:
super().on_install()
# add owner into initial auditor list
Logger.debug(f'on_install: owner = "{self.owner}"', TAG)
self._auditor_list.put(self.owner)
# add owner into initial deployer list
self._deployer_list.put(self.owner)
# set initial step price
self._step_price.set(stepPrice)
# set initial step costs
self._set_initial_step_costs()
# set initial max step limits
self._set_initial_max_step_limits()
# set initial import white list
self._set_initial_import_white_list()
# set initial service config
self._set_initial_service_config()
def on_update(self) -> None:
super().on_update()
if self.is_less_than_target_version('0.0.2'):
self._migrate_v0_0_2()
if self.is_less_than_target_version('0.0.3'):
self._migrate_v0_0_3()
if self.is_less_than_target_version('0.0.4'):
self._migrate_v0_0_4()
if self.is_less_than_target_version('0.0.5'):
self._migrate_v0_0_5()
self._version.set('0.0.5')
def is_less_than_target_version(self, target_version: str) -> bool:
last_version = self._version.get()
return self._versions(last_version) < self._versions(target_version)
def _migrate_v0_0_2(self):
"""
This migration updates the step costs and max step limits
"""
if len(self._step_costs) == 0:
# migrates from old DB of step_costs.
for step_type in INITIAL_STEP_COST_KEYS:
if step_type in self._step_costs:
self._step_costs._step_types.put(step_type)
self._set_initial_step_costs()
self._set_initial_max_step_limits()
def _migrate_v0_0_3(self):
# set initial import white list
self._set_initial_import_white_list()
self._set_initial_service_config()
self._set_initial_max_step_limits()
self._set_initial_revision()
def _migrate_v0_0_4(self):
pass
def _migrate_v0_0_5(self):
self._set_initial_revision()
@staticmethod
def _versions(version: str):
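        # Parse a dotted version string into a tuple of ints for ordering
        # comparisons, e.g. "0.0.3" -> (0, 0, 3); non-numeric parts are skipped.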
parts = []
if version is not None:
for part in version.split("."):
try:
parts.append(int(part))
except ValueError:
pass
return tuple(parts)
@external(readonly=True)
def getScoreStatus(self, address: Address) -> dict:
        # built-in SCOREs (such as Governance itself) are always reported active
if self.is_builtin_score(address):
deploy_info = self.get_deploy_info(address)
result = {
CURRENT: {
STATUS: STATUS_ACTIVE
}
}
if deploy_info.current_tx_hash is not None:
result[CURRENT][DEPLOY_TX_HASH] = deploy_info.current_tx_hash
return result
deploy_info = self.get_deploy_info(address)
if deploy_info is None:
self.revert('SCORE not found')
current_tx_hash = deploy_info.current_tx_hash
next_tx_hash = deploy_info.next_tx_hash
active = self.is_score_active(address)
        # install audit: the deployed tx is still pending audit or was rejected
if current_tx_hash is None and next_tx_hash and active is False:
reject_tx_hash = self._reject_status[next_tx_hash]
if reject_tx_hash:
result = {
NEXT: {
STATUS: STATUS_REJECTED,
DEPLOY_TX_HASH: next_tx_hash,
AUDIT_TX_HASH: reject_tx_hash
}}
else:
result = {
NEXT: {
STATUS: STATUS_PENDING,
DEPLOY_TX_HASH: next_tx_hash
}}
elif current_tx_hash and next_tx_hash is None and active is True:
audit_tx_hash = self._audit_status[current_tx_hash]
result = {
CURRENT: {
STATUS: STATUS_ACTIVE,
DEPLOY_TX_HASH: current_tx_hash
}}
if audit_tx_hash:
result[CURRENT][AUDIT_TX_HASH] = audit_tx_hash
else:
            # update audit: an active SCORE has a new deploy tx pending audit or rejected
if current_tx_hash and next_tx_hash and active is True:
current_audit_tx_hash = self._audit_status[current_tx_hash]
next_reject_tx_hash = self._reject_status[next_tx_hash]
if next_reject_tx_hash:
result = {
CURRENT: {
STATUS: STATUS_ACTIVE,
DEPLOY_TX_HASH: current_tx_hash,
AUDIT_TX_HASH: current_audit_tx_hash
},
NEXT: {
STATUS: STATUS_REJECTED,
DEPLOY_TX_HASH: next_tx_hash,
AUDIT_TX_HASH: next_reject_tx_hash
}}
else:
result = {
CURRENT: {
STATUS: STATUS_ACTIVE,
DEPLOY_TX_HASH: current_tx_hash,
AUDIT_TX_HASH: current_audit_tx_hash
},
NEXT: {
STATUS: STATUS_PENDING,
DEPLOY_TX_HASH: next_tx_hash
}}
else:
result = {}
return result
@external(readonly=True)
def getStepPrice(self) -> int:
return self._step_price.get()
@external
def setStepPrice(self, stepPrice: int):
# only owner can set new step price
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if stepPrice > 0:
self._step_price.set(stepPrice)
self.StepPriceChanged(stepPrice)
@external
def acceptScore(self, txHash: bytes):
# check message sender
Logger.debug(f'acceptScore: msg.sender = "{self.msg.sender}"', TAG)
if self.msg.sender not in self._auditor_list:
self.revert('Invalid sender: no permission')
# check txHash
tx_params = self.get_deploy_tx_params(txHash)
if tx_params is None:
self.revert('Invalid txHash: None')
deploy_score_addr = tx_params.score_address
deploy_info = self.get_deploy_info(deploy_score_addr)
if txHash != deploy_info.next_tx_hash:
self.revert('Invalid txHash: mismatch')
next_audit_tx_hash = self._audit_status[txHash]
if next_audit_tx_hash:
self.revert('Invalid txHash: already accepted')
next_reject_tx_hash = self._reject_status[txHash]
if next_reject_tx_hash:
self.revert('Invalid txHash: already rejected')
self._deploy(txHash, deploy_score_addr)
Logger.debug(f'acceptScore: score_address = "{tx_params.score_address}"', TAG)
self._audit_status[txHash] = self.tx.hash
self.Accepted('0x' + txHash.hex())
def _deploy(self, tx_hash: bytes, score_addr: Address):
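        # Deploy on behalf of the SCORE owner: temporarily masquerade msg.sender
        # as the owner and restore it afterwards, even if the deploy raises.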
owner = self.get_owner(score_addr)
tmp_sender = self.msg.sender
self.msg.sender = owner
try:
self._context.deploy(tx_hash)
finally:
self.msg.sender = tmp_sender
@external
def rejectScore(self, txHash: bytes, reason: str):
# check message sender
Logger.debug(f'rejectScore: msg.sender = "{self.msg.sender}"', TAG)
if self.msg.sender not in self._auditor_list:
self.revert('Invalid sender: no permission')
# check txHash
tx_params = self.get_deploy_tx_params(txHash)
if tx_params is None:
self.revert('Invalid txHash')
next_audit_tx_hash = self._audit_status[txHash]
if next_audit_tx_hash:
self.revert('Invalid txHash: already accepted')
next_reject_tx_hash = self._reject_status[txHash]
if next_reject_tx_hash:
self.revert('Invalid txHash: already rejected')
Logger.debug(f'rejectScore: score_address = "{tx_params.score_address}", reason = {reason}', TAG)
self._reject_status[txHash] = self.tx.hash
self.Rejected('0x' + txHash.hex(), reason)
@external
def addAuditor(self, address: Address):
if address.is_contract:
self.revert(f'Invalid EOA Address: {address}')
# check message sender, only owner can add new auditor
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if address not in self._auditor_list:
self._auditor_list.put(address)
else:
            self.revert('Invalid address: already auditor')
if DEBUG is True:
self._print_auditor_list('addAuditor')
@external
def removeAuditor(self, address: Address):
if address.is_contract:
self.revert(f'Invalid EOA Address: {address}')
if address not in self._auditor_list:
self.revert('Invalid address: not in list')
# check message sender
if self.msg.sender != self.owner:
if self.msg.sender != address:
self.revert('Invalid sender: not yourself')
        # ArrayDB has no in-place delete: pop the last element and, if it is
        # not the target, overwrite the target's slot with it (swap-remove)
top = self._auditor_list.pop()
if top != address:
for i in range(len(self._auditor_list)):
if self._auditor_list[i] == address:
self._auditor_list[i] = top
if DEBUG is True:
self._print_auditor_list('removeAuditor')
def _print_auditor_list(self, header: str):
Logger.debug(f'{header}: list len = {len(self._auditor_list)}', TAG)
for auditor in self._auditor_list:
Logger.debug(f' --- {auditor}', TAG)
@external
def addDeployer(self, address: Address):
if address.is_contract:
self.revert(f'Invalid EOA Address: {address}')
# check message sender, only owner can add new deployer
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if address not in self._deployer_list:
self._deployer_list.put(address)
else:
            self.revert('Invalid address: already deployer')
if DEBUG is True:
self._print_deployer_list('addDeployer')
@external
def removeDeployer(self, address: Address):
if address.is_contract:
self.revert(f'Invalid EOA Address: {address}')
if address not in self._deployer_list:
self.revert('Invalid address: not in list')
# check message sender
if self.msg.sender != self.owner:
if self.msg.sender != address:
self.revert('Invalid sender: not yourself')
        # swap-remove: pop the last element and overwrite the target's slot
top = self._deployer_list.pop()
if top != address:
for i in range(len(self._deployer_list)):
if self._deployer_list[i] == address:
self._deployer_list[i] = top
if DEBUG is True:
self._print_deployer_list('removeDeployer')
@external(readonly=True)
def isDeployer(self, address: Address) -> bool:
Logger.debug(f'isDeployer address: {address}', TAG)
return address in self._deployer_list
def _print_deployer_list(self, header: str):
Logger.debug(f'{header}: list len = {len(self._deployer_list)}', TAG)
for deployer in self._deployer_list:
Logger.debug(f' --- {deployer}', TAG)
@external
def addToScoreBlackList(self, address: Address):
if not address.is_contract:
self.revert(f'Invalid SCORE Address: {address}')
# check message sender, only owner can add new blacklist
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if self.address == address:
self.revert("can't add myself")
if address not in self._score_black_list:
self._score_black_list.put(address)
else:
self.revert('Invalid address: already SCORE blacklist')
if DEBUG is True:
self._print_black_list('addScoreToBlackList')
@external
def removeFromScoreBlackList(self, address: Address):
if not address.is_contract:
self.revert(f'Invalid SCORE Address: {address}')
# check message sender, only owner can remove from blacklist
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if address not in self._score_black_list:
self.revert('Invalid address: not in list')
        # swap-remove: pop the last element and overwrite the target's slot
top = self._score_black_list.pop()
if top != address:
for i in range(len(self._score_black_list)):
if self._score_black_list[i] == address:
self._score_black_list[i] = top
if DEBUG is True:
self._print_black_list('removeScoreFromBlackList')
@external(readonly=True)
def isInScoreBlackList(self, address: Address) -> bool:
Logger.debug(f'isInBlackList address: {address}', TAG)
return address in self._score_black_list
def _print_black_list(self, header: str):
Logger.debug(f'{header}: list len = {len(self._score_black_list)}', TAG)
for addr in self._score_black_list:
Logger.debug(f' --- {addr}', TAG)
def _set_initial_step_costs(self):
initial_costs = {
STEP_TYPE_DEFAULT: 100_000,
STEP_TYPE_CONTRACT_CALL: 25_000,
STEP_TYPE_CONTRACT_CREATE: 1_000_000_000,
STEP_TYPE_CONTRACT_UPDATE: 1_600_000_000,
STEP_TYPE_CONTRACT_DESTRUCT: -70_000,
STEP_TYPE_CONTRACT_SET: 30_000,
STEP_TYPE_GET: 0,
STEP_TYPE_SET: 320,
STEP_TYPE_REPLACE: 80,
STEP_TYPE_DELETE: -240,
STEP_TYPE_INPUT: 200,
STEP_TYPE_EVENT_LOG: 100,
STEP_TYPE_API_CALL: 0
}
for key, value in initial_costs.items():
self._step_costs[key] = value
def _set_initial_max_step_limits(self):
self._max_step_limits[CONTEXT_TYPE_INVOKE] = 2_500_000_000
self._max_step_limits[CONTEXT_TYPE_QUERY] = 50_000_000
def _set_initial_revision(self):
self._revision_code.set(2)
self._revision_name.set("1.1.0")
@external(readonly=True)
def getStepCosts(self) -> dict:
result = {}
for key, value in self._step_costs.items():
result[key] = value
return result
@external
def setStepCost(self, stepType: str, cost: int):
# only owner can set new step cost
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if cost < 0:
if stepType != STEP_TYPE_CONTRACT_DESTRUCT and \
stepType != STEP_TYPE_DELETE:
self.revert(f'Invalid step cost: {stepType}, {cost}')
self._step_costs[stepType] = cost
self.StepCostChanged(stepType, cost)
@external(readonly=True)
def getMaxStepLimit(self, contextType: str) -> int:
return self._max_step_limits[contextType]
@external
def setMaxStepLimit(self, contextType: str, value: int):
# only owner can set new context type value
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if value < 0:
self.revert('Invalid value: negative number')
if contextType == CONTEXT_TYPE_INVOKE or contextType == CONTEXT_TYPE_QUERY:
self._max_step_limits[contextType] = value
self.MaxStepLimitChanged(contextType, value)
else:
self.revert("Invalid context type")
@external(readonly=True)
def getVersion(self) -> str:
return self._version.get()
def _set_initial_import_white_list(self):
key = "iconservice"
        # if "iconservice" has no value yet, allow ALL ("*")
if self._import_white_list[key] == "":
self._import_white_list[key] = "*"
self._import_white_list_keys.put(key)
@external
def addImportWhiteList(self, importStmt: str):
# only owner can add import white list
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
import_stmt_dict = {}
try:
import_stmt_dict: dict = self._check_import_stmt(importStmt)
except Exception as e:
self.revert(f'Invalid import statement: {e}')
# add to import white list
log_entry = []
for key, value in import_stmt_dict.items():
old_value: str = self._import_white_list[key]
if old_value == "*":
# no need to add
continue
if len(value) == 0:
# set import white list as ALL
self._import_white_list[key] = "*"
# add to import white list keys
if old_value == "":
self._import_white_list_keys.put(key)
# make added item list for eventlog
log_entry.append((key, value))
elif old_value == "":
# set import white list
self._import_white_list[key] = ','.join(value)
# add to import white list keys
self._import_white_list_keys.put(key)
# make added item list for eventlog
log_entry.append((key, value))
else:
old_value_list = old_value.split(',')
new_value = []
for v in value:
if v not in old_value_list:
new_value.append(v)
# set import white list
self._import_white_list[key] = f'{old_value},{",".join(new_value)}'
# make added item list for eventlog
log_entry.append((key, new_value))
# make eventlog
if len(log_entry):
self.AddImportWhiteListLog(str(log_entry), len(log_entry))
if DEBUG is True:
Logger.debug(f'checking added item ({importStmt}): {self.isInImportWhiteList(importStmt)}')
@external
def removeImportWhiteList(self, importStmt: str):
        # only owner can remove from import white list
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
import_stmt_dict = {}
try:
import_stmt_dict: dict = self._check_import_stmt(importStmt)
except Exception as e:
self.revert(f'Invalid import statement: {e}')
# remove from import white list
log_entry = []
for key, value in import_stmt_dict.items():
old_value: str = self._import_white_list[key]
if old_value == "*":
if len(value) == 0:
# remove import white list
self._remove_import_white_list(key)
                    # make removed item list for eventlog
log_entry.append((key, value))
continue
if len(value) == 0:
# remove import white list
self._remove_import_white_list(key)
                # make removed item list for eventlog
                log_entry.append((key, value))
else:
old_value_list = old_value.split(',')
remove_value = []
new_value = []
for v in old_value_list:
if v in value:
remove_value.append(v)
else:
new_value.append(v)
# set import white list
if len(new_value):
self._import_white_list[key] = f'{",".join(new_value)}'
else:
self._remove_import_white_list(key)
                # make removed item list for eventlog
log_entry.append((key, remove_value))
if len(log_entry):
# make eventlog
            self.RemoveImportWhiteListLog(str(log_entry), len(log_entry))
if DEBUG is True:
Logger.debug(f'checking removed item ({importStmt}): {self.isInImportWhiteList(importStmt)}')
@external(readonly=True)
def isInImportWhiteList(self, importStmt: str) -> bool:
try:
import_stmt_dict: dict = self._check_import_stmt(importStmt)
except Exception as e:
raise ValueError(f'{e}')
cache_import_white_list = self._get_import_white_list()
for key, value in import_stmt_dict.items():
old_value: list = cache_import_white_list.get(key, None)
if old_value is None:
return False
if old_value[0] == "*":
# import white list has ALL. See next key
continue
if len(value) == 0:
# input is ALL
return False
for v in value:
if v not in old_value:
return False
if DEBUG is True:
Logger.debug(f'({importStmt}) is in import white list')
return True
@staticmethod
def _check_import_stmt(import_stmt: str) -> dict:
Logger.debug(f'check_import_stmt: {import_stmt}')
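        # accept single-quoted input by rewriting it into valid JSON before parsing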
import_stmt_dict: dict = json_loads(import_stmt.replace("\'", "\""))
for key, value in import_stmt_dict.items():
if not isinstance(key, str):
raise TypeError("Key must be of type `str`")
if not isinstance(value, list):
raise TypeError("Value must be of type `list`")
else:
for v in value:
if not isinstance(v, str):
raise TypeError("Element of value must be of type `str`")
Logger.debug(f'check_import_stmt_dict: {import_stmt_dict}')
return import_stmt_dict
def _get_import_white_list(self) -> dict:
whitelist = {}
for v in self._import_white_list_keys:
values: str = self._import_white_list[v]
whitelist[v] = values.split(',')
return whitelist
def _remove_import_white_list(self, key: str):
# remove from import white list
self._import_white_list.remove(key)
# remove from import white list keys
top = self._import_white_list_keys.pop()
if top != key:
for i in range(len(self._import_white_list_keys)):
if self._import_white_list_keys[i] == key:
self._import_white_list_keys[i] = top
def _set_initial_service_config(self):
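        # OR bit 8 into the node's current service flags; presumably this
        # corresponds to the SCORE-package-validator flag in IconServiceFlag
        # (an assumption, not verified here).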
self._service_config.set(self.get_icon_service_flag() | 8)
@external
def updateServiceConfig(self, serviceFlag: int):
        # only owner can update the service config
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if serviceFlag < 0:
self.revert(f'updateServiceConfig: serviceFlag({serviceFlag}) < 0')
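        # reject any bits that are not defined in IconServiceFlag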
max_flag = 0
for flag in IconServiceFlag:
max_flag |= flag
if serviceFlag > max_flag:
self.revert(f'updateServiceConfig: serviceFlag({serviceFlag}) > max_flag({max_flag})')
prev_service_config = self._service_config.get()
if prev_service_config != serviceFlag:
self._service_config.set(serviceFlag)
self.UpdateServiceConfigLog(serviceFlag)
if DEBUG is True:
Logger.debug(f'updateServiceConfig (prev: {prev_service_config} flag: {serviceFlag})')
else:
if DEBUG is True:
Logger.debug(f'updateServiceConfig not update ({serviceFlag})')
@external(readonly=True)
def getServiceConfig(self) -> dict:
table = {}
service_flag = self._service_config.get()
for flag in IconServiceFlag:
if service_flag & flag == flag:
table[flag.name] = True
else:
table[flag.name] = False
return table
@external
def setRevision(self, code: int, name: str):
        # only owner can set the revision
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
prev_code = self._revision_code.get()
if code < prev_code:
self.revert(f"can't decrease code")
self._revision_code.set(code)
self._revision_name.set(name)
@external(readonly=True)
def getRevision(self) -> dict:
return {'code': self._revision_code.get(), 'name': self._revision_name.get()}
| 36.515366
| 109
| 0.605529
|
from iconservice import *
TAG = 'Governance'
DEBUG = False
CURRENT = 'current'
NEXT = 'next'
STATUS = 'status'
DEPLOY_TX_HASH = 'deployTxHash'
AUDIT_TX_HASH = 'auditTxHash'
VALID_STATUS_KEYS = [STATUS, DEPLOY_TX_HASH, AUDIT_TX_HASH]
STATUS_PENDING = 'pending'
STATUS_ACTIVE = 'active'
STATUS_INACTIVE = 'inactive'
STATUS_REJECTED = 'rejected'
STEP_TYPE_DEFAULT = 'default'
STEP_TYPE_CONTRACT_CALL = 'contractCall'
STEP_TYPE_CONTRACT_CREATE = 'contractCreate'
STEP_TYPE_CONTRACT_UPDATE = 'contractUpdate'
STEP_TYPE_CONTRACT_DESTRUCT = 'contractDestruct'
STEP_TYPE_CONTRACT_SET = 'contractSet'
STEP_TYPE_GET = 'get'
STEP_TYPE_SET = 'set'
STEP_TYPE_REPLACE = 'replace'
STEP_TYPE_DELETE = 'delete'
STEP_TYPE_INPUT = 'input'
STEP_TYPE_EVENT_LOG = 'eventLog'
STEP_TYPE_API_CALL = 'apiCall'
INITIAL_STEP_COST_KEYS = [STEP_TYPE_DEFAULT,
STEP_TYPE_CONTRACT_CALL, STEP_TYPE_CONTRACT_CREATE, STEP_TYPE_CONTRACT_UPDATE,
STEP_TYPE_CONTRACT_DESTRUCT, STEP_TYPE_CONTRACT_SET,
STEP_TYPE_GET, STEP_TYPE_SET, STEP_TYPE_REPLACE, STEP_TYPE_DELETE, STEP_TYPE_INPUT,
STEP_TYPE_EVENT_LOG, STEP_TYPE_API_CALL]
CONTEXT_TYPE_INVOKE = 'invoke'
CONTEXT_TYPE_QUERY = 'query'
class StepCosts:
_STEP_TYPES = 'step_types'
_STEP_COSTS = 'step_costs'
def __init__(self, db: IconScoreDatabase):
self._step_types = ArrayDB(self._STEP_TYPES, db, value_type=str)
self._step_costs = DictDB(self._STEP_COSTS, db, value_type=int)
def __setitem__(self, step_type: str, cost: int):
if step_type not in self._step_costs:
self._step_types.put(step_type)
self._step_costs[step_type] = cost
def __getitem__(self, step_type: str):
return self._step_costs[step_type]
def __delitem__(self, step_type: str):
if step_type in self._step_costs:
self._step_costs[step_type] = 0
def __contains__(self, step_type: str):
return step_type in self._step_costs
def __iter__(self):
return self._step_types.__iter__()
def __len__(self):
return self._step_types.__len__()
def items(self):
for step_type in self._step_types:
yield (step_type, self._step_costs[step_type])
class Governance(IconSystemScoreBase):
_SCORE_STATUS = 'score_status'
_AUDITOR_LIST = 'auditor_list'
_DEPLOYER_LIST = 'deployer_list'
_SCORE_BLACK_LIST = 'score_black_list'
_STEP_PRICE = 'step_price'
_MAX_STEP_LIMITS = 'max_step_limits'
_VERSION = 'version'
_IMPORT_WHITE_LIST = 'import_white_list'
_IMPORT_WHITE_LIST_KEYS = 'import_white_list_keys'
_SERVICE_CONFIG = 'service_config'
_AUDIT_STATUS = 'audit_status'
_REJECT_STATUS = 'reject_status'
_REVISION_CODE = 'revision_code'
_REVISION_NAME = 'revision_name'
@eventlog(indexed=1)
def Accepted(self, txHash: str):
pass
@eventlog(indexed=1)
def Rejected(self, txHash: str, reason: str):
pass
@eventlog(indexed=1)
def StepPriceChanged(self, stepPrice: int):
pass
@eventlog(indexed=1)
def StepCostChanged(self, stepType: str, cost: int):
pass
@eventlog(indexed=1)
def MaxStepLimitChanged(self, contextType: str, value: int):
pass
@eventlog(indexed=0)
def AddImportWhiteListLog(self, addList: str, addCount: int):
pass
@eventlog(indexed=0)
def RemoveImportWhiteListLog(self, removeList: str, removeCount: int):
pass
@eventlog(indexed=0)
def UpdateServiceConfigLog(self, serviceFlag: int):
pass
@property
def import_white_list_cache(self) -> dict:
return self._get_import_white_list()
@property
def service_config(self) -> int:
return self._service_config.get()
@property
def revision_code(self) -> int:
return self._revision_code.get()
def __init__(self, db: IconScoreDatabase) -> None:
super().__init__(db)
self._auditor_list = ArrayDB(self._AUDITOR_LIST, db, value_type=Address)
self._deployer_list = ArrayDB(self._DEPLOYER_LIST, db, value_type=Address)
self._score_black_list = ArrayDB(self._SCORE_BLACK_LIST, db, value_type=Address)
self._step_price = VarDB(self._STEP_PRICE, db, value_type=int)
self._step_costs = StepCosts(db)
self._max_step_limits = DictDB(self._MAX_STEP_LIMITS, db, value_type=int)
self._version = VarDB(self._VERSION, db, value_type=str)
self._import_white_list = DictDB(self._IMPORT_WHITE_LIST, db, value_type=str)
self._import_white_list_keys = ArrayDB(self._IMPORT_WHITE_LIST_KEYS, db, value_type=str)
self._service_config = VarDB(self._SERVICE_CONFIG, db, value_type=int)
self._audit_status = DictDB(self._AUDIT_STATUS, db, value_type=bytes)
self._reject_status = DictDB(self._REJECT_STATUS, db, value_type=bytes)
self._revision_code = VarDB(self._REVISION_CODE, db, value_type=int)
self._revision_name = VarDB(self._REVISION_NAME, db, value_type=str)
def on_install(self, stepPrice: int = 10 ** 10) -> None:
super().on_install()
Logger.debug(f'on_install: owner = "{self.owner}"', TAG)
self._auditor_list.put(self.owner)
self._deployer_list.put(self.owner)
self._step_price.set(stepPrice)
self._set_initial_step_costs()
self._set_initial_max_step_limits()
self._set_initial_import_white_list()
self._set_initial_service_config()
def on_update(self) -> None:
super().on_update()
if self.is_less_than_target_version('0.0.2'):
self._migrate_v0_0_2()
if self.is_less_than_target_version('0.0.3'):
self._migrate_v0_0_3()
if self.is_less_than_target_version('0.0.4'):
self._migrate_v0_0_4()
if self.is_less_than_target_version('0.0.5'):
self._migrate_v0_0_5()
self._version.set('0.0.5')
def is_less_than_target_version(self, target_version: str) -> bool:
last_version = self._version.get()
return self._versions(last_version) < self._versions(target_version)
def _migrate_v0_0_2(self):
if len(self._step_costs) == 0:
for step_type in INITIAL_STEP_COST_KEYS:
if step_type in self._step_costs:
self._step_costs._step_types.put(step_type)
self._set_initial_step_costs()
self._set_initial_max_step_limits()
def _migrate_v0_0_3(self):
self._set_initial_import_white_list()
self._set_initial_service_config()
self._set_initial_max_step_limits()
self._set_initial_revision()
def _migrate_v0_0_4(self):
pass
def _migrate_v0_0_5(self):
self._set_initial_revision()
@staticmethod
def _versions(version: str):
parts = []
if version is not None:
for part in version.split("."):
try:
parts.append(int(part))
except ValueError:
pass
return tuple(parts)
@external(readonly=True)
def getScoreStatus(self, address: Address) -> dict:
if self.is_builtin_score(address):
deploy_info = self.get_deploy_info(address)
result = {
CURRENT: {
STATUS: STATUS_ACTIVE
}
}
if deploy_info.current_tx_hash is not None:
result[CURRENT][DEPLOY_TX_HASH] = deploy_info.current_tx_hash
return result
deploy_info = self.get_deploy_info(address)
if deploy_info is None:
self.revert('SCORE not found')
current_tx_hash = deploy_info.current_tx_hash
next_tx_hash = deploy_info.next_tx_hash
active = self.is_score_active(address)
if current_tx_hash is None and next_tx_hash and active is False:
reject_tx_hash = self._reject_status[next_tx_hash]
if reject_tx_hash:
result = {
NEXT: {
STATUS: STATUS_REJECTED,
DEPLOY_TX_HASH: next_tx_hash,
AUDIT_TX_HASH: reject_tx_hash
}}
else:
result = {
NEXT: {
STATUS: STATUS_PENDING,
DEPLOY_TX_HASH: next_tx_hash
}}
elif current_tx_hash and next_tx_hash is None and active is True:
audit_tx_hash = self._audit_status[current_tx_hash]
result = {
CURRENT: {
STATUS: STATUS_ACTIVE,
DEPLOY_TX_HASH: current_tx_hash
}}
if audit_tx_hash:
result[CURRENT][AUDIT_TX_HASH] = audit_tx_hash
else:
if current_tx_hash and next_tx_hash and active is True:
current_audit_tx_hash = self._audit_status[current_tx_hash]
next_reject_tx_hash = self._reject_status[next_tx_hash]
if next_reject_tx_hash:
result = {
CURRENT: {
STATUS: STATUS_ACTIVE,
DEPLOY_TX_HASH: current_tx_hash,
AUDIT_TX_HASH: current_audit_tx_hash
},
NEXT: {
STATUS: STATUS_REJECTED,
DEPLOY_TX_HASH: next_tx_hash,
AUDIT_TX_HASH: next_reject_tx_hash
}}
else:
result = {
CURRENT: {
STATUS: STATUS_ACTIVE,
DEPLOY_TX_HASH: current_tx_hash,
AUDIT_TX_HASH: current_audit_tx_hash
},
NEXT: {
STATUS: STATUS_PENDING,
DEPLOY_TX_HASH: next_tx_hash
}}
else:
result = {}
return result
@external(readonly=True)
def getStepPrice(self) -> int:
return self._step_price.get()
@external
def setStepPrice(self, stepPrice: int):
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if stepPrice > 0:
self._step_price.set(stepPrice)
self.StepPriceChanged(stepPrice)
@external
def acceptScore(self, txHash: bytes):
Logger.debug(f'acceptScore: msg.sender = "{self.msg.sender}"', TAG)
if self.msg.sender not in self._auditor_list:
self.revert('Invalid sender: no permission')
tx_params = self.get_deploy_tx_params(txHash)
if tx_params is None:
self.revert('Invalid txHash: None')
deploy_score_addr = tx_params.score_address
deploy_info = self.get_deploy_info(deploy_score_addr)
if txHash != deploy_info.next_tx_hash:
self.revert('Invalid txHash: mismatch')
next_audit_tx_hash = self._audit_status[txHash]
if next_audit_tx_hash:
self.revert('Invalid txHash: already accepted')
next_reject_tx_hash = self._reject_status[txHash]
if next_reject_tx_hash:
self.revert('Invalid txHash: already rejected')
self._deploy(txHash, deploy_score_addr)
Logger.debug(f'acceptScore: score_address = "{tx_params.score_address}"', TAG)
self._audit_status[txHash] = self.tx.hash
self.Accepted('0x' + txHash.hex())
def _deploy(self, tx_hash: bytes, score_addr: Address):
owner = self.get_owner(score_addr)
tmp_sender = self.msg.sender
self.msg.sender = owner
try:
self._context.deploy(tx_hash)
finally:
self.msg.sender = tmp_sender
@external
def rejectScore(self, txHash: bytes, reason: str):
Logger.debug(f'rejectScore: msg.sender = "{self.msg.sender}"', TAG)
if self.msg.sender not in self._auditor_list:
self.revert('Invalid sender: no permission')
tx_params = self.get_deploy_tx_params(txHash)
if tx_params is None:
self.revert('Invalid txHash')
next_audit_tx_hash = self._audit_status[txHash]
if next_audit_tx_hash:
self.revert('Invalid txHash: already accepted')
next_reject_tx_hash = self._reject_status[txHash]
if next_reject_tx_hash:
self.revert('Invalid txHash: already rejected')
Logger.debug(f'rejectScore: score_address = "{tx_params.score_address}", reason = {reason}', TAG)
self._reject_status[txHash] = self.tx.hash
self.Rejected('0x' + txHash.hex(), reason)
@external
def addAuditor(self, address: Address):
if address.is_contract:
self.revert(f'Invalid EOA Address: {address}')
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if address not in self._auditor_list:
self._auditor_list.put(address)
else:
self.revert(f'Invalid address: already auditor')
if DEBUG is True:
self._print_auditor_list('addAuditor')
@external
def removeAuditor(self, address: Address):
if address.is_contract:
self.revert(f'Invalid EOA Address: {address}')
if address not in self._auditor_list:
self.revert('Invalid address: not in list')
if self.msg.sender != self.owner:
if self.msg.sender != address:
self.revert('Invalid sender: not yourself')
top = self._auditor_list.pop()
if top != address:
for i in range(len(self._auditor_list)):
if self._auditor_list[i] == address:
self._auditor_list[i] = top
if DEBUG is True:
self._print_auditor_list('removeAuditor')
def _print_auditor_list(self, header: str):
Logger.debug(f'{header}: list len = {len(self._auditor_list)}', TAG)
for auditor in self._auditor_list:
Logger.debug(f' --- {auditor}', TAG)
@external
def addDeployer(self, address: Address):
if address.is_contract:
self.revert(f'Invalid EOA Address: {address}')
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if address not in self._deployer_list:
self._deployer_list.put(address)
else:
self.revert(f'Invalid address: already deployer')
if DEBUG is True:
self._print_deployer_list('addDeployer')
@external
def removeDeployer(self, address: Address):
if address.is_contract:
self.revert(f'Invalid EOA Address: {address}')
if address not in self._deployer_list:
self.revert('Invalid address: not in list')
if self.msg.sender != self.owner:
if self.msg.sender != address:
self.revert('Invalid sender: not yourself')
top = self._deployer_list.pop()
if top != address:
for i in range(len(self._deployer_list)):
if self._deployer_list[i] == address:
self._deployer_list[i] = top
if DEBUG is True:
self._print_deployer_list('removeDeployer')
@external(readonly=True)
def isDeployer(self, address: Address) -> bool:
Logger.debug(f'isDeployer address: {address}', TAG)
return address in self._deployer_list
def _print_deployer_list(self, header: str):
Logger.debug(f'{header}: list len = {len(self._deployer_list)}', TAG)
for deployer in self._deployer_list:
Logger.debug(f' --- {deployer}', TAG)
@external
def addToScoreBlackList(self, address: Address):
if not address.is_contract:
self.revert(f'Invalid SCORE Address: {address}')
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if self.address == address:
self.revert("can't add myself")
if address not in self._score_black_list:
self._score_black_list.put(address)
else:
self.revert('Invalid address: already SCORE blacklist')
if DEBUG is True:
self._print_black_list('addScoreToBlackList')
@external
def removeFromScoreBlackList(self, address: Address):
if not address.is_contract:
self.revert(f'Invalid SCORE Address: {address}')
# check message sender, only owner can remove from blacklist
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if address not in self._score_black_list:
self.revert('Invalid address: not in list')
# get the topmost value
top = self._score_black_list.pop()
if top != address:
for i in range(len(self._score_black_list)):
if self._score_black_list[i] == address:
self._score_black_list[i] = top
if DEBUG is True:
self._print_black_list('removeScoreFromBlackList')
@external(readonly=True)
def isInScoreBlackList(self, address: Address) -> bool:
Logger.debug(f'isInBlackList address: {address}', TAG)
return address in self._score_black_list
def _print_black_list(self, header: str):
Logger.debug(f'{header}: list len = {len(self._score_black_list)}', TAG)
for addr in self._score_black_list:
Logger.debug(f' --- {addr}', TAG)
def _set_initial_step_costs(self):
initial_costs = {
STEP_TYPE_DEFAULT: 100_000,
STEP_TYPE_CONTRACT_CALL: 25_000,
STEP_TYPE_CONTRACT_CREATE: 1_000_000_000,
STEP_TYPE_CONTRACT_UPDATE: 1_600_000_000,
STEP_TYPE_CONTRACT_DESTRUCT: -70_000,
STEP_TYPE_CONTRACT_SET: 30_000,
STEP_TYPE_GET: 0,
STEP_TYPE_SET: 320,
STEP_TYPE_REPLACE: 80,
STEP_TYPE_DELETE: -240,
STEP_TYPE_INPUT: 200,
STEP_TYPE_EVENT_LOG: 100,
STEP_TYPE_API_CALL: 0
}
for key, value in initial_costs.items():
self._step_costs[key] = value
def _set_initial_max_step_limits(self):
self._max_step_limits[CONTEXT_TYPE_INVOKE] = 2_500_000_000
self._max_step_limits[CONTEXT_TYPE_QUERY] = 50_000_000
def _set_initial_revision(self):
self._revision_code.set(2)
self._revision_name.set("1.1.0")
@external(readonly=True)
def getStepCosts(self) -> dict:
result = {}
for key, value in self._step_costs.items():
result[key] = value
return result
@external
def setStepCost(self, stepType: str, cost: int):
# only owner can set new step cost
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if cost < 0:
if stepType != STEP_TYPE_CONTRACT_DESTRUCT and \
stepType != STEP_TYPE_DELETE:
self.revert(f'Invalid step cost: {stepType}, {cost}')
self._step_costs[stepType] = cost
self.StepCostChanged(stepType, cost)
@external(readonly=True)
def getMaxStepLimit(self, contextType: str) -> int:
return self._max_step_limits[contextType]
@external
def setMaxStepLimit(self, contextType: str, value: int):
# only owner can set new context type value
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if value < 0:
self.revert('Invalid value: negative number')
if contextType == CONTEXT_TYPE_INVOKE or contextType == CONTEXT_TYPE_QUERY:
self._max_step_limits[contextType] = value
self.MaxStepLimitChanged(contextType, value)
else:
self.revert("Invalid context type")
@external(readonly=True)
def getVersion(self) -> str:
return self._version.get()
def _set_initial_import_white_list(self):
key = "iconservice"
# if iconsevice has no value set ALL
if self._import_white_list[key] == "":
self._import_white_list[key] = "*"
self._import_white_list_keys.put(key)
@external
def addImportWhiteList(self, importStmt: str):
# only owner can add import white list
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
import_stmt_dict = {}
try:
import_stmt_dict: dict = self._check_import_stmt(importStmt)
except Exception as e:
self.revert(f'Invalid import statement: {e}')
# add to import white list
log_entry = []
for key, value in import_stmt_dict.items():
old_value: str = self._import_white_list[key]
if old_value == "*":
# no need to add
continue
if len(value) == 0:
# set import white list as ALL
self._import_white_list[key] = "*"
# add to import white list keys
if old_value == "":
self._import_white_list_keys.put(key)
# make added item list for eventlog
log_entry.append((key, value))
elif old_value == "":
# set import white list
self._import_white_list[key] = ','.join(value)
# add to import white list keys
self._import_white_list_keys.put(key)
# make added item list for eventlog
log_entry.append((key, value))
else:
old_value_list = old_value.split(',')
new_value = []
for v in value:
if v not in old_value_list:
new_value.append(v)
# set import white list
self._import_white_list[key] = f'{old_value},{",".join(new_value)}'
# make added item list for eventlog
log_entry.append((key, new_value))
# make eventlog
if len(log_entry):
self.AddImportWhiteListLog(str(log_entry), len(log_entry))
if DEBUG is True:
Logger.debug(f'checking added item ({importStmt}): {self.isInImportWhiteList(importStmt)}')
@external
def removeImportWhiteList(self, importStmt: str):
# only owner can add import white list
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
import_stmt_dict = {}
try:
import_stmt_dict: dict = self._check_import_stmt(importStmt)
except Exception as e:
self.revert(f'Invalid import statement: {e}')
# remove from import white list
log_entry = []
for key, value in import_stmt_dict.items():
old_value: str = self._import_white_list[key]
if old_value == "*":
if len(value) == 0:
# remove import white list
self._remove_import_white_list(key)
# make added item list for eventlog
log_entry.append((key, value))
continue
if len(value) == 0:
# remove import white list
self._remove_import_white_list(key)
# make added item list for eventlog
log_entry.append((key, value))
# add to import white list keys
self._import_white_list_keys.put(key)
else:
old_value_list = old_value.split(',')
remove_value = []
new_value = []
for v in old_value_list:
if v in value:
remove_value.append(v)
else:
new_value.append(v)
# set import white list
if len(new_value):
self._import_white_list[key] = f'{",".join(new_value)}'
else:
self._remove_import_white_list(key)
# make added item list for eventlog
log_entry.append((key, remove_value))
if len(log_entry):
# make eventlog
self.AddImportWhiteListLog(str(log_entry), len(log_entry))
if DEBUG is True:
Logger.debug(f'checking removed item ({importStmt}): {self.isInImportWhiteList(importStmt)}')
@external(readonly=True)
def isInImportWhiteList(self, importStmt: str) -> bool:
try:
import_stmt_dict: dict = self._check_import_stmt(importStmt)
except Exception as e:
raise ValueError(f'{e}')
cache_import_white_list = self._get_import_white_list()
for key, value in import_stmt_dict.items():
old_value: list = cache_import_white_list.get(key, None)
if old_value is None:
return False
if old_value[0] == "*":
# import white list has ALL. See next key
continue
if len(value) == 0:
# input is ALL
return False
for v in value:
if v not in old_value:
return False
if DEBUG is True:
Logger.debug(f'({importStmt}) is in import white list')
return True
@staticmethod
def _check_import_stmt(import_stmt: str) -> dict:
Logger.debug(f'check_import_stmt: {import_stmt}')
import_stmt_dict: dict = json_loads(import_stmt.replace("\'", "\""))
for key, value in import_stmt_dict.items():
if not isinstance(key, str):
raise TypeError("Key must be of type `str`")
if not isinstance(value, list):
raise TypeError("Value must be of type `list`")
else:
for v in value:
if not isinstance(v, str):
raise TypeError("Element of value must be of type `str`")
Logger.debug(f'check_import_stmt_dict: {import_stmt_dict}')
return import_stmt_dict
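    # A couple of illustrative inputs for _check_import_stmt (the module names
    # here are hypothetical, not taken from this contract):
    #   "{'json': []}"             -> allow everything from `json`
    #   "{'os': ['path', 'sep']}"  -> allow only `os.path` and `os.sep`
    # Single quotes are accepted because the string is rewritten to JSON first.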
def _get_import_white_list(self) -> dict:
whitelist = {}
for v in self._import_white_list_keys:
values: str = self._import_white_list[v]
whitelist[v] = values.split(',')
return whitelist
def _remove_import_white_list(self, key: str):
# remove from import white list
self._import_white_list.remove(key)
# remove from import white list keys
top = self._import_white_list_keys.pop()
if top != key:
for i in range(len(self._import_white_list_keys)):
if self._import_white_list_keys[i] == key:
self._import_white_list_keys[i] = top
def _set_initial_service_config(self):
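        # NB: the hard-coded 8 below is assumed to be one IconServiceFlag bit
        # (presumably SCORE_PACKAGE_VALIDATOR); the enum is defined elsewhere.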
self._service_config.set(self.get_icon_service_flag() | 8)
@external
def updateServiceConfig(self, serviceFlag: int):
        # only owner can update the service config
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
if serviceFlag < 0:
self.revert(f'updateServiceConfig: serviceFlag({serviceFlag}) < 0')
max_flag = 0
for flag in IconServiceFlag:
max_flag |= flag
if serviceFlag > max_flag:
self.revert(f'updateServiceConfig: serviceFlag({serviceFlag}) > max_flag({max_flag})')
prev_service_config = self._service_config.get()
if prev_service_config != serviceFlag:
self._service_config.set(serviceFlag)
self.UpdateServiceConfigLog(serviceFlag)
if DEBUG is True:
Logger.debug(f'updateServiceConfig (prev: {prev_service_config} flag: {serviceFlag})')
else:
if DEBUG is True:
Logger.debug(f'updateServiceConfig not update ({serviceFlag})')
@external(readonly=True)
def getServiceConfig(self) -> dict:
table = {}
service_flag = self._service_config.get()
for flag in IconServiceFlag:
if service_flag & flag == flag:
table[flag.name] = True
else:
table[flag.name] = False
return table
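    # Sketch of the decoding above, assuming typical IconServiceFlag values
    # (FEE=1, AUDIT=2, DEPLOYER_WHITE_LIST=4, SCORE_PACKAGE_VALIDATOR=8):
    #   service_flag = 10 (0b1010) -> {'FEE': False, 'AUDIT': True,
    #                                  'DEPLOYER_WHITE_LIST': False,
    #                                  'SCORE_PACKAGE_VALIDATOR': True}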
@external
def setRevision(self, code: int, name: str):
        # only owner can set the revision
if self.msg.sender != self.owner:
self.revert('Invalid sender: not owner')
prev_code = self._revision_code.get()
if code < prev_code:
self.revert(f"can't decrease code")
self._revision_code.set(code)
self._revision_name.set(name)
@external(readonly=True)
def getRevision(self) -> dict:
return {'code': self._revision_code.get(), 'name': self._revision_name.get()}
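# --- A minimal, self-contained sketch of the import-whitelist semantics above ---
# (plain Python, no SCORE runtime; function name and sample data are illustrative)
def _matches_white_list(white_list: dict, stmt: dict) -> bool:
    # white_list maps module -> list of allowed names, with ["*"] meaning "all"
    for module, names in stmt.items():
        allowed = white_list.get(module)
        if allowed is None:
            return False
        if allowed == ["*"]:
            continue  # everything from this module is allowed
        if not names:
            return False  # caller asked for "all" but only a subset is allowed
        if any(name not in allowed for name in names):
            return False
    return True

assert _matches_white_list({"iconservice": ["*"]}, {"iconservice": ["Address"]})
assert not _matches_white_list({"os": ["sep"]}, {"os": ["path"]})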
| true | true |
f719631ce5568ca0573b1aff26b681add708c145 | 5,186 | py | Python | lib/matplotlib/backends/qt_compat.py | pmarshwx/matplotlib | 12be528dbf2114f7c25abf60de8100cb2d4494af | ["MIT", "BSD-3-Clause"] | null | null | null |
""" A Qt API selector that can be used to switch between PyQt and PySide.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
from matplotlib import rcParams, verbose
# Available APIs.
QT_API_PYQT = 'PyQt4' # API is not set here; Python 2.x default is V 1
QT_API_PYQTv2 = 'PyQt4v2' # forced to Version 2 API
QT_API_PYSIDE = 'PySide' # only supports Version 2 API
QT_API_PYQT5 = 'PyQt5' # use PyQt5 API; Version 2 with module shim
ETS = dict(pyqt=(QT_API_PYQTv2, 4), pyside=(QT_API_PYSIDE, 4),
pyqt5=(QT_API_PYQT5, 5))
# ETS is a dict of env variable to (QT_API, QT_MAJOR_VERSION)
# If the ETS QT_API environment variable is set, use it, but only
# if the variable is of the same major Qt version. Note that
# ETS requires version 2 of the PyQt4 API, which is not the platform
# default for Python 2.x.
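# In short, the resolution order implemented below is:
#   1. the QT_API environment variable, but only when its major Qt version
#      matches the major version implied by rcParams['backend'];
#   2. otherwise rcParams['backend.qt5'] / rcParams['backend.qt4'].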
QT_API_ENV = os.environ.get('QT_API')
if rcParams['backend'] == 'Qt5Agg':
QT_RC_MAJOR_VERSION = 5
else:
QT_RC_MAJOR_VERSION = 4
QT_API = None
if (QT_API_ENV is not None):
try:
QT_ENV_MAJOR_VERSION = ETS[QT_API_ENV][1]
except KeyError:
raise RuntimeError(
('Unrecognized environment variable %r, valid values are:'
' %r, %r or %r' % (QT_API_ENV, 'pyqt', 'pyside', 'pyqt5')))
if QT_ENV_MAJOR_VERSION == QT_RC_MAJOR_VERSION:
# Only if backend and env qt major version are
# compatible use the env variable.
QT_API = ETS[QT_API_ENV][0]
if QT_API is None:
# No ETS environment or incompatible so use rcParams.
if rcParams['backend'] == 'Qt5Agg':
QT_API = rcParams['backend.qt5']
else:
QT_API = rcParams['backend.qt4']
# We will define an appropriate wrapper for the differing versions
# of file dialog.
_getSaveFileName = None
# Flag to check if sip could be imported
_sip_imported = False
# Now perform the imports.
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYQT5):
try:
import sip
_sip_imported = True
except ImportError:
# Try using PySide
QT_API = QT_API_PYSIDE
cond = ("Could not import sip; falling back on PySide\n"
"in place of PyQt4 or PyQt5.\n")
verbose.report(cond, 'helpful')
if _sip_imported:
if QT_API == QT_API_PYQTv2:
if QT_API_ENV == 'pyqt':
cond = ("Found 'QT_API=pyqt' environment variable. "
"Setting PyQt4 API accordingly.\n")
else:
cond = "PyQt API v2 specified."
try:
sip.setapi('QString', 2)
except:
res = 'QString API v2 specification failed. Defaulting to v1.'
verbose.report(cond + res, 'helpful')
# condition has now been reported, no need to repeat it:
cond = ""
try:
sip.setapi('QVariant', 2)
except:
res = 'QVariant API v2 specification failed. Defaulting to v1.'
verbose.report(cond + res, 'helpful')
if QT_API in [QT_API_PYQT, QT_API_PYQTv2]: # PyQt4 API
from PyQt4 import QtCore, QtGui
try:
if sip.getapi("QString") > 1:
# Use new getSaveFileNameAndFilter()
_getSaveFileName = QtGui.QFileDialog.getSaveFileNameAndFilter
else:
# Use old getSaveFileName()
def _getSaveFileName(*args, **kwargs):
return (QtGui.QFileDialog.getSaveFileName(*args, **kwargs),
None)
except (AttributeError, KeyError):
# call to getapi() can fail in older versions of sip
def _getSaveFileName(*args, **kwargs):
return QtGui.QFileDialog.getSaveFileName(*args, **kwargs), None
else: # PyQt5 API
from PyQt5 import QtCore, QtGui, QtWidgets
_getSaveFileName = QtWidgets.QFileDialog.getSaveFileName
# Alias PyQt-specific functions for PySide compatibility.
QtCore.Signal = QtCore.pyqtSignal
try:
QtCore.Slot = QtCore.pyqtSlot
except AttributeError:
# Not a perfect match but works in simple cases
QtCore.Slot = QtCore.pyqtSignature
QtCore.Property = QtCore.pyqtProperty
__version__ = QtCore.PYQT_VERSION_STR
else: # try importing pyside
try:
from PySide import QtCore, QtGui, __version__, __version_info__
except ImportError:
raise ImportError(
"Matplotlib qt-based backends require an external PyQt4, PyQt5,\n"
"or PySide package to be installed, but it was not found.")
if __version_info__ < (1, 0, 3):
raise ImportError(
"Matplotlib backend_qt4 and backend_qt4agg require PySide >=1.0.3")
_getSaveFileName = QtGui.QFileDialog.getSaveFileName
# Apply shim to Qt4 APIs to make them look like Qt5
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYSIDE):
'''Import all used QtGui objects into QtWidgets
    Here I've opted to simply copy QtGui into QtWidgets as that
achieves the same result as copying over the objects, and will
continue to work if other objects are used.
'''
QtWidgets = QtGui
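# --- Usage sketch (assumed downstream caller, not part of qt_compat itself) ---
# Whichever binding was selected above, callers can rely on the Qt5-style names:
#
#   from matplotlib.backends.qt_compat import QtCore, QtWidgets
#
#   class Canvas(QtWidgets.QWidget):
#       resized = QtCore.Signal()   # PySide-style name, aliased for PyQt above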
| 33.895425 | 79 | 0.642885 | true | true |
f71963b2a8fde56239fbbd548e6a4f71526ae07c | 70,771 | py | Python | controllers/req.py | waidyanatha/deprecated.sambro-eden | 62e180703a2f16d5f8fcd532335d8287b76a8175 | ["MIT"] | 1 | 2016-12-22T09:31:22.000Z | 2016-12-22T09:31:22.000Z | null | null | null |
# -*- coding: utf-8 -*-
"""
Request Management
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
return s3db.cms_index(module, alt_function="index_alt")
# -----------------------------------------------------------------------------
def index_alt():
"""
Module homepage for non-Admin users when no CMS content found
"""
# Just redirect to the list of Requests
redirect(URL(f="req", args=["search"]))
# -----------------------------------------------------------------------------
def is_affiliated():
"""
Check if User is affiliated to an Organisation
@ToDo: Move this elsewhere
"""
if not auth.is_logged_in():
return False
elif s3_has_role(ADMIN):
return True
else:
table = auth.settings.table_user
auth_user = db(table.id == auth.user.id).select(table.organisation_id,
limitby=(0, 1)
).first()
if auth_user and auth_user.organisation_id:
return True
else:
return False
# =============================================================================
def create():
""" Redirect to req/create """
redirect(URL(f="req", args="create"))
# -----------------------------------------------------------------------------
def marker_fn(record):
"""
Function to decide which Marker to use for Requests Map
@ToDo: Use Symbology
"""
# Base Icon based on Type
type = record.type
if type in (1, 8):
# Items
marker = "asset"
elif type == 3:
# People
marker = "staff"
#elif type == 6:
# # Food
# marker = "food"
else:
marker = "request"
# Colour code by priority
priority = record.priority
if priority == 3:
# High
marker = "%s_red" % marker
elif priority == 2:
# Medium
marker = "%s_yellow" % marker
#elif priority == 1:
# # Low
# marker = "%s_yellow" % marker
mtable = db.gis_marker
marker = db(mtable.name == marker).select(mtable.image,
mtable.height,
mtable.width,
cache=s3db.cache,
limitby=(0, 1)).first()
return marker
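# Illustrative outcomes of the naming scheme above (hypothetical records):
#   type=1 (Items),  priority=3 (High)   -> looks up the "asset_red" marker
#   type=3 (People), priority=2 (Medium) -> looks up the "staff_yellow" marker
#   other types,     priority=1 (Low)    -> the plain "request" marker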
# -----------------------------------------------------------------------------
def req():
"""
REST Controller for Request Instances
"""
s3.filter = (s3db.req_req.is_template == False)
output = req_controller()
return output
# -----------------------------------------------------------------------------
def req_template():
"""
REST Controller for Request Templates
"""
# Hide fields which aren't relevant to templates
# @ToDo: Need to get this done later after being opened by Types?
table = s3db.req_req
field = table.is_template
field.default = True
field.readable = field.writable = False
s3.filter = (field == True)
if "req_item" in request.args:
# List fields for req_item
table = s3db.req_req_item
list_fields = ["id",
"item_id",
"item_pack_id",
"quantity",
"comments",
]
s3db.configure("req_req_item",
list_fields=list_fields)
elif "req_skill" in request.args:
# List fields for req_skill
table = s3db.req_req_skill
list_fields = ["id",
"skill_id",
"quantity",
"comments",
]
s3db.configure("req_req_skill",
list_fields=list_fields)
else:
# Main Req
fields = ["req_ref",
"date",
"date_required",
"date_required_until",
"date_recv",
"recv_by_id",
"cancel",
"commit_status",
"transit_status",
"fulfil_status",
]
for fieldname in fields:
field = table[fieldname]
field.readable = field.writable = False
table.purpose.label = T("Details")
list_fields = ["id",
"site_id"
]
if len(settings.get_req_req_type()) > 1:
list_fields.append("type")
list_fields.append("priority")
list_fields.append("purpose")
list_fields.append("comments")
s3db.configure("req_req",
list_fields=list_fields)
# CRUD strings
ADD_REQUEST = T("Add Request Template")
s3.crud_strings["req_req"] = Storage(
title_create = ADD_REQUEST,
title_display = T("Request Template Details"),
title_list = T("Request Templates"),
title_update = T("Edit Request Template"),
subtitle_create = ADD_REQUEST,
label_list_button = T("List Request Templates"),
label_create_button = ADD_REQUEST,
label_delete_button = T("Delete Request Template"),
msg_record_created = T("Request Template Added"),
msg_record_modified = T("Request Template Updated"),
msg_record_deleted = T("Request Template Deleted"),
msg_list_empty = T("No Request Templates"))
output = req_controller()
return output
# -----------------------------------------------------------------------------
def req_controller():
""" REST Controller """
def prep(r):
table = r.table
s3.req_prep(r)
#if len(settings.get_req_req_type()) == 1:
# # Remove type from list_fields
# list_fields = s3db.get_config("req_req", "list_fields")
# try:
# list_fields.remove("type")
# except:
# # It has already been removed.
# # This can happen if the req controller is called
# # for a second time, such as when printing reports
# pass
# s3db.configure("req_req", list_fields=list_fields)
type = (r.record and r.record.type) or \
(request.vars.type and int(request.vars.type))
if r.interactive:
# Set the req_item site_id (Requested From), called from action buttons on req/req_item_inv_item/x page
if "req_item_id" in request.vars and "inv_item_id" in request.vars:
iitable = s3db.inv_inv_item
inv_item = db(iitable.id == request.vars.inv_item_id).select(iitable.site_id,
iitable.item_id,
limitby=(0, 1)
).first()
site_id = inv_item.site_id
item_id = inv_item.item_id
# @ToDo: Check Permissions & Avoid DB updates in GETs
db(s3db.req_req_item.id == request.vars.req_item_id).update(site_id = site_id)
response.confirmation = T("%(item)s requested from %(site)s") % \
{"item": s3db.supply_ItemRepresent()(item_id),
"site": s3db.org_SiteRepresent()(site_id)
}
elif "req.site_id" in r.get_vars:
# Called from 'Make new request' button on [siteinstance]/req page
table.site_id.default = request.get_vars.get("req.site_id")
table.site_id.writable = False
if r.http == "POST":
del r.get_vars["req.site_id"]
table.requester_id.represent = requester_represent
# Set Fields and Labels depending on type
if type:
table.type.default = type
# This prevents the type from being edited AFTER it is set
table.type.readable = table.type.writable = False
crud_strings = settings.get_req_req_crud_strings(type)
if crud_strings:
s3.crud_strings["req_req"] = crud_strings
elif type == 1:
s3.crud_strings["req_req"].title_create = T("Make Supplies Request")
elif type == 3:
s3.crud_strings["req_req"].title_create = T("Make People Request")
# Filter the query based on type
if s3.filter:
s3.filter = s3.filter & \
(table.type == type)
else:
s3.filter = (table.type == type)
# These changes are applied via JS in create forms where type is editable
if type == 1: # Item
table.date_recv.readable = table.date_recv.writable = True
if settings.get_req_items_ask_purpose():
table.purpose.label = T("What the Items will be used for")
table.site_id.label = T("Deliver To")
table.request_for_id.label = T("Deliver To")
table.requester_id.label = T("Site Contact")
table.recv_by_id.label = T("Delivered To")
elif type == 3: # Person
table.date_required_until.readable = table.date_required_until.writable = True
table.purpose.label = T("Task Details")
table.purpose.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Task Details"),
T("Include any special requirements such as equipment which they need to bring.")))
table.site_id.label = T("Report To")
table.requester_id.label = T("Volunteer Contact")
table.request_for_id.label = T("Report To")
table.recv_by_id.label = T("Reported To")
if r.component:
if r.component.name == "document":
s3.crud.submit_button = T("Add")
#table = r.component.table
# @ToDo: Fix for Link Table
#table.date.default = r.record.date
#if r.record.site_id:
# stable = db.org_site
# query = (stable.id == r.record.site_id)
# site = db(query).select(stable.location_id,
# stable.organisation_id,
# limitby=(0, 1)).first()
# if site:
# table.location_id.default = site.location_id
# table.organisation_id.default = site.organisation_id
elif r.component.name == "req_item":
ctable = r.component.table
ctable.site_id.writable = ctable.site_id.readable = False
s3.req_hide_quantities(ctable)
elif r.component.name == "req_skill":
s3.req_hide_quantities(r.component.table)
elif r.component.alias == "job":
s3task.configure_tasktable_crud(
function="req_add_from_template",
args = [r.id],
vars = dict(user_id = auth.user is not None and auth.user.id or 0),
period = 86400, # seconds, so 1 day
)
db.scheduler_task.timeout.writable = False
else:
if r.id:
table.is_template.readable = table.is_template.writable = False
method = r.method
if method not in ("map", "read", "search", "update"):
# Hide fields which don't make sense in a Create form
# - includes one embedded in list_create
# - list_fields over-rides, so still visible within list itself
s3.req_create_form_mods()
if type and settings.get_req_inline_forms():
# Inline Forms
s3.req_inline_form(type, method)
# Get the default Facility for this user
#if settings.has_module("hrm"):
# hrtable = s3db.hrm_human_resource
# query = (hrtable.person_id == s3_logged_in_person())
# site = db(query).select(hrtable.site_id,
# limitby=(0, 1)).first()
# if site:
# r.table.site_id.default = site.site_id
# Use site_id in User Profile
if auth.is_logged_in():
if not table.site_id.default:
table.site_id.default = auth.user.site_id
elif method == "map":
# Tell the client to request per-feature markers
s3db.configure("req_req", marker_fn=marker_fn)
elif method == "update":
if settings.get_req_inline_forms():
# Inline Forms
s3.req_inline_form(type, method)
s3.scripts.append("/%s/static/scripts/S3/s3.req_update.js" % appname)
# Prevent Items from being added to closed or cancelled requests
if r.record and (r.record.closed or r.record.cancel):
s3db.configure("req_req_item",
insertable = False)
elif r.representation == "plain":
# Map Popups
pass
elif r.representation == "geojson":
# Load these models now as they'll be needed when we encode
mtable = s3db.gis_marker
s3db.configure("req_req", marker_fn=marker_fn)
if r.component and r.component.name == "commit":
table = r.component.table
record = r.record
stable = s3db.org_site
commit_status = record.commit_status
# Commits belonging to this request
rsites = []
            query = (table.deleted == False) & (table.req_id == record.id)
req_sites = db(query).select(table.site_id)
for req_site in req_sites:
rsites += [req_site.site_id]
# All the sites
commit_sites = db((stable.deleted == False)).select(stable.id,
stable.code)
# Sites which have not committed to this request yet
site_opts = {}
for site in commit_sites:
if (site.id not in site_opts) and (site.id not in rsites):
site_opts[site.id] = site.code
table.site_id.requires = IS_IN_SET(site_opts)
if (commit_status == 2) and settings.get_req_restrict_on_complete():
# Restrict from committing to completed requests
s3db.configure(table,
listadd=False)
else:
# Allow commitments to be added when doing so as a component
s3db.configure(table,
listadd = True)
if type == 1: # Items
# Limit site_id to facilities the user has permissions for
auth.permitted_facilities(table=r.table,
error_msg=T("You do not have permission for any facility to make a commitment."))
if r.interactive:
# Dropdown not Autocomplete
itable = s3db.req_commit_item
itable.req_item_id.widget = None
req_id = r.id
s3db.req_commit_item.req_item_id.requires = \
IS_ONE_OF(db,
"req_req_item.id",
s3db.req_item_represent,
orderby = "req_req_item.id",
filterby = "req_id",
filter_opts = [req_id],
sort=True
)
s3.jquery_ready.append('''
S3OptionsFilter({
'triggerName':'req_item_id',
'targetName':'item_pack_id',
'lookupPrefix':'req',
'lookupResource':'req_item_packs',
'lookupKey':'req_item_id',
'lookupField':'id',
'msgNoRecords':i18n.no_packs,
'fncPrep':S3.supply.fncPrepItem,
'fncRepresent':S3.supply.fncRepresentItem
})''')
# Custom Form
s3forms = s3base.s3forms
crud_form = s3forms.S3SQLCustomForm(
"site_id",
"date",
"date_available",
"committer_id",
s3forms.S3SQLInlineComponent(
"commit_item",
label = T("Items"),
fields = ["req_item_id",
"item_pack_id",
"quantity",
"comments"
]
),
"comments",
)
s3db.configure("req_commit", crud_form=crud_form)
# Redirect to the Items tab after creation
#s3db.configure(table,
# create_next = URL(c="req", f="commit",
# args=["[id]", "commit_item"]),
# update_next = URL(c="req", f="commit",
# args=["[id]", "commit_item"]))
elif type == 3: # People
# Limit site_id to orgs the user has permissions for
# @ToDo: Make this customisable between Site/Org
# @ToDo: is_affiliated()
auth.permitted_facilities(table=r.table,
error_msg=T("You do not have permission for any facility to make a commitment."))
# Limit organisation_id to organisations the user has permissions for
#auth.permitted_organisations(table=r.table, redirect_on_error=False)
if r.interactive:
#table.organisation_id.readable = True
#table.organisation_id.writable = True
# Custom Form
s3forms = s3base.s3forms
crud_form = s3forms.S3SQLCustomForm(
"site_id",
"date",
"date_available",
"committer_id",
s3forms.S3SQLInlineComponent(
"commit_skill",
label = T("Skills"),
fields = ["quantity",
"skill_id",
"comments"
]
),
"comments",
)
s3db.configure("req_commit", crud_form=crud_form)
# Redirect to the Skills tab after creation
#s3db.configure(table,
# create_next = URL(c="req", f="commit",
# args=["[id]", "commit_skill"]),
# update_next = URL(c="req", f="commit",
# args=["[id]", "commit_skill"]))
else:
# Non-Item commits can have an Organisation
# Check if user is affiliated to an Organisation
if is_affiliated():
# Limit organisation_id to organisations the user has permissions for
auth.permitted_organisations(table=r.table,
redirect_on_error=False)
table.organisation_id.readable = table.organisation_id.writable = True
else:
# Unaffiliated people can't commit on behalf of others
field = r.component.table.committer_id
field.writable = False
field.comment = None
# Non-Item commits shouldn't have a From Inventory
# @ToDo: Assets do? (Well, a 'From Site')
table.site_id.readable = table.site_id.writable = False
#if r.interactive and r.record.type == 3: # People
# # Redirect to the Persons tab after creation
# s3db.configure(table,
# create_next = URL(c="req", f="commit",
# args=["[id]", "commit_person"]),
# update_next = URL(c="req", f="commit",
# args=["[id]", "commit_person"])
# )
else:
# Limit site_id to facilities the user has permissions for
# @ToDo: Non-Item requests shouldn't be bound to a Facility?
auth.permitted_facilities(table=r.table,
error_msg=T("You do not have permission for any facility to make a request."))
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive and r.method != "import":
if not r.component:
s3_action_buttons(r)
#s3_action_buttons(r, copyable=True)
# if "buttons" in output:
# buttons = output["buttons"]
# if "delete_btn" in buttons:
# delete_btn = buttons["delete_btn"]
# delete_btn = DIV(delete_btn,
# A(T("Copy Request"),
# _href=URL(args=[r.id, "copy"],
##vars={"type":r.record.type}
# ),
# _class="action-btn"))
# output["buttons"]["delete_btn"] = delete_btn
if settings.get_req_use_commit():
# This is appropriate to all
s3.actions.append(
dict(url = URL(c="req", f="req",
args=["[id]", "commit_all"]),
_class = "action-btn commit-btn",
label = str(T("Commit"))
)
)
s3.jquery_ready.append(
'''S3ConfirmClick('.commit-btn','%s')''' % T("Do you want to commit to this request?"))
# This is only appropriate for item requests
#query = (r.table.type == 1)
#rows = db(query).select(r.table.id)
#restrict = [str(row.id) for row in rows]
#s3.actions.append(
# dict(url = URL(c="req", f="req",
# args=["[id]", "req_item"]),
# _class = "action-btn",
# label = str(T("View Items")),
# restrict = restrict
# )
# )
# This is only appropriate for people requests
#query = (r.table.type == 3)
#rows = db(query).select(r.table.id)
#restrict = [str(row.id) for row in rows]
#s3.actions.append(
# dict(url = URL(c="req", f="req",
# args=["[id]", "req_skill"]),
# _class = "action-btn",
# label = str(T("View Skills")),
# restrict = restrict
# )
# )
s3.actions.append(
dict(url = URL(c="req", f="req",
args=["[id]", "commit_all", "send"]),
_class = "action-btn send-btn",
label = str(T("Send"))
)
)
s3.jquery_ready.append(
'''S3ConfirmClick('.send-btn','%s')''' % T("Are you sure you want to commit to this request and send a shipment?"))
else:
s3_action_buttons(r)
if r.component.name == "req_item" and settings.get_req_prompt_match():
req_item_inv_item_btn = dict(url = URL(c = "req",
f = "req_item_inv_item",
args = ["[id]"]
),
_class = "action-btn",
label = str(T("Request from Facility")),
)
s3.actions.append(req_item_inv_item_btn)
if r.component.name == "commit":
if "form" in output:
id = r.record.id
ctable = s3db.req_commit
query = (ctable.deleted == False) & \
(ctable.req_id == id)
exists = current.db(query).select(ctable.id, limitby=(0, 1))
if not exists:
output["form"] = A(T("Commit All"),
_href=URL(args=[id, "commit_all"]),
_class="action-btn",
_id="commit-btn")
s3.jquery_ready.append('''
S3ConfirmClick('#commit-btn','%s')''' % T("Do you want to commit to this request?"))
else:
s3.actions.append(
dict(url = URL(c="req", f="send_commit",
args = ["[id]"]),
_class = "action-btn send-btn",
label = str(T("Prepare Shipment"))
)
)
s3.jquery_ready.append(
'''S3ConfirmClick('.send-btn','%s')''' % T("Are you sure you want to send this shipment?"))
if r.component.alias == "job":
s3.actions = [
dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="req", f="req_template",
args=[str(r.id), "job", "[id]"])),
dict(label=str(T("Reset")),
_class="action-btn",
url=URL(c="req", f="req_template",
args=[str(r.id), "job", "[id]", "reset"])),
dict(label=str(T("Run Now")),
_class="action-btn",
url=URL(c="req", f="req_template",
args=[str(r.id), "job", "[id]", "run"])),
]
return output
s3.postp = postp
output = s3_rest_controller("req", "req",
rheader=s3db.req_rheader)
return output
# =============================================================================
def requester_represent(id, show_link=True):
"""
Represent a Requester as Name + Tel#
"""
if not id:
return current.messages["NONE"]
htable = s3db.hrm_human_resource
ptable = s3db.pr_person
ctable = s3db.pr_contact
query = (htable.id == id) & \
(htable.person_id == ptable.id)
left = ctable.on((ctable.pe_id == ptable.pe_id) & \
(ctable.contact_method == "SMS"))
row = db(query).select(htable.type,
ptable.first_name,
ptable.middle_name,
ptable.last_name,
ctable.value,
left=left,
limitby=(0, 1)).first()
try:
hr = row["hrm_human_resource"]
    except (KeyError, TypeError):
return current.messages.UNKNOWN_OPT
    represent = s3_fullname(row.pr_person)
    if row.pr_contact.value:
        represent = "%s %s" % (represent, row.pr_contact.value)
    if show_link:
        if hr.type == 1:
            controller = "hrm"
            group = "staff"
        else:
            controller = "vol"
            group = "volunteer"
        request.extension = "html"
        return A(represent,
                 _href = URL(c = controller,
                             f = "person",
                             args = ["contacts"],
                             vars = {"group": group,
                                     "human_resource.id": id}
                             )
                 )
    return represent
# =============================================================================
def req_item():
"""
REST Controller
@ToDo: Filter out fulfilled Items?
"""
if not s3.filter:
# Filter out Template Items
ritable = s3db.req_req_item
rtable = db.req_req
s3.filter = (rtable.is_template == False) & \
(rtable.id == ritable.req_id)
# Search method
search_method = s3db.get_config("req_req_item", "search_method")
if not search_method:
S3SearchOptionsWidget = s3base.S3SearchOptionsWidget
req_item_search = (
S3SearchOptionsWidget(
name="req_search_fulfil_status",
label=T("Status"),
field="req_id$fulfil_status",
options = s3.req_status_opts,
cols = 3,
),
S3SearchOptionsWidget(
name="req_search_priority",
label=T("Priority"),
field="req_id$priority",
options = s3.req_priority_opts,
cols = 3,
),
#S3SearchOptionsWidget(
# name="req_search_L1",
# field="req_id$site_id$location_id$L1",
# location_level="L1",
# cols = 3,
#),
#S3SearchOptionsWidget(
# name="req_search_L2",
# field="req_id$site_id$location_id$L2",
# location_level="L2",
# cols = 3,
#),
S3SearchOptionsWidget(
name="req_search_L3",
field="req_id$site_id$location_id$L3",
location_level="L3",
cols = 3,
),
S3SearchOptionsWidget(
name="req_search_L4",
field="req_id$site_id$location_id$L4",
location_level="L4",
cols = 3,
),
)
s3db.configure("req_req_item",
search_method = s3base.S3Search(advanced=req_item_search),
)
def prep(r):
if r.interactive:
list_fields = s3db.get_config("req_req_item", "list_fields")
list_fields.insert(1, "req_id$site_id")
list_fields.insert(1, "req_id$site_id$location_id$L4")
list_fields.insert(1, "req_id$site_id$location_id$L3")
s3db.configure("req_req_item",
insertable = False,
list_fields = list_fields,
)
s3.crud_strings["req_req_item"].title_list = T("Requested Items")
            if r.method is not None and r.method not in ("update", "read"):
# Hide fields which don't make sense in a Create form
# - includes one embedded in list_create
# - list_fields over-rides, so still visible within list itself
s3db.req_hide_quantities(r.table)
return True
s3.prep = prep
output = s3_rest_controller("req", "req_item")
if settings.get_req_prompt_match():
req_item_inv_item_btn = dict(url = URL(c="req", f="req_item_inv_item",
args=["[id]"]),
_class = "action-btn",
label = str(T("Request from Facility")),
)
if s3.actions:
s3.actions += [req_item_inv_item_btn]
else:
s3.actions = [req_item_inv_item_btn]
return output
# -----------------------------------------------------------------------------
def req_item_packs():
"""
Called by S3OptionsFilter to provide the pack options for an Item
"""
req_item_id = None
args = request.args
if len(args) == 1 and args[0].isdigit():
req_item_id = args[0]
else:
for v in request.vars:
if "." in v and v.split(".", 1)[1] == "req_item_id":
req_item_id = request.vars[v]
break
table = s3db.supply_item_pack
ritable = s3db.req_req_item
query = (ritable.id == req_item_id) & \
(ritable.item_id == table.item_id)
response.headers["Content-Type"] = "application/json"
return db(query).select(table.id,
table.name,
table.quantity).json()
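# Illustrative response (values made up; web2py serialises the Rows):
#   [{"id": 4, "name": "Box of 10", "quantity": 10.0},
#    {"id": 5, "name": "Piece", "quantity": 1.0}]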
# -----------------------------------------------------------------------------
def req_item_inv_item():
"""
Shows the inventory items which match a requested item
@ToDo: Make this page a component of req_item
"""
req_item_id = request.args[0]
    request.args = []  # reset args before calling the embedded REST controllers below
ritable = s3db.req_req_item
req_item = ritable[req_item_id]
rtable = s3db.req_req
req = rtable[req_item.req_id]
output = {}
output["title"] = T("Request Stock from Available Warehouse")
output["req_btn"] = A(T("Return to Request"),
_href = URL(c="req", f="req",
args=[req_item.req_id, "req_item"]),
_class = "action-btn"
)
output["req_item"] = TABLE( TR(
TH( "%s: " % T("Requested By") ),
rtable.site_id.represent(req.site_id),
TH( "%s: " % T("Item")),
ritable.item_id.represent(req_item.item_id),
),
TR(
TH( "%s: " % T("Requester") ),
rtable.requester_id.represent(req.requester_id),
TH( "%s: " % T("Quantity")),
req_item.quantity,
),
TR(
TH( "%s: " % T("Date Requested") ),
rtable.date.represent(req.date),
TH( T("Quantity Committed")),
req_item.quantity_commit,
),
TR(
TH( "%s: " % T("Date Required") ),
rtable.date_required.represent(req.date_required),
TH( "%s: " % T("Quantity in Transit")),
req_item.quantity_transit,
),
TR(
TH( "%s: " % T("Priority") ),
rtable.priority.represent(req.priority),
TH( "%s: " % T("Quantity Fulfilled")),
req_item.quantity_fulfil,
)
)
s3.no_sspag = True # pagination won't work with 2 datatables on one page @todo: test
itable = s3db.inv_inv_item
# Get list of matching inventory items
s3.filter = (itable.item_id == req_item.item_id)
# Tweak CRUD String for this context
s3.crud_strings["inv_inv_item"].msg_list_empty = T("No Inventories currently have this item in stock")
inv_items = s3_rest_controller("inv", "inv_item")
output["items"] = inv_items["items"]
if current.deployment_settings.get_supply_use_alt_name():
# Get list of alternative inventory items
atable = s3db.supply_item_alt
query = (atable.item_id == req_item.item_id ) & \
(atable.deleted == False )
alt_item_rows = db(query).select(atable.alt_item_id)
alt_item_ids = [alt_item_row.alt_item_id for alt_item_row in alt_item_rows]
if alt_item_ids:
s3.filter = (itable.item_id.belongs(alt_item_ids))
inv_items_alt = s3_rest_controller("inv", "inv_item")
output["items_alt"] = inv_items_alt["items"]
else:
output["items_alt"] = T("No Inventories currently have suitable alternative items in stock")
response.view = "req/req_item_inv_item.html"
s3.actions = [dict(url = URL(c = request.controller,
f = "req",
args = [req_item.req_id, "req_item"],
vars = dict(req_item_id = req_item_id,
inv_item_id = "[id]")
),
_class = "action-btn",
label = str(T("Request From")),
)]
return output
# =============================================================================
def req_skill():
"""
REST Controller
@ToDo: Filter out fulfilled Skills?
"""
# Filter out Template Items
table = s3db.req_req_skill
rtable = s3db.req_req
s3.filter = (rtable.is_template == False) & \
(rtable.id == table.req_id)
# Search method
S3SearchOptionsWidget = s3base.S3SearchOptionsWidget
req_skill_search = (
S3SearchOptionsWidget(
name="req_search_fulfil_status",
label=T("Status"),
field="req_id$fulfil_status",
options = s3.req_status_opts,
cols = 3,
),
S3SearchOptionsWidget(
name="req_search_priority",
label=T("Priority"),
field="req_id$priority",
options = s3.req_priority_opts,
cols = 3,
),
#S3SearchOptionsWidget(
# name="req_search_L1",
# field="req_id$site_id$location_id$L1",
# location_level="L1",
# cols = 3,
#),
#S3SearchOptionsWidget(
# name="req_search_L2",
# field="req_id$site_id$location_id$L2",
# location_level="L2",
# cols = 3,
#),
S3SearchOptionsWidget(
name="req_search_L3",
field="req_id$site_id$location_id$L3",
location_level="L3",
cols = 3,
),
S3SearchOptionsWidget(
name="req_search_L4",
field="req_id$site_id$location_id$L4",
location_level="L4",
cols = 3,
),
)
s3db.configure("req_req_skill",
search_method = s3base.S3Search(advanced=req_skill_search),
)
def prep(r):
if r.interactive:
list_fields = s3db.get_config("req_req_skill", "list_fields")
list_fields.insert(1, "req_id$site_id")
list_fields.insert(1, "req_id$site_id$location_id$L4")
list_fields.insert(1, "req_id$site_id$location_id$L3")
s3db.configure("req_req_skill",
insertable=False,
list_fields = list_fields,
)
if r.method != "update" and r.method != "read":
# Hide fields which don't make sense in a Create form
# - includes one embedded in list_create
# - list_fields over-rides, so still visible within list itself
s3db.req_hide_quantities(r.table)
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive:
s3.actions = [
dict(url = URL(c="req", f="req",
args=["req_skill", "[id]"]),
_class = "action-btn",
label = str(READ)
)
]
return output
s3.postp = postp
output = s3_rest_controller("req", "req_skill")
return output
# =============================================================================
def summary_option():
""" REST Controller """
return s3_rest_controller()
# =============================================================================
def commit():
""" REST Controller """
# Check if user is affiliated to an Organisation
if not is_affiliated():
tablename = "req_commit_person"
table = s3db[tablename]
# Unaffiliated people can't commit on behalf of others
table.person_id.writable = False
# & can only make single-person commitments
# (This should have happened in the main commitment)
s3db.configure(tablename,
insertable=False)
def prep(r):
if r.interactive:
# Commitments created through UI should be done via components
table = r.table
if r.record:
s3.crud.submit_button = T("Save Changes")
if r.record.type == 1: # Items
# Limit site_id to facilities the user has permissions for
auth.permitted_facilities(table=table,
error_msg=T("You do not have permission for any facility to make a commitment.") )
table.site_id.comment = A(T("Set as default Site"),
_id="req_commit_site_id_link",
_target="_blank",
_href=URL(c="default",
f="user",
args=["profile"]))
jappend = s3.jquery_ready.append
jappend('''
$('#req_commit_site_id_link').click(function(){
var site_id=$('#req_commit_site_id').val()
if(site_id){
var url = $('#req_commit_site_id_link').attr('href')
var exists=url.indexOf('?')
if(exists=='-1'){
$('#req_commit_site_id_link').attr('href',url+'?site_id='+site_id)
}
}
return true
})''')
# Dropdown not Autocomplete
itable = s3db.req_commit_item
itable.req_item_id.widget = None
jappend('''
S3OptionsFilter({
'triggerName':'req_item_id',
'targetName':'item_pack_id',
'lookupPrefix':'req',
'lookupResource':'req_item_packs',
'lookupKey':'req_item_id',
'lookupField':'id',
'msgNoRecords':i18n.no_packs,
'fncPrep':S3.supply.fncPrepItem,
'fncRepresent':S3.supply.fncRepresentItem
})''')
# Custom Form
s3forms = s3base.s3forms
crud_form = s3forms.S3SQLCustomForm(
"site_id",
"date",
"date_available",
"committer_id",
s3forms.S3SQLInlineComponent(
"commit_item",
label = T("Items"),
fields = ["req_item_id",
"item_pack_id",
"quantity",
"comments"
]
),
"comments",
)
s3db.configure("req_commit", crud_form=crud_form)
elif r.record.type == 3: # People
# Limit site_id to sites the user has permissions for
auth.permitted_facilities(table=r.table,
error_msg=T("You do not have permission for any facility to make a commitment."))
table.site_id.comment = A(T("Set as default Site"),
_id="req_commit_site_id_link",
_target="_blank",
_href=URL(c="default",
f="user",
args=["profile"]))
# Limit organisation_id to organisations the user has permissions for
#auth.permitted_organisations(table=r.table, redirect_on_error=False)
#table.organisation_id.readable = True
#table.organisation_id.writable = True
# Custom Form
s3forms = s3base.s3forms
crud_form = s3forms.S3SQLCustomForm(
#"organisation_id",
"site_id",
"date",
"date_available",
"committer_id",
s3forms.S3SQLInlineComponent(
"commit_skill",
label = T("People"),
fields = ["quantity",
"skill_id",
"comments"
]
),
"comments",
)
s3db.configure("req_commit", crud_form=crud_form)
else:
# Commits to Other requests can have an Organisation
# Limit organisation_id to organisations the user has permissions for
auth.permitted_organisations(table=r.table, redirect_on_error=False)
table.organisation_id.readable = True
table.organisation_id.writable = True
# Non-Item commits shouldn't have a From Inventory
# @ToDo: Assets do?
table.site_id.readable = False
table.site_id.writable = False
if r.component:
req_id = r.record.req_id
if r.component.name == "commit_item":
# Limit commit items to items from the request
s3db.req_commit_item.req_item_id.requires = \
IS_ONE_OF(db,
"req_req_item.id",
s3db.req_item_represent,
orderby = "req_req_item.id",
filterby = "req_id",
filter_opts = [req_id],
sort=True
)
elif r.component.name == "person":
pass
# Limit commit skills to skills from the request
#db.req_commit_skill.req_skill_id.requires = \
# IS_ONE_OF(db,
# "req_req_skill.id",
# s3db.req_skill_represent,
# orderby = "req_req_skill.id",
# filterby = "req_id",
# filter_opts = [req_id],
# sort=True
# )
return True
s3.prep = prep
def postp(r, output):
if r.interactive and r.method != "import":
if not r.component:
table = r.table
record = r.record
s3_action_buttons(r)
s3.actions.append(
dict(url = URL(f = "send_commit",
args=["[id]"]),
_class = "action-btn send-btn",
label = str(T("Prepare Shipment"))
)
)
s3.jquery_ready.append(
'''S3ConfirmClick('.send-btn','%s')''' % T("Are you sure you want to send this shipment?"))
return output
s3.postp = postp
output = s3_rest_controller(rheader=commit_rheader)
return output
# -----------------------------------------------------------------------------
def commit_rheader(r):
""" Resource Header for Commitments """
if r.representation == "html":
record = r.record
if record and r.name == "commit":
s3_date_represent = s3base.S3DateTime.date_represent
tabs = [(T("Edit Details"), None)]
type = record.type and int(record.type)
table = r.table
if type == 1:
tabs.append((T("Items"), "commit_item"))
#req_record = db.req_req[record.req_id]
#req_date = req_record.date
rheader = DIV(TABLE(TR(TH("%s: " % table.req_id.label),
table.req_id.represent(record.req_id),
),
TR(TH("%s: " % T("Committing Warehouse")),
s3db.org_site_represent(record.site_id),
TH("%s: " % T("Commit Date")),
s3_date_represent(record.date),
),
TR(TH("%s: " % table.comments.label),
TD(record.comments or "", _colspan=3)
),
),
)
prepare_btn = A(T("Prepare Shipment"),
_href = URL(f = "send_commit",
args = [record.id]
),
_id = "send_commit",
_class = "action-btn"
)
s3.rfooter = TAG[""](prepare_btn)
# send_btn = A( T("Send Commitment as Shipment"),
# _href = URL(f = "send_commit",
# args = [record.id]
# ),
# _id = "send_commit",
# _class = "action-btn"
# )
#
# send_btn_confirm = SCRIPT("S3ConfirmClick('#send_commit', '%s')" %
# T("Do you want to send these Committed items?") )
# s3.rfooter = TAG[""](send_btn,send_btn_confirm)
#rheader.append(send_btn)
#rheader.append(send_btn_confirm)
elif type == 3:
#tabs.append((T("People"), "commit_person"))
tabs.append((T("People"), "commit_skill"))
#req_record = db.req_req[record.req_id]
#req_date = req_record.date
organisation_represent = s3db.org_organisation_represent
rheader = DIV(TABLE(TR(TH("%s: " % table.req_id.label),
table.req_id.represent(record.req_id),
),
TR(TH("%s: " % T("Committing Organization")),
organisation_represent(record.organisation_id),
TH("%s: " % T("Commit Date")),
s3_date_represent(record.date),
),
TR(TH("%s: " % table.comments.label),
                                   TD(record.comments or "", _colspan=3)
),
),
)
else:
# Other (& Assets/Shelter)
rheader = DIV(TABLE(TR(TH("%s: " % table.req_id.label),
table.req_id.represent(record.req_id),
),
TR(TH("%s: " % T("Committing Person")),
table.committer_id.represent(record.committer_id),
TH("%s: " % T("Commit Date")),
s3_date_represent(record.date),
),
TR(TH("%s: " % table.comments.label),
TD(record.comments or "", _colspan=3)
),
),
)
rheader_tabs = s3_rheader_tabs(r,
tabs)
rheader.append(rheader_tabs)
return rheader
return None
# =============================================================================
def send():
""" RESTful CRUD controller """
s3db.configure("inv_send",
listadd=False)
return s3db.inv_send_controller()
# ==============================================================================
def send_commit():
"""
Send a Shipment containing all items in a Commitment
"""
return s3db.req_send_commit()
# -----------------------------------------------------------------------------
def send_process():
""" Process a Shipment """
return s3db.inv_send_process()
# =============================================================================
def commit_item():
""" REST Controller """
return s3_rest_controller()
# =============================================================================
def commit_req():
"""
Function to commit items for a Request
- i.e. copy data from a req into a commitment
arg: req_id
vars: site_id
"""
req_id = request.args[0]
site_id = request.vars.get("site_id")
table = s3db.req_req
r_req = db(table.id == req_id).select(table.type,
limitby=(0, 1)).first()
# User must have permissions over facility which is sending
(prefix, resourcename, id) = s3db.get_instance(s3db.org_site, site_id)
if not site_id or not auth.s3_has_permission("update",
"%s_%s" % (prefix,
resourcename),
record_id=id):
session.error = T("You do not have permission to make this commitment.")
redirect(URL(c="req", f="req",
args=[req_id]))
# Create a new commit record
commit_id = s3db.req_commit.insert(date = request.utcnow,
req_id = req_id,
site_id = site_id,
type = r_req.type
)
# Only select items which are in the warehouse
ritable = s3db.req_req_item
iitable = s3db.inv_inv_item
query = (ritable.req_id == req_id) & \
(ritable.quantity_fulfil < ritable.quantity) & \
(iitable.site_id == site_id) & \
(ritable.item_id == iitable.item_id) & \
(ritable.deleted == False) & \
(iitable.deleted == False)
req_items = db(query).select(ritable.id,
ritable.quantity,
ritable.item_pack_id,
iitable.item_id,
iitable.quantity,
iitable.item_pack_id)
citable = s3db.req_commit_item
for req_item in req_items:
req_item_quantity = req_item.req_req_item.quantity * \
req_item.req_req_item.pack_quantity
inv_item_quantity = req_item.inv_inv_item.quantity * \
req_item.inv_inv_item.pack_quantity
if inv_item_quantity > req_item_quantity:
commit_item_quantity = req_item_quantity
else:
commit_item_quantity = inv_item_quantity
commit_item_quantity = commit_item_quantity / req_item.req_req_item.pack_quantity
if commit_item_quantity:
req_item_id = req_item.req_req_item.id
commit_item_id = citable.insert(commit_id = commit_id,
req_item_id = req_item_id,
item_pack_id = req_item.req_req_item.item_pack_id,
quantity = commit_item_quantity
)
# Update the req_item.commit_quantity & req.commit_status
s3mgr.store_session("req", "commit_item", commit_item_id)
form = Storage()
form.vars = Storage(
req_item_id = req_item_id
)
s3db.req_commit_item_onaccept(form)
# Redirect to commit
redirect(URL(c="req", f="commit",
args=[commit_id, "commit_item"]))
# =============================================================================
def send_req():
"""
Function to send items for a Request.
- i.e. copy data from a req into a send
arg: req_id
vars: site_id
"""
req_id = request.args[0]
site_id = request.vars.get("site_id", None)
site_name = s3db.org_site_represent(site_id, show_link=False)
ritable = s3db.req_req_item
iitable = s3db.inv_inv_item
sendtable = s3db.inv_send
tracktable = s3db.inv_track_item
siptable = s3db.supply_item_pack
table = s3db.req_req
r_req = db(table.id == req_id).select(table.req_ref,
table.requester_id,
table.site_id,
limitby=(0, 1)).first()
# User must have permissions over facility which is sending
(prefix, resourcename, id) = s3db.get_instance(db.org_site, site_id)
if not site_id or not auth.s3_has_permission("update",
"%s_%s" % (prefix,
resourcename),
record_id=id):
session.error = T("You do not have permission to send this shipment.")
redirect(URL(c="req", f="req",
args = [req_id]))
# Create a new send record
code = s3db.inv_get_shipping_code("WB",
site_id,
s3db.inv_send.send_ref
)
send_id = sendtable.insert(send_ref = code,
req_ref = r_req.req_ref,
sender_id = auth.s3_logged_in_person(),
site_id = site_id,
date = request.utcnow,
recipient_id = r_req.requester_id,
to_site_id = r_req.site_id,
status = s3db.inv_ship_status["IN_PROCESS"],
)
# Get the items for this request that have not been fulfilled (in transit)
sip_id_field = siptable.id
sip_quantity_field = siptable.quantity
query = (ritable.req_id == req_id) & \
(ritable.quantity_transit < ritable.quantity) & \
(ritable.deleted == False) & \
(ritable.item_pack_id == sip_id_field)
req_items = db(query).select(ritable.id,
ritable.quantity,
ritable.quantity_transit,
ritable.quantity_fulfil,
ritable.item_id,
sip_quantity_field
)
# Loop through each request item and find matched in the site inventory
IN_PROCESS = s3db.inv_tracking_status["IN_PROCESS"]
insert = tracktable.insert
inv_remove = s3db.inv_remove
ii_item_id_field = iitable.item_id
ii_quantity_field = iitable.quantity
ii_expiry_field = iitable.expiry_date
ii_purchase_field = iitable.purchase_date
iifields = [iitable.id,
ii_item_id_field,
ii_quantity_field,
iitable.item_pack_id,
iitable.pack_value,
iitable.currency,
ii_expiry_field,
ii_purchase_field,
iitable.bin,
iitable.owner_org_id,
iitable.supply_org_id,
sip_quantity_field,
]
bquery = (ii_quantity_field > 0) & \
(iitable.site_id == site_id) & \
(iitable.deleted == False) & \
(iitable.item_pack_id == sip_id_field)
orderby = ii_expiry_field | ii_purchase_field
no_match = True
for ritem in req_items:
rim = ritem.req_req_item
rim_id = rim.id
query = bquery & \
(ii_item_id_field == rim.item_id)
inv_items = db(query).select(*iifields,
orderby=orderby)
if len(inv_items) == 0:
            break
no_match = False
one_match = len(inv_items) == 1
# Get the Quantity Needed
quantity_shipped = max(rim.quantity_transit, rim.quantity_fulfil)
quantity_needed = (rim.quantity - quantity_shipped) * ritem.supply_item_pack.quantity
# Insert the track item records
# If there is more than one item match then we select the stock with the oldest expiry date first
# then the oldest purchase date first
# then a complete batch, if-possible
iids = []
append = iids.append
for item in inv_items:
if not quantity_needed:
break
iitem = item.inv_inv_item
if one_match:
# Remove this total from the warehouse stock
send_item_quantity = inv_remove(iitem, quantity_needed)
quantity_needed -= send_item_quantity
append(iitem.id)
else:
quantity_available = iitem.quantity * item.supply_item_pack.quantity
if iitem.expiry_date:
# We take first from the oldest expiry date
send_item_quantity = min(quantity_needed, quantity_available)
# Remove this total from the warehouse stock
send_item_quantity = inv_remove(iitem, send_item_quantity)
quantity_needed -= send_item_quantity
append(iitem.id)
elif iitem.purchase_date:
# We take first from the oldest purchase date for non-expiring stock
send_item_quantity = min(quantity_needed, quantity_available)
# Remove this total from the warehouse stock
send_item_quantity = inv_remove(iitem, send_item_quantity)
quantity_needed -= send_item_quantity
append(iitem.id)
elif quantity_needed <= quantity_available:
# Assign a complete batch together if possible
# Remove this total from the warehouse stock
send_item_quantity = inv_remove(iitem, quantity_needed)
quantity_needed = 0
append(iitem.id)
else:
# Try again on the second loop, if-necessary
continue
insert(send_id = send_id,
send_inv_item_id = iitem.id,
item_id = iitem.item_id,
req_item_id = rim_id,
item_pack_id = iitem.item_pack_id,
quantity = send_item_quantity,
status = IN_PROCESS,
pack_value = iitem.pack_value,
currency = iitem.currency,
bin = iitem.bin,
expiry_date = iitem.expiry_date,
owner_org_id = iitem.owner_org_id,
supply_org_id = iitem.supply_org_id,
#comments = comment,
)
# 2nd pass
for item in inv_items:
if not quantity_needed:
break
iitem = item.inv_inv_item
if iitem.id in iids:
continue
# We have no way to know which stock we should take 1st so show all with quantity 0 & let the user decide
send_item_quantity = 0
insert(send_id = send_id,
send_inv_item_id = iitem.id,
item_id = iitem.item_id,
req_item_id = rim_id,
item_pack_id = iitem.item_pack_id,
quantity = send_item_quantity,
status = IN_PROCESS,
pack_value = iitem.pack_value,
currency = iitem.currency,
bin = iitem.bin,
expiry_date = iitem.expiry_date,
owner_org_id = iitem.owner_org_id,
supply_org_id = iitem.supply_org_id,
#comments = comment,
)
if no_match:
session.warning = \
T("%(site)s has no items exactly matching this request. There may still be other items in stock which can fulfill this request!") % \
dict(site=site_name)
# Redirect to view the list of items in the Send
redirect(URL(c = "inv",
f = "send",
args = [send_id, "track_item"])
)
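# Usage sketch (hypothetical ids; application name assumed):
#   /eden/req/send_req/42?site_id=7 builds an IN_PROCESS shipment from
# site 7's stock for request 42, preferring stock with the oldest expiry
# date, then the oldest purchase date, then complete batches.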
# =============================================================================
def commit_item_json():
"""
"""
ctable = s3db.req_commit
itable = s3db.req_commit_item
stable = s3db.org_site
#ctable.date.represent = lambda dt: dt[:10]
query = (itable.req_item_id == request.args[0]) & \
(ctable.id == itable.commit_id) & \
(ctable.site_id == stable.id) & \
(itable.deleted == False)
records = db(query).select(ctable.id,
ctable.date,
stable.name,
itable.quantity,
orderby = db.req_commit.date)
json_str = '''[%s,%s''' % (json.dumps(dict(id = str(T("Committed")),
quantity = "#")),
records.json()[1:])
response.headers["Content-Type"] = "application/json"
return json_str
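# Illustrative response (values made up; the exact keys follow web2py's
# Rows.json() serialisation of the req_commit/org_site/req_commit_item join):
#   [{"id": "Committed", "quantity": "#"},
#    {"req_commit": {"id": 3, "date": "2012-05-01"},
#     "org_site": {"name": "Main Warehouse"},
#     "req_commit_item": {"quantity": 10.0}}]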
# =============================================================================
def fema():
"""
Custom Report to list all open requests for items that FEMA can supply
@ToDo: Filter to just Sites that FEMA support
"""
ritable = s3db.req_req_item
rtable = db.req_req
itable = db.supply_item
ictable = db.supply_item_category
citable = db.supply_catalog_item
query = (ictable.name == "FEMA") & \
(citable.item_category_id == ictable.id) & \
(citable.item_id == itable.id) & \
(itable.deleted != True)
fema_items = db(query).select(itable.id)
fema_item_ids = [item.id for item in fema_items]
REQ_STATUS_COMPLETE = 2
s3.filter = (rtable.deleted != True) & \
(rtable.is_template == False) & \
(rtable.commit_status != REQ_STATUS_COMPLETE) & \
(rtable.transit_status != REQ_STATUS_COMPLETE) & \
(rtable.fulfil_status != REQ_STATUS_COMPLETE) & \
(ritable.req_id == rtable.id) & \
(ritable.quantity > ritable.quantity_commit) & \
(ritable.quantity > ritable.quantity_transit) & \
(ritable.quantity > ritable.quantity_fulfil) & \
(ritable.deleted != True) & \
(ritable.item_id.belongs(fema_item_ids))
# Search method
req_item_search = [
s3base.S3SearchOptionsWidget(
name="req_search_site",
field="req_id$site_id",
label = T("Facility"),
cols = 3,
),
]
s3db.configure("req_req_item",
search_method = s3base.S3Search(advanced=req_item_search),
)
output = req_item()
return output
# END =========================================================================
| 41.9508 | 145 | 0.445889 |
if "req_item_id" in request.vars and "inv_item_id" in request.vars:
iitable = s3db.inv_inv_item
inv_item = db(iitable.id == request.vars.inv_item_id).select(iitable.site_id,
iitable.item_id,
limitby=(0, 1)
).first()
site_id = inv_item.site_id
item_id = inv_item.item_id
# @ToDo: Check Permissions & Avoid DB updates in GETs
db(s3db.req_req_item.id == request.vars.req_item_id).update(site_id = site_id)
response.confirmation = T("%(item)s requested from %(site)s") % \
{"item": s3db.supply_ItemRepresent()(item_id),
"site": s3db.org_SiteRepresent()(site_id)
}
elif "req.site_id" in r.get_vars:
# Called from 'Make new request' button on [siteinstance]/req page
table.site_id.default = request.get_vars.get("req.site_id")
table.site_id.writable = False
if r.http == "POST":
del r.get_vars["req.site_id"]
table.requester_id.represent = requester_represent
# Set Fields and Labels depending on type
if type:
table.type.default = type
# This prevents the type from being edited AFTER it is set
table.type.readable = table.type.writable = False
crud_strings = settings.get_req_req_crud_strings(type)
if crud_strings:
s3.crud_strings["req_req"] = crud_strings
elif type == 1:
s3.crud_strings["req_req"].title_create = T("Make Supplies Request")
elif type == 3:
s3.crud_strings["req_req"].title_create = T("Make People Request")
# Filter the query based on type
if s3.filter:
s3.filter = s3.filter & \
(table.type == type)
else:
s3.filter = (table.type == type)
# These changes are applied via JS in create forms where type is editable
if type == 1: # Item
table.date_recv.readable = table.date_recv.writable = True
if settings.get_req_items_ask_purpose():
table.purpose.label = T("What the Items will be used for")
table.site_id.label = T("Deliver To")
table.request_for_id.label = T("Deliver To")
table.requester_id.label = T("Site Contact")
table.recv_by_id.label = T("Delivered To")
elif type == 3: # Person
table.date_required_until.readable = table.date_required_until.writable = True
table.purpose.label = T("Task Details")
table.purpose.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Task Details"),
T("Include any special requirements such as equipment which they need to bring.")))
table.site_id.label = T("Report To")
table.requester_id.label = T("Volunteer Contact")
table.request_for_id.label = T("Report To")
table.recv_by_id.label = T("Reported To")
if r.component:
if r.component.name == "document":
s3.crud.submit_button = T("Add")
#table = r.component.table
# @ToDo: Fix for Link Table
#table.date.default = r.record.date
#if r.record.site_id:
# stable = db.org_site
# query = (stable.id == r.record.site_id)
# site = db(query).select(stable.location_id,
# stable.organisation_id,
# limitby=(0, 1)).first()
# if site:
# table.location_id.default = site.location_id
# table.organisation_id.default = site.organisation_id
elif r.component.name == "req_item":
ctable = r.component.table
ctable.site_id.writable = ctable.site_id.readable = False
s3.req_hide_quantities(ctable)
elif r.component.name == "req_skill":
s3.req_hide_quantities(r.component.table)
elif r.component.alias == "job":
s3task.configure_tasktable_crud(
function="req_add_from_template",
args = [r.id],
vars = dict(user_id = auth.user is not None and auth.user.id or 0),
period = 86400, # seconds, so 1 day
)
db.scheduler_task.timeout.writable = False
else:
if r.id:
table.is_template.readable = table.is_template.writable = False
method = r.method
if method not in ("map", "read", "search", "update"):
# Hide fields which don't make sense in a Create form
s3.req_create_form_mods()
if type and settings.get_req_inline_forms():
s3.req_inline_form(type, method)
if auth.is_logged_in():
if not table.site_id.default:
table.site_id.default = auth.user.site_id
elif method == "map":
s3db.configure("req_req", marker_fn=marker_fn)
elif method == "update":
if settings.get_req_inline_forms():
s3.req_inline_form(type, method)
s3.scripts.append("/%s/static/scripts/S3/s3.req_update.js" % appname)
if r.record and (r.record.closed or r.record.cancel):
s3db.configure("req_req_item",
insertable = False)
elif r.representation == "plain":
pass
elif r.representation == "geojson":
mtable = s3db.gis_marker
s3db.configure("req_req", marker_fn=marker_fn)
if r.component and r.component.name == "commit":
table = r.component.table
record = r.record
stable = s3db.org_site
commit_status = record.commit_status
# Commits belonging to this request
rsites = []
query = (table.deleted == False)&(table.req_id == record.id)
req_sites = db(query).select(table.site_id)
for req_site in req_sites:
rsites += [req_site.site_id]
# All the sites
commit_sites = db((stable.deleted == False)).select(stable.id,
stable.code)
# Sites which have not committed to this request yet
site_opts = {}
for site in commit_sites:
if (site.id not in site_opts) and (site.id not in rsites):
site_opts[site.id] = site.code
table.site_id.requires = IS_IN_SET(site_opts)
if (commit_status == 2) and settings.get_req_restrict_on_complete():
# Restrict from committing to completed requests
s3db.configure(table,
listadd=False)
else:
# Allow commitments to be added when doing so as a component
s3db.configure(table,
listadd = True)
if type == 1: # Items
# Limit site_id to facilities the user has permissions for
auth.permitted_facilities(table=r.table,
error_msg=T("You do not have permission for any facility to make a commitment."))
if r.interactive:
# Dropdown not Autocomplete
itable = s3db.req_commit_item
itable.req_item_id.widget = None
req_id = r.id
s3db.req_commit_item.req_item_id.requires = \
IS_ONE_OF(db,
"req_req_item.id",
s3db.req_item_represent,
orderby = "req_req_item.id",
filterby = "req_id",
filter_opts = [req_id],
sort=True
)
s3.jquery_ready.append('''
S3OptionsFilter({
'triggerName':'req_item_id',
'targetName':'item_pack_id',
'lookupPrefix':'req',
'lookupResource':'req_item_packs',
'lookupKey':'req_item_id',
'lookupField':'id',
'msgNoRecords':i18n.no_packs,
'fncPrep':S3.supply.fncPrepItem,
'fncRepresent':S3.supply.fncRepresentItem
})''')
# Custom Form
s3forms = s3base.s3forms
crud_form = s3forms.S3SQLCustomForm(
"site_id",
"date",
"date_available",
"committer_id",
s3forms.S3SQLInlineComponent(
"commit_item",
label = T("Items"),
fields = ["req_item_id",
"item_pack_id",
"quantity",
"comments"
]
),
"comments",
)
s3db.configure("req_commit", crud_form=crud_form)
# Redirect to the Items tab after creation
#s3db.configure(table,
# create_next = URL(c="req", f="commit",
# args=["[id]", "commit_item"]),
# update_next = URL(c="req", f="commit",
# args=["[id]", "commit_item"]))
elif type == 3: # People
# Limit site_id to orgs the user has permissions for
# @ToDo: Make this customisable between Site/Org
# @ToDo: is_affiliated()
auth.permitted_facilities(table=r.table,
error_msg=T("You do not have permission for any facility to make a commitment."))
# Limit organisation_id to organisations the user has permissions for
#auth.permitted_organisations(table=r.table, redirect_on_error=False)
if r.interactive:
#table.organisation_id.readable = True
#table.organisation_id.writable = True
# Custom Form
s3forms = s3base.s3forms
crud_form = s3forms.S3SQLCustomForm(
"site_id",
"date",
"date_available",
"committer_id",
s3forms.S3SQLInlineComponent(
"commit_skill",
label = T("Skills"),
fields = ["quantity",
"skill_id",
"comments"
]
),
"comments",
)
s3db.configure("req_commit", crud_form=crud_form)
# Redirect to the Skills tab after creation
#s3db.configure(table,
# create_next = URL(c="req", f="commit",
# args=["[id]", "commit_skill"]),
# update_next = URL(c="req", f="commit",
# args=["[id]", "commit_skill"]))
else:
# Non-Item commits can have an Organisation
# Check if user is affiliated to an Organisation
if is_affiliated():
# Limit organisation_id to organisations the user has permissions for
auth.permitted_organisations(table=r.table,
redirect_on_error=False)
table.organisation_id.readable = table.organisation_id.writable = True
else:
# Unaffiliated people can't commit on behalf of others
field = r.component.table.committer_id
field.writable = False
field.comment = None
# @ToDo: Assets do? (Well, a 'From Site')
table.site_id.readable = table.site_id.writable = False
#if r.interactive and r.record.type == 3: # People
# # Redirect to the Persons tab after creation
# s3db.configure(table,
# create_next = URL(c="req", f="commit",
# args=["[id]", "commit_person"]),
# update_next = URL(c="req", f="commit",
# args=["[id]", "commit_person"])
# )
else:
# Limit site_id to facilities the user has permissions for
# @ToDo: Non-Item requests shouldn't be bound to a Facility?
auth.permitted_facilities(table=r.table,
error_msg=T("You do not have permission for any facility to make a request."))
return True
s3.prep = prep
def postp(r, output):
if r.interactive and r.method != "import":
if not r.component:
s3_action_buttons(r)
if settings.get_req_use_commit():
s3.actions.append(
dict(url = URL(c="req", f="req",
args=["[id]", "commit_all"]),
_class = "action-btn commit-btn",
label = str(T("Commit"))
)
)
s3.jquery_ready.append(
'''S3ConfirmClick('.commit-btn','%s')''' % T("Do you want to commit to this request?"))
s3.actions.append(
dict(url = URL(c="req", f="req",
args=["[id]", "commit_all", "send"]),
_class = "action-btn send-btn",
label = str(T("Send"))
)
)
s3.jquery_ready.append(
'''S3ConfirmClick('.send-btn','%s')''' % T("Are you sure you want to commit to this request and send a shipment?"))
else:
s3_action_buttons(r)
if r.component.name == "req_item" and settings.get_req_prompt_match():
req_item_inv_item_btn = dict(url = URL(c = "req",
f = "req_item_inv_item",
args = ["[id]"]
),
_class = "action-btn",
label = str(T("Request from Facility")),
)
s3.actions.append(req_item_inv_item_btn)
if r.component.name == "commit":
if "form" in output:
id = r.record.id
ctable = s3db.req_commit
query = (ctable.deleted == False) & \
(ctable.req_id == id)
exists = current.db(query).select(ctable.id, limitby=(0, 1))
if not exists:
output["form"] = A(T("Commit All"),
_href=URL(args=[id, "commit_all"]),
_class="action-btn",
_id="commit-btn")
s3.jquery_ready.append('''
S3ConfirmClick('#commit-btn','%s')''' % T("Do you want to commit to this request?"))
else:
s3.actions.append(
dict(url = URL(c="req", f="send_commit",
args = ["[id]"]),
_class = "action-btn send-btn",
label = str(T("Prepare Shipment"))
)
)
s3.jquery_ready.append(
'''S3ConfirmClick('.send-btn','%s')''' % T("Are you sure you want to send this shipment?"))
if r.component.alias == "job":
s3.actions = [
dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="req", f="req_template",
args=[str(r.id), "job", "[id]"])),
dict(label=str(T("Reset")),
_class="action-btn",
url=URL(c="req", f="req_template",
args=[str(r.id), "job", "[id]", "reset"])),
dict(label=str(T("Run Now")),
_class="action-btn",
url=URL(c="req", f="req_template",
args=[str(r.id), "job", "[id]", "run"])),
]
return output
s3.postp = postp
output = s3_rest_controller("req", "req",
rheader=s3db.req_rheader)
return output
def requester_represent(id, show_link=True):
if not id:
return current.messages["NONE"]
htable = s3db.hrm_human_resource
ptable = s3db.pr_person
ctable = s3db.pr_contact
query = (htable.id == id) & \
(htable.person_id == ptable.id)
left = ctable.on((ctable.pe_id == ptable.pe_id) & \
(ctable.contact_method == "SMS"))
row = db(query).select(htable.type,
ptable.first_name,
ptable.middle_name,
ptable.last_name,
ctable.value,
left=left,
limitby=(0, 1)).first()
try:
hr = row["hrm_human_resource"]
except:
return current.messages.UNKNOWN_OPT
repr = s3_fullname(row.pr_person)
if row.pr_contact.value:
repr = "%s %s" % (repr, row.pr_contact.value)
if show_link:
if hr.type == 1:
controller = "hrm"
group = "staff"
else:
controller = "vol"
group = "volunteer"
request.extension = "html"
return A(repr,
_href = URL(c = controller,
f = "person",
args = ["contacts"],
vars = {"group": group,
"human_resource.id": id}
)
)
return repr
def req_item():
if not s3.filter:
ritable = s3db.req_req_item
rtable = db.req_req
s3.filter = (rtable.is_template == False) & \
(rtable.id == ritable.req_id)
search_method = s3db.get_config("req_req_item", "search_method")
if not search_method:
S3SearchOptionsWidget = s3base.S3SearchOptionsWidget
req_item_search = (
S3SearchOptionsWidget(
name="req_search_fulfil_status",
label=T("Status"),
field="req_id$fulfil_status",
options = s3.req_status_opts,
cols = 3,
),
S3SearchOptionsWidget(
name="req_search_priority",
label=T("Priority"),
field="req_id$priority",
options = s3.req_priority_opts,
cols = 3,
),
S3SearchOptionsWidget(
name="req_search_L3",
field="req_id$site_id$location_id$L3",
location_level="L3",
cols = 3,
),
S3SearchOptionsWidget(
name="req_search_L4",
field="req_id$site_id$location_id$L4",
location_level="L4",
cols = 3,
),
)
s3db.configure("req_req_item",
search_method = s3base.S3Search(advanced=req_item_search),
)
def prep(r):
if r.interactive:
list_fields = s3db.get_config("req_req_item", "list_fields")
list_fields.insert(1, "req_id$site_id")
list_fields.insert(1, "req_id$site_id$location_id$L4")
list_fields.insert(1, "req_id$site_id$location_id$L3")
s3db.configure("req_req_item",
insertable = False,
list_fields = list_fields,
)
s3.crud_strings["req_req_item"].title_list = T("Requested Items")
if r.method != None and r.method != "update" and r.method != "read":
# - includes one embedded in list_create
# - list_fields over-rides, so still visible within list itself
s3db.req_hide_quantities(r.table)
return True
s3.prep = prep
output = s3_rest_controller("req", "req_item")
if settings.get_req_prompt_match():
req_item_inv_item_btn = dict(url = URL(c="req", f="req_item_inv_item",
args=["[id]"]),
_class = "action-btn",
label = str(T("Request from Facility")),
)
if s3.actions:
s3.actions += [req_item_inv_item_btn]
else:
s3.actions = [req_item_inv_item_btn]
return output
# -----------------------------------------------------------------------------
def req_item_packs():
req_item_id = None
args = request.args
if len(args) == 1 and args[0].isdigit():
req_item_id = args[0]
else:
for v in request.vars:
if "." in v and v.split(".", 1)[1] == "req_item_id":
req_item_id = request.vars[v]
break
table = s3db.supply_item_pack
ritable = s3db.req_req_item
query = (ritable.id == req_item_id) & \
(ritable.item_id == table.item_id)
response.headers["Content-Type"] = "application/json"
return db(query).select(table.id,
table.name,
table.quantity).json()
# -----------------------------------------------------------------------------
def req_item_inv_item():
req_item_id = request.args[0]
request.args = [] #
ritable = s3db.req_req_item
req_item = ritable[req_item_id]
rtable = s3db.req_req
req = rtable[req_item.req_id]
output = {}
output["title"] = T("Request Stock from Available Warehouse")
output["req_btn"] = A(T("Return to Request"),
_href = URL(c="req", f="req",
args=[req_item.req_id, "req_item"]),
_class = "action-btn"
)
output["req_item"] = TABLE( TR(
TH( "%s: " % T("Requested By") ),
rtable.site_id.represent(req.site_id),
TH( "%s: " % T("Item")),
ritable.item_id.represent(req_item.item_id),
),
TR(
TH( "%s: " % T("Requester") ),
rtable.requester_id.represent(req.requester_id),
TH( "%s: " % T("Quantity")),
req_item.quantity,
),
TR(
TH( "%s: " % T("Date Requested") ),
rtable.date.represent(req.date),
TH( T("Quantity Committed")),
req_item.quantity_commit,
),
TR(
TH( "%s: " % T("Date Required") ),
rtable.date_required.represent(req.date_required),
TH( "%s: " % T("Quantity in Transit")),
req_item.quantity_transit,
),
TR(
TH( "%s: " % T("Priority") ),
rtable.priority.represent(req.priority),
TH( "%s: " % T("Quantity Fulfilled")),
req_item.quantity_fulfil,
)
)
s3.no_sspag = True # pagination won't work with 2 datatables on one page @todo: test
itable = s3db.inv_inv_item
s3.filter = (itable.item_id == req_item.item_id)
s3.crud_strings["inv_inv_item"].msg_list_empty = T("No Inventories currently have this item in stock")
inv_items = s3_rest_controller("inv", "inv_item")
output["items"] = inv_items["items"]
if current.deployment_settings.get_supply_use_alt_name():
atable = s3db.supply_item_alt
query = (atable.item_id == req_item.item_id ) & \
(atable.deleted == False )
alt_item_rows = db(query).select(atable.alt_item_id)
alt_item_ids = [alt_item_row.alt_item_id for alt_item_row in alt_item_rows]
if alt_item_ids:
s3.filter = (itable.item_id.belongs(alt_item_ids))
inv_items_alt = s3_rest_controller("inv", "inv_item")
output["items_alt"] = inv_items_alt["items"]
else:
output["items_alt"] = T("No Inventories currently have suitable alternative items in stock")
response.view = "req/req_item_inv_item.html"
s3.actions = [dict(url = URL(c = request.controller,
f = "req",
args = [req_item.req_id, "req_item"],
vars = dict(req_item_id = req_item_id,
inv_item_id = "[id]")
),
_class = "action-btn",
label = str(T("Request From")),
)]
return output
def req_skill():
table = s3db.req_req_skill
rtable = s3db.req_req
s3.filter = (rtable.is_template == False) & \
(rtable.id == table.req_id)
S3SearchOptionsWidget = s3base.S3SearchOptionsWidget
req_skill_search = (
S3SearchOptionsWidget(
name="req_search_fulfil_status",
label=T("Status"),
field="req_id$fulfil_status",
options = s3.req_status_opts,
cols = 3,
),
S3SearchOptionsWidget(
name="req_search_priority",
label=T("Priority"),
field="req_id$priority",
options = s3.req_priority_opts,
cols = 3,
),
S3SearchOptionsWidget(
name="req_search_L3",
field="req_id$site_id$location_id$L3",
location_level="L3",
cols = 3,
),
S3SearchOptionsWidget(
name="req_search_L4",
field="req_id$site_id$location_id$L4",
location_level="L4",
cols = 3,
),
)
s3db.configure("req_req_skill",
search_method = s3base.S3Search(advanced=req_skill_search),
)
def prep(r):
if r.interactive:
list_fields = s3db.get_config("req_req_skill", "list_fields")
list_fields.insert(1, "req_id$site_id")
list_fields.insert(1, "req_id$site_id$location_id$L4")
list_fields.insert(1, "req_id$site_id$location_id$L3")
s3db.configure("req_req_skill",
insertable=False,
list_fields = list_fields,
)
if r.method != "update" and r.method != "read":
# - includes one embedded in list_create
# - list_fields over-rides, so still visible within list itself
s3db.req_hide_quantities(r.table)
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive:
s3.actions = [
dict(url = URL(c="req", f="req",
args=["req_skill", "[id]"]),
_class = "action-btn",
label = str(READ)
)
]
return output
s3.postp = postp
output = s3_rest_controller("req", "req_skill")
return output
# =============================================================================
def summary_option():
return s3_rest_controller()
# =============================================================================
def commit():
# Check if user is affiliated to an Organisation
if not is_affiliated():
tablename = "req_commit_person"
table = s3db[tablename]
# Unaffiliated people can't commit on behalf of others
table.person_id.writable = False
s3db.configure(tablename,
insertable=False)
def prep(r):
if r.interactive:
table = r.table
if r.record:
s3.crud.submit_button = T("Save Changes")
if r.record.type == 1:
auth.permitted_facilities(table=table,
error_msg=T("You do not have permission for any facility to make a commitment.") )
table.site_id.comment = A(T("Set as default Site"),
_id="req_commit_site_id_link",
_target="_blank",
_href=URL(c="default",
f="user",
args=["profile"]))
jappend = s3.jquery_ready.append
jappend('''
$('#req_commit_site_id_link').click(function(){
var site_id=$('#req_commit_site_id').val()
if(site_id){
var url = $('#req_commit_site_id_link').attr('href')
var exists=url.indexOf('?')
if(exists=='-1'){
$('#req_commit_site_id_link').attr('href',url+'?site_id='+site_id)
}
}
return true
})''')
itable = s3db.req_commit_item
itable.req_item_id.widget = None
jappend('''
S3OptionsFilter({
'triggerName':'req_item_id',
'targetName':'item_pack_id',
'lookupPrefix':'req',
'lookupResource':'req_item_packs',
'lookupKey':'req_item_id',
'lookupField':'id',
'msgNoRecords':i18n.no_packs,
'fncPrep':S3.supply.fncPrepItem,
'fncRepresent':S3.supply.fncRepresentItem
})''')
s3forms = s3base.s3forms
crud_form = s3forms.S3SQLCustomForm(
"site_id",
"date",
"date_available",
"committer_id",
s3forms.S3SQLInlineComponent(
"commit_item",
label = T("Items"),
fields = ["req_item_id",
"item_pack_id",
"quantity",
"comments"
]
),
"comments",
)
s3db.configure("req_commit", crud_form=crud_form)
elif r.record.type == 3:
auth.permitted_facilities(table=r.table,
error_msg=T("You do not have permission for any facility to make a commitment."))
table.site_id.comment = A(T("Set as default Site"),
_id="req_commit_site_id_link",
_target="_blank",
_href=URL(c="default",
f="user",
args=["profile"]))
s3forms = s3base.s3forms
crud_form = s3forms.S3SQLCustomForm(
"site_id",
"date",
"date_available",
"committer_id",
s3forms.S3SQLInlineComponent(
"commit_skill",
label = T("People"),
fields = ["quantity",
"skill_id",
"comments"
]
),
"comments",
)
s3db.configure("req_commit", crud_form=crud_form)
else:
auth.permitted_organisations(table=r.table, redirect_on_error=False)
table.organisation_id.readable = True
table.organisation_id.writable = True
# @ToDo: Assets do?
table.site_id.readable = False
table.site_id.writable = False
if r.component:
req_id = r.record.req_id
if r.component.name == "commit_item":
# Limit commit items to items from the request
s3db.req_commit_item.req_item_id.requires = \
IS_ONE_OF(db,
"req_req_item.id",
s3db.req_item_represent,
orderby = "req_req_item.id",
filterby = "req_id",
filter_opts = [req_id],
sort=True
)
elif r.component.name == "person":
pass
# Limit commit skills to skills from the request
#db.req_commit_skill.req_skill_id.requires = \
# IS_ONE_OF(db,
# "req_req_skill.id",
# s3db.req_skill_represent,
# orderby = "req_req_skill.id",
# filterby = "req_id",
# filter_opts = [req_id],
# sort=True
# )
return True
s3.prep = prep
def postp(r, output):
if r.interactive and r.method != "import":
if not r.component:
table = r.table
record = r.record
s3_action_buttons(r)
s3.actions.append(
dict(url = URL(f = "send_commit",
args=["[id]"]),
_class = "action-btn send-btn",
label = str(T("Prepare Shipment"))
)
)
s3.jquery_ready.append(
'''S3ConfirmClick('.send-btn','%s')''' % T("Are you sure you want to send this shipment?"))
return output
s3.postp = postp
output = s3_rest_controller(rheader=commit_rheader)
return output
# -----------------------------------------------------------------------------
def commit_rheader(r):
if r.representation == "html":
record = r.record
if record and r.name == "commit":
s3_date_represent = s3base.S3DateTime.date_represent
tabs = [(T("Edit Details"), None)]
type = record.type and int(record.type)
table = r.table
if type == 1:
tabs.append((T("Items"), "commit_item"))
#req_record = db.req_req[record.req_id]
#req_date = req_record.date
rheader = DIV(TABLE(TR(TH("%s: " % table.req_id.label),
table.req_id.represent(record.req_id),
),
TR(TH("%s: " % T("Committing Warehouse")),
s3db.org_site_represent(record.site_id),
TH("%s: " % T("Commit Date")),
s3_date_represent(record.date),
),
TR(TH("%s: " % table.comments.label),
TD(record.comments or "", _colspan=3)
),
),
)
prepare_btn = A(T("Prepare Shipment"),
_href = URL(f = "send_commit",
args = [record.id]
),
_id = "send_commit",
_class = "action-btn"
)
s3.rfooter = TAG[""](prepare_btn)
# send_btn = A( T("Send Commitment as Shipment"),
# _href = URL(f = "send_commit",
# args = [record.id]
# ),
# _id = "send_commit",
# _class = "action-btn"
# )
#
# send_btn_confirm = SCRIPT("S3ConfirmClick('#send_commit', '%s')" %
# T("Do you want to send these Committed items?") )
# s3.rfooter = TAG[""](send_btn,send_btn_confirm)
#rheader.append(send_btn)
#rheader.append(send_btn_confirm)
elif type == 3:
#tabs.append((T("People"), "commit_person"))
tabs.append((T("People"), "commit_skill"))
#req_record = db.req_req[record.req_id]
#req_date = req_record.date
organisation_represent = s3db.org_organisation_represent
rheader = DIV(TABLE(TR(TH("%s: " % table.req_id.label),
table.req_id.represent(record.req_id),
),
TR(TH("%s: " % T("Committing Organization")),
organisation_represent(record.organisation_id),
TH("%s: " % T("Commit Date")),
s3_date_represent(record.date),
),
TR(TH("%s: " % table.comments.label),
TD(record.comments, _colspan=3)
),
),
)
else:
# Other (& Assets/Shelter)
rheader = DIV(TABLE(TR(TH("%s: " % table.req_id.label),
table.req_id.represent(record.req_id),
),
TR(TH("%s: " % T("Committing Person")),
table.committer_id.represent(record.committer_id),
TH("%s: " % T("Commit Date")),
s3_date_represent(record.date),
),
TR(TH("%s: " % table.comments.label),
TD(record.comments or "", _colspan=3)
),
),
)
rheader_tabs = s3_rheader_tabs(r,
tabs)
rheader.append(rheader_tabs)
return rheader
return None
# =============================================================================
def send():
s3db.configure("inv_send",
listadd=False)
return s3db.inv_send_controller()
# ==============================================================================
def send_commit():
return s3db.req_send_commit()
# -----------------------------------------------------------------------------
def send_process():
return s3db.inv_send_process()
# =============================================================================
def commit_item():
return s3_rest_controller()
# =============================================================================
def commit_req():
req_id = request.args[0]
site_id = request.vars.get("site_id")
table = s3db.req_req
r_req = db(table.id == req_id).select(table.type,
limitby=(0, 1)).first()
# User must have permissions over facility which is sending
(prefix, resourcename, id) = s3db.get_instance(s3db.org_site, site_id)
if not site_id or not auth.s3_has_permission("update",
"%s_%s" % (prefix,
resourcename),
record_id=id):
session.error = T("You do not have permission to make this commitment.")
redirect(URL(c="req", f="req",
args=[req_id]))
# Create a new commit record
commit_id = s3db.req_commit.insert(date = request.utcnow,
req_id = req_id,
site_id = site_id,
type = r_req.type
)
# Only select items which are in the warehouse
ritable = s3db.req_req_item
iitable = s3db.inv_inv_item
query = (ritable.req_id == req_id) & \
(ritable.quantity_fulfil < ritable.quantity) & \
(iitable.site_id == site_id) & \
(ritable.item_id == iitable.item_id) & \
(ritable.deleted == False) & \
(iitable.deleted == False)
req_items = db(query).select(ritable.id,
ritable.quantity,
ritable.item_pack_id,
iitable.item_id,
iitable.quantity,
iitable.item_pack_id)
citable = s3db.req_commit_item
for req_item in req_items:
req_item_quantity = req_item.req_req_item.quantity * \
req_item.req_req_item.pack_quantity
inv_item_quantity = req_item.inv_inv_item.quantity * \
req_item.inv_inv_item.pack_quantity
if inv_item_quantity > req_item_quantity:
commit_item_quantity = req_item_quantity
else:
commit_item_quantity = inv_item_quantity
commit_item_quantity = commit_item_quantity / req_item.req_req_item.pack_quantity
if commit_item_quantity:
req_item_id = req_item.req_req_item.id
commit_item_id = citable.insert(commit_id = commit_id,
req_item_id = req_item_id,
item_pack_id = req_item.req_req_item.item_pack_id,
quantity = commit_item_quantity
)
# Update the req_item.commit_quantity & req.commit_status
s3mgr.store_session("req", "commit_item", commit_item_id)
form = Storage()
form.vars = Storage(
req_item_id = req_item_id
)
s3db.req_commit_item_onaccept(form)
# Redirect to commit
redirect(URL(c="req", f="commit",
args=[commit_id, "commit_item"]))
# =============================================================================
def send_req():
req_id = request.args[0]
site_id = request.vars.get("site_id", None)
site_name = s3db.org_site_represent(site_id, show_link=False)
ritable = s3db.req_req_item
iitable = s3db.inv_inv_item
sendtable = s3db.inv_send
tracktable = s3db.inv_track_item
siptable = s3db.supply_item_pack
table = s3db.req_req
r_req = db(table.id == req_id).select(table.req_ref,
table.requester_id,
table.site_id,
limitby=(0, 1)).first()
# User must have permissions over facility which is sending
(prefix, resourcename, id) = s3db.get_instance(db.org_site, site_id)
if not site_id or not auth.s3_has_permission("update",
"%s_%s" % (prefix,
resourcename),
record_id=id):
session.error = T("You do not have permission to send this shipment.")
redirect(URL(c="req", f="req",
args = [req_id]))
# Create a new send record
code = s3db.inv_get_shipping_code("WB",
site_id,
s3db.inv_send.send_ref
)
send_id = sendtable.insert(send_ref = code,
req_ref = r_req.req_ref,
sender_id = auth.s3_logged_in_person(),
site_id = site_id,
date = request.utcnow,
recipient_id = r_req.requester_id,
to_site_id = r_req.site_id,
status = s3db.inv_ship_status["IN_PROCESS"],
)
# Get the items for this request that have not been fulfilled (in transit)
sip_id_field = siptable.id
sip_quantity_field = siptable.quantity
query = (ritable.req_id == req_id) & \
(ritable.quantity_transit < ritable.quantity) & \
(ritable.deleted == False) & \
(ritable.item_pack_id == sip_id_field)
req_items = db(query).select(ritable.id,
ritable.quantity,
ritable.quantity_transit,
ritable.quantity_fulfil,
ritable.item_id,
sip_quantity_field
)
# Loop through each request item and find matched in the site inventory
IN_PROCESS = s3db.inv_tracking_status["IN_PROCESS"]
insert = tracktable.insert
inv_remove = s3db.inv_remove
ii_item_id_field = iitable.item_id
ii_quantity_field = iitable.quantity
ii_expiry_field = iitable.expiry_date
ii_purchase_field = iitable.purchase_date
iifields = [iitable.id,
ii_item_id_field,
ii_quantity_field,
iitable.item_pack_id,
iitable.pack_value,
iitable.currency,
ii_expiry_field,
ii_purchase_field,
iitable.bin,
iitable.owner_org_id,
iitable.supply_org_id,
sip_quantity_field,
]
bquery = (ii_quantity_field > 0) & \
(iitable.site_id == site_id) & \
(iitable.deleted == False) & \
(iitable.item_pack_id == sip_id_field)
orderby = ii_expiry_field | ii_purchase_field
no_match = True
for ritem in req_items:
rim = ritem.req_req_item
rim_id = rim.id
query = bquery & \
(ii_item_id_field == rim.item_id)
inv_items = db(query).select(*iifields,
orderby=orderby)
if len(inv_items) == 0:
break;
no_match = False
one_match = len(inv_items) == 1
# Get the Quantity Needed
quantity_shipped = max(rim.quantity_transit, rim.quantity_fulfil)
quantity_needed = (rim.quantity - quantity_shipped) * ritem.supply_item_pack.quantity
# Insert the track item records
# If there is more than one item match then we select the stock with the oldest expiry date first
# then the oldest purchase date first
# then a complete batch, if-possible
iids = []
append = iids.append
for item in inv_items:
if not quantity_needed:
break
iitem = item.inv_inv_item
if one_match:
# Remove this total from the warehouse stock
send_item_quantity = inv_remove(iitem, quantity_needed)
quantity_needed -= send_item_quantity
append(iitem.id)
else:
quantity_available = iitem.quantity * item.supply_item_pack.quantity
if iitem.expiry_date:
# We take first from the oldest expiry date
send_item_quantity = min(quantity_needed, quantity_available)
# Remove this total from the warehouse stock
send_item_quantity = inv_remove(iitem, send_item_quantity)
quantity_needed -= send_item_quantity
append(iitem.id)
elif iitem.purchase_date:
# We take first from the oldest purchase date for non-expiring stock
send_item_quantity = min(quantity_needed, quantity_available)
# Remove this total from the warehouse stock
send_item_quantity = inv_remove(iitem, send_item_quantity)
quantity_needed -= send_item_quantity
append(iitem.id)
elif quantity_needed <= quantity_available:
# Assign a complete batch together if possible
# Remove this total from the warehouse stock
send_item_quantity = inv_remove(iitem, quantity_needed)
quantity_needed = 0
append(iitem.id)
else:
# Try again on the second loop, if-necessary
continue
insert(send_id = send_id,
send_inv_item_id = iitem.id,
item_id = iitem.item_id,
req_item_id = rim_id,
item_pack_id = iitem.item_pack_id,
quantity = send_item_quantity,
status = IN_PROCESS,
pack_value = iitem.pack_value,
currency = iitem.currency,
bin = iitem.bin,
expiry_date = iitem.expiry_date,
owner_org_id = iitem.owner_org_id,
supply_org_id = iitem.supply_org_id,
#comments = comment,
)
# 2nd pass
for item in inv_items:
if not quantity_needed:
break
iitem = item.inv_inv_item
if iitem.id in iids:
continue
# We have no way to know which stock we should take 1st so show all with quantity 0 & let the user decide
send_item_quantity = 0
insert(send_id = send_id,
send_inv_item_id = iitem.id,
item_id = iitem.item_id,
req_item_id = rim_id,
item_pack_id = iitem.item_pack_id,
quantity = send_item_quantity,
status = IN_PROCESS,
pack_value = iitem.pack_value,
currency = iitem.currency,
bin = iitem.bin,
expiry_date = iitem.expiry_date,
owner_org_id = iitem.owner_org_id,
supply_org_id = iitem.supply_org_id,
#comments = comment,
)
if no_match:
session.warning = \
T("%(site)s has no items exactly matching this request. There may still be other items in stock which can fulfill this request!") % \
dict(site=site_name)
# Redirect to view the list of items in the Send
redirect(URL(c = "inv",
f = "send",
args = [send_id, "track_item"])
)
# =============================================================================
def commit_item_json():
ctable = s3db.req_commit
itable = s3db.req_commit_item
stable = s3db.org_site
#ctable.date.represent = lambda dt: dt[:10]
query = (itable.req_item_id == request.args[0]) & \
(ctable.id == itable.commit_id) & \
(ctable.site_id == stable.id) & \
(itable.deleted == False)
records = db(query).select(ctable.id,
ctable.date,
stable.name,
itable.quantity,
orderby = db.req_commit.date)
json_str = '''[%s,%s''' % (json.dumps(dict(id = str(T("Committed")),
quantity = "#")),
records.json()[1:])
response.headers["Content-Type"] = "application/json"
return json_str
# =============================================================================
def fema():
ritable = s3db.req_req_item
rtable = db.req_req
itable = db.supply_item
ictable = db.supply_item_category
citable = db.supply_catalog_item
query = (ictable.name == "FEMA") & \
(citable.item_category_id == ictable.id) & \
(citable.item_id == itable.id) & \
(itable.deleted != True)
fema_items = db(query).select(itable.id)
fema_item_ids = [item.id for item in fema_items]
REQ_STATUS_COMPLETE = 2
s3.filter = (rtable.deleted != True) & \
(rtable.is_template == False) & \
(rtable.commit_status != REQ_STATUS_COMPLETE) & \
(rtable.transit_status != REQ_STATUS_COMPLETE) & \
(rtable.fulfil_status != REQ_STATUS_COMPLETE) & \
(ritable.req_id == rtable.id) & \
(ritable.quantity > ritable.quantity_commit) & \
(ritable.quantity > ritable.quantity_transit) & \
(ritable.quantity > ritable.quantity_fulfil) & \
(ritable.deleted != True) & \
(ritable.item_id.belongs(fema_item_ids))
# Search method
req_item_search = [
s3base.S3SearchOptionsWidget(
name="req_search_site",
field="req_id$site_id",
label = T("Facility"),
cols = 3,
),
]
s3db.configure("req_req_item",
search_method = s3base.S3Search(advanced=req_item_search),
)
output = req_item()
return output
# END =========================================================================
| true
| true
|
f7196479819c081e316242e97b6c71d0635143b6
| 249
|
py
|
Python
|
ACM/NAQ16/G.py
|
zzh8829/CompetitiveProgramming
|
36f36b10269b4648ca8be0b08c2c49e96abede25
|
[
"MIT"
] | 1
|
2017-10-01T00:51:39.000Z
|
2017-10-01T00:51:39.000Z
|
ACM/NAQ16/G.py
|
zzh8829/CompetitiveProgramming
|
36f36b10269b4648ca8be0b08c2c49e96abede25
|
[
"MIT"
] | null | null | null |
ACM/NAQ16/G.py
|
zzh8829/CompetitiveProgramming
|
36f36b10269b4648ca8be0b08c2c49e96abede25
|
[
"MIT"
] | null | null | null |
sa = input()
la = len(sa)
a = int(sa)  # parsed value of the input; only its digit count la is used below
import math
num = 0  # running sum of log10(i), i.e. log10(i!)
i = 0
while True:
    i += 1
    num += math.log10(i)
    if math.ceil(num) >= la - 100:  # debug trace once close to the target
        print(i, num, la)
    if math.ceil(num) == la:  # i! has la digits (ceil of the log-sum)
        print(i)
    if math.ceil(num) > la:
        break
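# --- Editor's sketch (not part of the original solution) ---------------------
# digits(n!) = floor(sum_{i=1..n} log10 i) + 1; for non-integral sums this
# equals math.ceil(num) as used above. A direct helper under that assumption:
import math

def factorial_digits(n):
    """Number of decimal digits of n! (n >= 1)."""
    return int(sum(math.log10(i) for i in range(1, n + 1))) + 1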
| 10.375
| 32
| 0.522088
| true
| true
|
f719651f1696393c8cda5badd8ce6c3c1ce02286
| 3,397
|
py
|
Python
|
browser_history/cli.py
|
RobertWetzler/browser-history
|
bce5438e8b697e9be70d3747d0b9835c6c1324bc
|
[
"Apache-2.0"
] | null | null | null |
browser_history/cli.py
|
RobertWetzler/browser-history
|
bce5438e8b697e9be70d3747d0b9835c6c1324bc
|
[
"Apache-2.0"
] | null | null | null |
browser_history/cli.py
|
RobertWetzler/browser-history
|
bce5438e8b697e9be70d3747d0b9835c6c1324bc
|
[
"Apache-2.0"
] | null | null | null |
"""This module defines functions and globals required for the
command line interface of browser-history."""
import sys
import argparse
from browser_history import get_history, generic, browsers, utils
# get list of all implemented browsers by finding subclasses of generic.Browser
AVAILABLE_BROWSERS = ', '.join(b.__name__ for b in generic.Browser.__subclasses__())
AVAILABLE_FORMATS = ', '.join(generic.Outputs.formats)
def make_parser():
"""Creates an ArgumentParser, configures and returns it.
This was made into a separate function to be used with sphinx-argparse
:rtype: :py:class:`argparse.ArgumentParser`
"""
parser_ = argparse.ArgumentParser(description='''
A tool to retrieve history from
(almost) any browser on (almost) any platform''',
epilog='''
Checkout the GitHub repo https://github.com/pesos/browser-history
if you have any issues or want to help contribute''')
parser_.add_argument('-b', '--browser',
default='all',
help=f'''
browser to retrieve history from. Should be one of all, {AVAILABLE_BROWSERS}.
Default is all (gets history from all browsers).''')
parser_.add_argument('-f', '--format',
default="csv",
help=f'''
Format to be used in output. Should be one of {AVAILABLE_FORMATS}.
Default is csv''')
parser_.add_argument('-o', '--output',
default=None,
help='''
File where output is to be written.
If not provided standard output is used.''')
return parser_
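# Editor's sketch: the parser built above behaves like, e.g.,
#   make_parser().parse_args(["-b", "firefox", "-f", "json"])
#   -> Namespace(browser='firefox', format='json', output=None)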
parser = make_parser()
def main():
"""Entrypoint to the command-line interface (CLI) of browser-history.
It parses arguments from sys.argv and performs the appropriate actions.
"""
args = parser.parse_args()
if args.browser == 'all':
outputs = get_history()
else:
try:
# gets browser class by name (string).
selected_browser = args.browser
for browser in generic.Browser.__subclasses__():
if browser.__name__.lower() == args.browser.lower():
selected_browser = browser.__name__
break
browser_class = getattr(browsers, selected_browser)
except AttributeError:
utils.logger.error('Browser %s is unavailable. Check --help for available browsers',
args.browser)
sys.exit(1)
try:
browser = browser_class().fetch()
outputs = browser
except AssertionError as e:
utils.logger.error(e)
sys.exit(1)
# Format the output
try:
formatted = outputs.formatted(args.format)
except ValueError as e:
utils.logger.error(e)
sys.exit(1)
if args.output is None:
print(formatted)
else:
filename = args.output
with open(filename, 'w') as output_file:
output_file.write(formatted)
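# Editor's sketch (assumption: the module may also be run directly, not only
# via a console-script entry point):
if __name__ == "__main__":
    main()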
| 37.32967
| 109
| 0.548719
| true
| true
|
f7196672dac355b888cbfce65b0f4b2221ebe267
| 30,455
|
py
|
Python
|
testing/test_basic.py
|
yaccz/pytest-twisted
|
5dc4efc5d335da0172fec02e48076aacef4bf75d
|
[
"BSD-3-Clause"
] | null | null | null |
testing/test_basic.py
|
yaccz/pytest-twisted
|
5dc4efc5d335da0172fec02e48076aacef4bf75d
|
[
"BSD-3-Clause"
] | null | null | null |
testing/test_basic.py
|
yaccz/pytest-twisted
|
5dc4efc5d335da0172fec02e48076aacef4bf75d
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import textwrap
import pytest
# https://docs.python.org/3/whatsnew/3.5.html#pep-492-coroutines-with-async-and-await-syntax
ASYNC_AWAIT = sys.version_info >= (3, 5)
# https://docs.python.org/3/whatsnew/3.6.html#pep-525-asynchronous-generators
ASYNC_GENERATORS = sys.version_info >= (3, 6)
timeout = 15
# https://github.com/pytest-dev/pytest/issues/6505
def force_plural(name):
if name in {"error", "warning"}:
return name + "s"
return name
def assert_outcomes(run_result, outcomes):
formatted_output = format_run_result_output_for_assert(run_result)
try:
result_outcomes = run_result.parseoutcomes()
except ValueError:
assert False, formatted_output
normalized_result_outcomes = {
force_plural(name): outcome
for name, outcome in result_outcomes.items()
if name != "seconds"
}
assert normalized_result_outcomes == outcomes, formatted_output
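# Editor's note (sketch): parseoutcomes() can return singular keys such as
# {"passed": 2, "warning": 1, "seconds": 0.12}; the normalization above yields
# {"passed": 2, "warnings": 1} so callers always assert against plural keys.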
def format_run_result_output_for_assert(run_result):
tpl = """
---- stdout
{}
---- stderr
{}
----
"""
return textwrap.dedent(tpl).format(
run_result.stdout.str(), run_result.stderr.str()
)
@pytest.fixture(name="default_conftest", autouse=True)
def _default_conftest(testdir):
testdir.makeconftest(textwrap.dedent("""
import pytest
import pytest_twisted
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
pytest_twisted._use_asyncio_selector_if_required(config=config)
"""))
def skip_if_reactor_not(request, expected_reactor):
actual_reactor = request.config.getoption("reactor", "default")
if actual_reactor != expected_reactor:
pytest.skip(
"reactor is {} not {}".format(actual_reactor, expected_reactor),
)
def skip_if_no_async_await():
return pytest.mark.skipif(
not ASYNC_AWAIT,
reason="async/await syntax not supported on Python <3.5",
)
def skip_if_no_async_generators():
return pytest.mark.skipif(
not ASYNC_GENERATORS,
reason="async generators not support on Python <3.6",
)
@pytest.fixture
def cmd_opts(request):
reactor = request.config.getoption("reactor", "default")
return (
sys.executable,
"-m",
"pytest",
"-v",
"--reactor={}".format(reactor),
)
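# Editor's note: with the default reactor, cmd_opts evaluates to e.g.
#   (sys.executable, "-m", "pytest", "-v", "--reactor=default")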
def test_inline_callbacks_in_pytest():
assert hasattr(pytest, 'inlineCallbacks')
@pytest.mark.parametrize(
'decorator, should_warn',
(
('pytest.inlineCallbacks', True),
('pytest_twisted.inlineCallbacks', False),
),
)
def test_inline_callbacks_in_pytest_deprecation(
testdir,
cmd_opts,
decorator,
should_warn,
):
import_path, _, _ = decorator.rpartition('.')
test_file = """
import {import_path}
def test_deprecation():
@{decorator}
def f():
yield 42
""".format(import_path=import_path, decorator=decorator)
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
expected_outcomes = {"passed": 1}
if should_warn:
expected_outcomes["warnings"] = 1
assert_outcomes(rr, expected_outcomes)
def test_blockon_in_pytest():
assert hasattr(pytest, 'blockon')
@pytest.mark.parametrize(
'function, should_warn',
(
('pytest.blockon', True),
('pytest_twisted.blockon', False),
),
)
def test_blockon_in_pytest_deprecation(
testdir,
cmd_opts,
function,
should_warn,
):
import_path, _, _ = function.rpartition('.')
test_file = """
import warnings
from twisted.internet import reactor, defer
import pytest
import {import_path}
@pytest.fixture
def foo(request):
d = defer.Deferred()
d.callback(None)
{function}(d)
def test_succeed(foo):
pass
""".format(import_path=import_path, function=function)
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
expected_outcomes = {"passed": 1}
if should_warn:
expected_outcomes["warnings"] = 1
assert_outcomes(rr, expected_outcomes)
def test_fail_later(testdir, cmd_opts):
test_file = """
from twisted.internet import reactor, defer
def test_fail():
def doit():
try:
1 / 0
except:
d.errback()
d = defer.Deferred()
reactor.callLater(0.01, doit)
return d
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"failed": 1})
def test_succeed_later(testdir, cmd_opts):
test_file = """
from twisted.internet import reactor, defer
def test_succeed():
d = defer.Deferred()
reactor.callLater(0.01, d.callback, 1)
return d
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 1})
def test_non_deferred(testdir, cmd_opts):
test_file = """
from twisted.internet import reactor, defer
def test_succeed():
return 42
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 1})
def test_exception(testdir, cmd_opts):
test_file = """
def test_more_fail():
raise RuntimeError("foo")
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"failed": 1})
@pytest.fixture(
name="empty_optional_call",
params=["", "()"],
ids=["no call", "empty call"],
)
def empty_optional_call_fixture(request):
return request.param
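# Editor's note: parametrizing over "" and "()" lets the tests below exercise
# both decorator spellings, e.g. @pytest_twisted.inlineCallbacks and
# @pytest_twisted.inlineCallbacks()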
def test_inlineCallbacks(testdir, cmd_opts, empty_optional_call):
test_file = """
from twisted.internet import reactor, defer
import pytest
import pytest_twisted
@pytest.fixture(scope="module", params=["fs", "imap", "web"])
def foo(request):
return request.param
@pytest_twisted.inlineCallbacks{optional_call}
def test_succeed(foo):
yield defer.succeed(foo)
if foo == "web":
raise RuntimeError("baz")
""".format(optional_call=empty_optional_call)
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 2, "failed": 1})
@skip_if_no_async_await()
def test_async_await(testdir, cmd_opts, empty_optional_call):
test_file = """
from twisted.internet import reactor, defer
import pytest
import pytest_twisted
@pytest.fixture(scope="module", params=["fs", "imap", "web"])
def foo(request):
return request.param
@pytest_twisted.ensureDeferred{optional_call}
async def test_succeed(foo):
await defer.succeed(foo)
if foo == "web":
raise RuntimeError("baz")
""".format(optional_call=empty_optional_call)
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 2, "failed": 1})
def test_twisted_greenlet(testdir, cmd_opts):
test_file = """
import pytest, greenlet
MAIN = None
@pytest.fixture(scope="session", autouse=True)
def set_MAIN(request, twisted_greenlet):
global MAIN
MAIN = twisted_greenlet
def test_MAIN():
assert MAIN is not None
assert MAIN is greenlet.getcurrent()
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 1})
def test_blockon_in_fixture(testdir, cmd_opts):
test_file = """
from twisted.internet import reactor, defer
import pytest
import pytest_twisted
@pytest.fixture(scope="module", params=["fs", "imap", "web"])
def foo(request):
d1, d2 = defer.Deferred(), defer.Deferred()
reactor.callLater(0.01, d1.callback, 1)
reactor.callLater(0.02, d2.callback, request.param)
pytest_twisted.blockon(d1)
return d2
@pytest_twisted.inlineCallbacks
def test_succeed(foo):
x = yield foo
if x == "web":
raise RuntimeError("baz")
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 2, "failed": 1})
@skip_if_no_async_await()
def test_blockon_in_fixture_async(testdir, cmd_opts):
test_file = """
from twisted.internet import reactor, defer
import pytest
import pytest_twisted
@pytest.fixture(scope="module", params=["fs", "imap", "web"])
def foo(request):
d1, d2 = defer.Deferred(), defer.Deferred()
reactor.callLater(0.01, d1.callback, 1)
reactor.callLater(0.02, d2.callback, request.param)
pytest_twisted.blockon(d1)
return d2
@pytest_twisted.ensureDeferred
async def test_succeed(foo):
x = await foo
if x == "web":
raise RuntimeError("baz")
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 2, "failed": 1})
@skip_if_no_async_await()
def test_async_fixture(testdir, cmd_opts):
pytest_ini_file = """
[pytest]
markers =
redgreenblue
"""
testdir.makefile('.ini', pytest=pytest_ini_file)
test_file = """
from twisted.internet import reactor, defer
import pytest
import pytest_twisted
@pytest_twisted.async_fixture(
scope="function",
params=["fs", "imap", "web"],
)
@pytest.mark.redgreenblue
async def foo(request):
d1, d2 = defer.Deferred(), defer.Deferred()
reactor.callLater(0.01, d1.callback, 1)
reactor.callLater(0.02, d2.callback, request.param)
await d1
return d2,
@pytest_twisted.inlineCallbacks
def test_succeed_blue(foo):
x = yield foo[0]
if x == "web":
raise RuntimeError("baz")
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 2, "failed": 1})
@skip_if_no_async_await()
def test_async_fixture_no_arguments(testdir, cmd_opts, empty_optional_call):
test_file = """
from twisted.internet import reactor, defer
import pytest
import pytest_twisted
@pytest_twisted.async_fixture{optional_call}
async def scope(request):
return request.scope
def test_is_function_scope(scope):
assert scope == "function"
""".format(optional_call=empty_optional_call)
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 1})
@skip_if_no_async_generators()
def test_async_yield_fixture_ordered_teardown(testdir, cmd_opts):
test_file = """
from twisted.internet import reactor, defer
import pytest
import pytest_twisted
results = []
@pytest.fixture(scope='function')
def sync_fixture():
yield 42
results.append(2)
@pytest_twisted.async_yield_fixture(scope='function')
async def async_fixture(sync_fixture):
yield sync_fixture
results.append(1)
def test_first(async_fixture):
assert async_fixture == 42
def test_second():
assert results == [1, 2]
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 2})
@skip_if_no_async_generators()
def test_async_yield_fixture_can_await(testdir, cmd_opts):
test_file = """
from twisted.internet import reactor, defer
import pytest_twisted
@pytest_twisted.async_yield_fixture()
async def foo():
d1, d2 = defer.Deferred(), defer.Deferred()
reactor.callLater(0.01, d1.callback, 1)
reactor.callLater(0.02, d2.callback, 2)
await d1
# Twisted doesn't allow calling back with a Deferred as a value.
# This deferred is being wrapped up in a tuple to sneak through.
# https://github.com/twisted/twisted/blob/c0f1394c7bfb04d97c725a353a1f678fa6a1c602/src/twisted/internet/defer.py#L459
yield d2,
@pytest_twisted.ensureDeferred
async def test(foo):
x = await foo[0]
assert x == 2
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 1})
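# Editor's note (sketch): the trailing comma in `yield d2,` above wraps the
# Deferred in a 1-tuple so it reaches the test unresolved; the test unpacks it
# with foo[0] and awaits it itself.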
@skip_if_no_async_generators()
def test_async_yield_fixture_failed_test(testdir, cmd_opts):
test_file = """
import pytest_twisted
@pytest_twisted.async_yield_fixture()
async def foo():
yield 92
@pytest_twisted.ensureDeferred
async def test(foo):
assert False
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
rr.stdout.fnmatch_lines(lines2=["E*assert False"])
assert_outcomes(rr, {"failed": 1})
@skip_if_no_async_generators()
def test_async_yield_fixture_test_exception(testdir, cmd_opts):
test_file = """
import pytest_twisted
class UniqueLocalException(Exception):
pass
@pytest_twisted.async_yield_fixture()
async def foo():
yield 92
@pytest_twisted.ensureDeferred
async def test(foo):
raise UniqueLocalException("some message")
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
rr.stdout.fnmatch_lines(lines2=["E*.UniqueLocalException: some message*"])
assert_outcomes(rr, {"failed": 1})
@skip_if_no_async_generators()
def test_async_yield_fixture_yields_twice(testdir, cmd_opts):
test_file = """
import pytest_twisted
@pytest_twisted.async_yield_fixture()
async def foo():
yield 92
yield 36
@pytest_twisted.ensureDeferred
async def test(foo):
assert foo == 92
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 1, "errors": 1})
@skip_if_no_async_generators()
def test_async_yield_fixture_teardown_exception(testdir, cmd_opts):
test_file = """
from twisted.internet import reactor, defer
import pytest
import pytest_twisted
class UniqueLocalException(Exception):
pass
@pytest_twisted.async_yield_fixture()
async def foo(request):
yield 13
raise UniqueLocalException("some message")
@pytest_twisted.ensureDeferred
async def test_succeed(foo):
assert foo == 13
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
rr.stdout.fnmatch_lines(lines2=["E*.UniqueLocalException: some message*"])
assert_outcomes(rr, {"passed": 1, "errors": 1})
@skip_if_no_async_generators()
def test_async_yield_fixture_no_arguments(
testdir,
cmd_opts,
empty_optional_call,
):
test_file = """
from twisted.internet import reactor, defer
import pytest
import pytest_twisted
@pytest_twisted.async_yield_fixture{optional_call}
async def scope(request):
yield request.scope
def test_is_function_scope(scope):
assert scope == "function"
""".format(optional_call=empty_optional_call)
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 1})
@skip_if_no_async_generators()
def test_async_yield_fixture_function_scope(testdir, cmd_opts):
test_file = """
from twisted.internet import reactor, defer
import pytest
import pytest_twisted
check_me = 0
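        # function scope: setup/teardown wraps each test, so check_me cycles 0 -> 1 -> 2 -> 0 per test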
@pytest_twisted.async_yield_fixture(scope="function")
async def foo():
global check_me
if check_me != 0:
raise Exception('check_me already modified before fixture run')
check_me = 1
yield 42
if check_me != 2:
raise Exception(
'check_me not updated properly: {}'.format(check_me),
)
check_me = 0
def test_first(foo):
global check_me
assert check_me == 1
assert foo == 42
check_me = 2
def test_second(foo):
global check_me
assert check_me == 1
assert foo == 42
check_me = 2
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 2})
@skip_if_no_async_await()
def test_async_simple_fixture_in_fixture(testdir, cmd_opts):
test_file = """
import itertools
from twisted.internet import reactor, defer
import pytest
import pytest_twisted
@pytest_twisted.async_fixture(name='four')
async def fixture_four():
return 4
@pytest_twisted.async_fixture(name='doublefour')
async def fixture_doublefour(four):
return 2 * four
@pytest_twisted.ensureDeferred
async def test_four(four):
assert four == 4
@pytest_twisted.ensureDeferred
async def test_doublefour(doublefour):
assert doublefour == 8
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 2})
@skip_if_no_async_generators()
def test_async_yield_simple_fixture_in_fixture(testdir, cmd_opts):
test_file = """
import itertools
from twisted.internet import reactor, defer
import pytest
import pytest_twisted
@pytest_twisted.async_yield_fixture(name='four')
async def fixture_four():
yield 4
@pytest_twisted.async_yield_fixture(name='doublefour')
async def fixture_doublefour(four):
yield 2 * four
@pytest_twisted.ensureDeferred
async def test_four(four):
assert four == 4
@pytest_twisted.ensureDeferred
async def test_doublefour(doublefour):
assert doublefour == 8
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 2})
@skip_if_no_async_await()
@pytest.mark.parametrize('innerasync', [
pytest.param(truth, id='innerasync={}'.format(truth))
for truth in [True, False]
])
def test_async_fixture_in_fixture(testdir, cmd_opts, innerasync):
maybe_async = 'async ' if innerasync else ''
maybe_await = 'await ' if innerasync else ''
test_file = """
import itertools
from twisted.internet import reactor, defer
import pytest
import pytest_twisted
@pytest_twisted.async_fixture(name='increment')
async def fixture_increment():
counts = itertools.count()
{maybe_async}def increment():
return next(counts)
return increment
@pytest_twisted.async_fixture(name='doubleincrement')
async def fixture_doubleincrement(increment):
{maybe_async}def doubleincrement():
n = {maybe_await}increment()
return n * 2
return doubleincrement
@pytest_twisted.ensureDeferred
async def test_increment(increment):
first = {maybe_await}increment()
second = {maybe_await}increment()
assert (first, second) == (0, 1)
@pytest_twisted.ensureDeferred
async def test_doubleincrement(doubleincrement):
first = {maybe_await}doubleincrement()
second = {maybe_await}doubleincrement()
assert (first, second) == (0, 2)
""".format(maybe_async=maybe_async, maybe_await=maybe_await)
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 2})
# assert_outcomes(rr, {"passed": 1})
@skip_if_no_async_generators()
@pytest.mark.parametrize('innerasync', [
pytest.param(truth, id='innerasync={}'.format(truth))
for truth in [True, False]
])
def test_async_yield_fixture_in_fixture(testdir, cmd_opts, innerasync):
maybe_async = 'async ' if innerasync else ''
maybe_await = 'await ' if innerasync else ''
test_file = """
import itertools
from twisted.internet import reactor, defer
import pytest
import pytest_twisted
@pytest_twisted.async_yield_fixture(name='increment')
async def fixture_increment():
counts = itertools.count()
{maybe_async}def increment():
return next(counts)
yield increment
@pytest_twisted.async_yield_fixture(name='doubleincrement')
async def fixture_doubleincrement(increment):
{maybe_async}def doubleincrement():
n = {maybe_await}increment()
return n * 2
yield doubleincrement
@pytest_twisted.ensureDeferred
async def test_increment(increment):
first = {maybe_await}increment()
second = {maybe_await}increment()
assert (first, second) == (0, 1)
@pytest_twisted.ensureDeferred
async def test_doubleincrement(doubleincrement):
first = {maybe_await}doubleincrement()
second = {maybe_await}doubleincrement()
assert (first, second) == (0, 2)
""".format(maybe_async=maybe_async, maybe_await=maybe_await)
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 2})
def test_blockon_in_hook(testdir, cmd_opts, request):
skip_if_reactor_not(request, "default")
conftest_file = """
import pytest_twisted
from twisted.internet import reactor, defer
def pytest_configure(config):
pytest_twisted.init_default_reactor()
d1, d2 = defer.Deferred(), defer.Deferred()
reactor.callLater(0.01, d1.callback, 1)
reactor.callLater(0.02, d2.callback, 1)
pytest_twisted.blockon(d1)
pytest_twisted.blockon(d2)
"""
testdir.makeconftest(conftest_file)
test_file = """
from twisted.internet import reactor, defer
def test_succeed():
d = defer.Deferred()
reactor.callLater(0.01, d.callback, 1)
return d
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 1})
def test_wrong_reactor(testdir, cmd_opts, request):
skip_if_reactor_not(request, "default")
conftest_file = """
def pytest_addhooks():
import twisted.internet.reactor
twisted.internet.reactor = None
"""
testdir.makeconftest(conftest_file)
test_file = """
def test_succeed():
pass
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert "WrongReactorAlreadyInstalledError" in rr.stderr.str()
def test_blockon_in_hook_with_qt5reactor(testdir, cmd_opts, request):
skip_if_reactor_not(request, "qt5reactor")
conftest_file = """
import pytest_twisted
import pytestqt
from twisted.internet import defer
def pytest_configure(config):
pytest_twisted.init_qt5_reactor()
d = defer.Deferred()
from twisted.internet import reactor
reactor.callLater(0.01, d.callback, 1)
pytest_twisted.blockon(d)
"""
testdir.makeconftest(conftest_file)
test_file = """
from twisted.internet import reactor, defer
def test_succeed():
d = defer.Deferred()
reactor.callLater(0.01, d.callback, 1)
return d
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 1})
def test_wrong_reactor_with_qt5reactor(testdir, cmd_opts, request):
skip_if_reactor_not(request, "qt5reactor")
conftest_file = """
def pytest_addhooks():
import twisted.internet.default
twisted.internet.default.install()
"""
testdir.makeconftest(conftest_file)
test_file = """
def test_succeed():
pass
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert "WrongReactorAlreadyInstalledError" in rr.stderr.str()
def test_pytest_from_reactor_thread(testdir, cmd_opts, request):
skip_if_reactor_not(request, "default")
test_file = """
import pytest
import pytest_twisted
from twisted.internet import reactor, defer
@pytest.fixture
def fix():
d = defer.Deferred()
reactor.callLater(0.01, d.callback, 42)
return pytest_twisted.blockon(d)
def test_simple(fix):
assert fix == 42
@pytest_twisted.inlineCallbacks
def test_fail():
d = defer.Deferred()
reactor.callLater(0.01, d.callback, 1)
yield d
assert False
"""
testdir.makepyfile(test_file)
runner_file = """
import pytest
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.internet.threads import deferToThread
codes = []
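        # run pytest in a worker thread while the reactor keeps the main thread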
@inlineCallbacks
def main():
try:
codes.append((yield deferToThread(pytest.main, ['-k simple'])))
codes.append((yield deferToThread(pytest.main, ['-k fail'])))
finally:
reactor.stop()
if __name__ == '__main__':
reactor.callLater(0, main)
reactor.run()
codes == [0, 1] or exit(1)
"""
testdir.makepyfile(runner=runner_file)
# check test file is ok in standalone mode:
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 1, "failed": 1})
# test embedded mode:
assert testdir.run(sys.executable, "runner.py", timeout=timeout).ret == 0
def test_blockon_in_hook_with_asyncio(testdir, cmd_opts, request):
skip_if_reactor_not(request, "asyncio")
conftest_file = """
import pytest
import pytest_twisted
from twisted.internet import defer
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
pytest_twisted._use_asyncio_selector_if_required(config=config)
pytest_twisted.init_asyncio_reactor()
d = defer.Deferred()
from twisted.internet import reactor
reactor.callLater(0.01, d.callback, 1)
pytest_twisted.blockon(d)
"""
testdir.makeconftest(conftest_file)
test_file = """
from twisted.internet import reactor, defer
def test_succeed():
d = defer.Deferred()
reactor.callLater(0.01, d.callback, 1)
return d
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 1})
def test_wrong_reactor_with_asyncio(testdir, cmd_opts, request):
skip_if_reactor_not(request, "asyncio")
conftest_file = """
import pytest
import pytest_twisted
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
pytest_twisted._use_asyncio_selector_if_required(config=config)
def pytest_addhooks():
import twisted.internet.default
twisted.internet.default.install()
"""
testdir.makeconftest(conftest_file)
test_file = """
def test_succeed():
pass
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert "WrongReactorAlreadyInstalledError" in rr.stderr.str()
@skip_if_no_async_generators()
def test_async_fixture_module_scope(testdir, cmd_opts):
test_file = """
from twisted.internet import reactor, defer
import pytest
import pytest_twisted
check_me = 0
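        # module scope: one setup serves both tests, so teardown expects the value left by the last test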
@pytest_twisted.async_yield_fixture(scope="module")
async def foo():
global check_me
if check_me != 0:
raise Exception('check_me already modified before fixture run')
check_me = 1
yield 42
if check_me != 3:
raise Exception(
'check_me not updated properly: {}'.format(check_me),
)
check_me = 0
def test_first(foo):
global check_me
assert check_me == 1
assert foo == 42
check_me = 2
def test_second(foo):
global check_me
assert check_me == 2
assert foo == 42
check_me = 3
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 2})
def test_inlinecallbacks_method_with_fixture_gets_self(testdir, cmd_opts):
test_file = """
import pytest
import pytest_twisted
from twisted.internet import defer
@pytest.fixture
def foo():
return 37
class TestClass:
@pytest_twisted.inlineCallbacks
def test_self_isinstance(self, foo):
d = defer.succeed(None)
yield d
assert isinstance(self, TestClass)
"""
testdir.makepyfile(test_file)
    rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 1})
def test_inlinecallbacks_method_with_fixture_gets_fixture(testdir, cmd_opts):
test_file = """
import pytest
import pytest_twisted
from twisted.internet import defer
@pytest.fixture
def foo():
return 37
class TestClass:
@pytest_twisted.inlineCallbacks
def test_self_isinstance(self, foo):
d = defer.succeed(None)
yield d
assert foo == 37
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 1})
@skip_if_no_async_await()
def test_ensuredeferred_method_with_fixture_gets_self(testdir, cmd_opts):
test_file = """
import pytest
import pytest_twisted
@pytest.fixture
def foo():
return 37
class TestClass:
@pytest_twisted.ensureDeferred
async def test_self_isinstance(self, foo):
assert isinstance(self, TestClass)
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 1})
@skip_if_no_async_await()
def test_ensuredeferred_method_with_fixture_gets_fixture(testdir, cmd_opts):
test_file = """
import pytest
import pytest_twisted
@pytest.fixture
def foo():
return 37
class TestClass:
@pytest_twisted.ensureDeferred
async def test_self_isinstance(self, foo):
assert foo == 37
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 1})
def test_import_pytest_twisted_in_conftest_py_not_a_problem(testdir, cmd_opts):
conftest_file = """
import pytest
import pytest_twisted
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
pytest_twisted._use_asyncio_selector_if_required(config=config)
"""
testdir.makeconftest(conftest_file)
test_file = """
import pytest_twisted
def test_succeed():
pass
"""
testdir.makepyfile(test_file)
rr = testdir.run(*cmd_opts, timeout=timeout)
assert_outcomes(rr, {"passed": 1})
| 26.691499
| 125
| 0.663044
|
| true
| true
|
f71966a40eb176b3c19c2ff7f010677d27b381e5
| 2,799
|
py
|
Python
|
channel-api/tests/integration/conftest.py
|
xcantera/demo-provide-baseline
|
985f391973fa6ca0761104b55077fded28f152fc
|
[
"CC0-1.0"
] | 3
|
2020-11-17T23:19:20.000Z
|
2021-03-29T15:08:56.000Z
|
channel-api/tests/integration/conftest.py
|
xcantera/demo-provide-baseline
|
985f391973fa6ca0761104b55077fded28f152fc
|
[
"CC0-1.0"
] | null | null | null |
channel-api/tests/integration/conftest.py
|
xcantera/demo-provide-baseline
|
985f391973fa6ca0761104b55077fded28f152fc
|
[
"CC0-1.0"
] | 1
|
2020-12-11T00:26:33.000Z
|
2020-12-11T00:26:33.000Z
|
import pytest
from http import HTTPStatus
import urllib
import requests
from src import repos
from libtrustbridge.utils.conf import env_s3_config, env_queue_config, env
NOTIFICATIONS_REPO = env_queue_config('NOTIFICATIONS_REPO')
DELIVERY_OUTBOX_REPO = env_queue_config('DELIVERY_OUTBOX_REPO')
SUBSCRIPTIONS_REPO = env_s3_config('SUBSCRIPTIONS_REPO')
CHANNEL_REPO = env_queue_config('CHANNEL_REPO')
ENDPOINT = env('ENDPOINT', default='AU')
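# function-scoped repo fixtures clear their backing stores up front so no state leaks between tests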
@pytest.fixture(scope='function')
def notifications_repo():
repo = repos.Notifications(NOTIFICATIONS_REPO)
repo.WAIT_FOR_MESSAGE_SECONDS = 1
repo._unsafe_method__clear()
yield repo
@pytest.fixture(scope='function')
def delivery_outbox_repo():
repo = repos.DeliveryOutbox(DELIVERY_OUTBOX_REPO)
repo.WAIT_FOR_MESSAGE_SECONDS = 1
repo._unsafe_method__clear()
yield repo
@pytest.fixture(scope='function')
def subscriptions_repo():
repo = repos.Subscriptions(SUBSCRIPTIONS_REPO)
repo._unsafe_method__clear()
yield repo
@pytest.fixture(scope='function')
def channel_repo():
repo = repos.Channel(CHANNEL_REPO)
repo.WAIT_FOR_MESSAGE_SECONDS = 1
repo._unsafe_method__clear()
yield repo
class CallbackServer:
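    # thin HTTP client for the test callback server: fetches/clears recorded callbacks and builds valid/invalid callback URLs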
    def __init__(self, base_url):
        self.base_url = base_url if base_url.endswith('/') else base_url + '/'
def get_callback_record(self, index):
url = urllib.parse.urljoin(self.base_url, f'callbacks/{index}')
response = requests.get(url)
if response.status_code == HTTPStatus.OK:
return response.json()
elif response.status_code == HTTPStatus.NOT_FOUND:
return None
else:
raise Exception(f'Unexpected response:{response.status_code}')
def get_callback_records(self):
url = urllib.parse.urljoin(self.base_url, 'callbacks')
response = requests.get(url)
if response.status_code == HTTPStatus.OK:
return response.json()
else:
raise Exception(f'Unexpected response:{response.status_code}')
    def clear_callback_records(self):
        url = urllib.parse.urljoin(self.base_url, 'callbacks')
        response = requests.delete(url)
        if response.status_code != HTTPStatus.OK:
            raise Exception(f'Unexpected response:{response.status_code}')
def valid_callback_url(self, id):
return urllib.parse.urljoin(self.base_url, f'callback/valid/{id}')
def invalid_callback_url(self, id):
return urllib.parse.urljoin(self.base_url, f'callback/invalid/{id}')
@pytest.fixture(scope='function')
def callback_server():
callback_server = CallbackServer('http://baseline-channel-api-callback-server:11001')
callback_server.clear_callback_records()
yield callback_server
| 31.1
| 89
| 0.71597
|
| true
| true
|
f7196801f8fa58470aa03ad73efa1012011af858
| 28,195
|
py
|
Python
|
sacla/scripts/backups/sacla3_Chip_Manager_v7BAK.py
|
beamline-i24/DiamondChips
|
02fb58a95ad2c1712c41b641eb5f197d688c54c3
|
[
"Apache-2.0"
] | null | null | null |
sacla/scripts/backups/sacla3_Chip_Manager_v7BAK.py
|
beamline-i24/DiamondChips
|
02fb58a95ad2c1712c41b641eb5f197d688c54c3
|
[
"Apache-2.0"
] | null | null | null |
sacla/scripts/backups/sacla3_Chip_Manager_v7BAK.py
|
beamline-i24/DiamondChips
|
02fb58a95ad2c1712c41b641eb5f197d688c54c3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import pv, os, re, sys
import math, time, string
import numpy as np
from time import sleep
from ca import caput, caget
import logging as lg
import sacla3_Chip_StartUp_v7 as startup
import sacla3_Chip_Mapping_v7 as mapping
lg.basicConfig(format='%(asctime)s %(levelname)s: \t%(message)s',level=lg.DEBUG, filename='SACLA3v7.log')
##############################################
# MANAGER MANAGER MANAGER MANAGER MANAGER #
# This version last edited 03Sep2017 by DAS #
# Prep for SACLA3 #
##############################################
def initialise():
lg.info('INITIALISED')
lg.warning('INITIALISED')
lg.debug('INITIALISED')
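    # set conservative velocities, accelerations and soft travel limits for the chip stages and the filter axis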
caput(pv.me14e_stage_x + '.VMAX', 15)
caput(pv.me14e_stage_y + '.VMAX', 15)
caput(pv.me14e_stage_z + '.VMAX', 15)
caput(pv.me14e_filter + '.VMAX', 15)
caput(pv.me14e_stage_x + '.VELO', 15)
caput(pv.me14e_stage_y + '.VELO', 15)
caput(pv.me14e_stage_z + '.VELO', 15)
caput(pv.me14e_filter + '.VELO', 15)
caput(pv.me14e_stage_x + '.ACCL', 0.01)
caput(pv.me14e_stage_y + '.ACCL', 0.01)
caput(pv.me14e_stage_z + '.ACCL', 0.01)
caput(pv.me14e_filter + '.ACCL', 0.01)
caput(pv.me14e_stage_x + '.HLM', 30)
caput(pv.me14e_stage_x + '.LLM', -30)
caput(pv.me14e_stage_y + '.HLM', 30)
caput(pv.me14e_stage_y + '.LLM', -30)
caput(pv.me14e_stage_z + '.HLM', 5.1)
caput(pv.me14e_stage_z + '.LLM', -4.1)
caput(pv.me14e_filter + '.HLM', 0.1)
caput(pv.me14e_filter + '.LLM', -45.0)
caput('ME14E-MO-IOC-01:GP1', 0)
caput('ME14E-MO-IOC-01:GP2', 0)
print 'Clearing'
for i in range(3, 100):
pvar = 'ME14E-MO-IOC-01:GP' + str(i)
val = caput(pvar, 1)
sys.stdout.write('.')
sys.stdout.flush()
print '\nDONT FORGET TO DO THIS: export EPICS_CA_ADDR_LIST=172.23.190.255'
print 'DONT FORGET TO DO THIS: export EPICS_CA_AUTO_ADDR_LIST=NO'
print 'Initialisation Complete'
def write_parameter_file():
print '\n\n', 10*'set', '\n'
#param_path = '/dls_sw/i24/scripts/fastchips/parameter_files/'
param_path = '/localhome/local/Documents/sacla/parameter_files/'
param_fid = 'parameters.txt'
print 'Writing Parameter File\n', param_path+param_fid
    lg.info('Writing Parameter File %s', param_path + param_fid)
lg.info('CHIP_MANAGER\twrite_parameter_file:Writing')
f = open(param_path + param_fid,'w')
chip_name = caget(pv.me14e_chip_name)
f.write('chip_name \t%s\n' %chip_name)
print 'chip_name:', chip_name
#f.write('path \t%s\n' %path)
#print 'path:', path
protein_name = caget(pv.me14e_filepath)
f.write('protein_name \t%s\n' %protein_name)
print 'protein_name:', protein_name
n_exposures = caget(pv.me14e_gp3)
f.write('n_exposures \t%s\n' %n_exposures)
print 'n_exposures', n_exposures
chip_type = caget(pv.me14e_gp1)
    #### Hack for SACLA3: treat bismuth chip type 3 (oxford inner) as oxford chip type 1
if str(chip_type) =='3':
chip_type = '1'
f.write('chip_type \t%s\n' %chip_type)
print 'chip_type', chip_type
map_type = caget(pv.me14e_gp2)
f.write('map_type \t%s\n' %map_type)
print 'map_type', map_type
f.close()
print '\n', 10*'set', '\n\n'
def define_current_chip(chipid):
load_stock_map('clear')
"""
Not sure what this is for:
print 'Setting Mapping Type to Lite'
caput(pv.me14e_gp2, 1)
"""
chip_type = caget(pv.me14e_gp1)
print chip_type, chipid
if chipid == 'toronto':
caput(pv.me14e_gp1, 0)
elif chipid == 'oxford':
caput(pv.me14e_gp1, 1)
elif chipid == 'hamburg':
caput(pv.me14e_gp1, 2)
elif chipid == 'hamburgfull':
caput(pv.me14e_gp1, 2)
elif chipid == 'bismuth1':
caput(pv.me14e_gp1, 3)
elif chipid == 'bismuth2':
caput(pv.me14e_gp1, 4)
elif chipid == 'regina':
caput(pv.me14e_gp1, 5)
#param_path = '/dls_sw/i24/scripts/fastchips/parameter_files/'
param_path = '/localhome/local/Documents/sacla/parameter_files/'
f = open(param_path + chipid + '.pvar', 'r')
for line in f.readlines():
s = line.rstrip('\n')
print s
if line.startswith('#'):
continue
caput(pv.me14e_pmac_str, s)
print param_path + chipid + '.chip'
print 10*'Done '
def save_screen_map():
#litemap_path = '/dls_sw/i24/scripts/fastchips/litemaps/'
litemap_path = '/localhome/local/Documents/sacla/parameter_files/'
print '\n\nSaving', litemap_path + 'currentchip.map'
f = open(litemap_path + 'currentchip.map','w')
print 'Printing only blocks with block_val == 1'
for x in range(1, 82):
block_str = 'ME14E-MO-IOC-01:GP%i' %(x+10)
block_val = caget(block_str)
if block_val == 1:
print block_str, block_val
line = '%02dstatus P3%02d1 \t%s\n' %(x, x, block_val)
f.write(line)
f.close()
print 10*'Done '
return 0
def upload_parameters(chipid):
if chipid == 'toronto':
caput(pv.me14e_gp1, 0)
width = 9
elif chipid == 'oxford':
caput(pv.me14e_gp1, 1)
width = 8
elif chipid == 'hamburg':
caput(pv.me14e_gp1, 2)
width = 3
elif chipid == 'bismuth1':
caput(pv.me14e_gp1, 3)
width = 1
elif chipid == 'bismuth2':
caput(pv.me14e_gp1, 4)
width = 7
elif chipid == 'regina':
caput(pv.me14e_gp1, 5)
width = 7
#litemap_path = '/dls_sw/i24/scripts/fastchips/litemaps/'
litemap_path = '/localhome/local/Documents/sacla/parameter_files/'
f = open(litemap_path + 'currentchip.map','r')
print 'chipid', chipid
print width
x = 1
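    # caput each block's status assignment (P3xx1=value) to the PMAC, echoing a width x width grid to the console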
for line in f.readlines()[:width**2]:
cols = line.split( )
pvar = cols[1]
value = cols[2]
s = pvar +'='+ value
if value != '1':
s2 = pvar + ' '
sys.stdout.write(s2)
else:
sys.stdout.write(s+' ')
sys.stdout.flush()
if x == width:
print
x = 1
else:
x += 1
caput(pv.me14e_pmac_str, s)
sleep(0.02)
print
print 'Setting Mapping Type to Lite'
caput(pv.me14e_gp2, 1)
print 10*'Done '
def upload_full():
#fullmap_path = '/dls_sw/i24/scripts/fastchips/fullmaps/'
fullmap_path = '/localhome/local/Documents/sacla/parameter_files/'
f = open(fullmap_path + 'currentchip.full', 'r').readlines()
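    # the .full map stores PMAC commands as pairs of lines; join each pair and send it as one string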
for x in range(len(f) / 2):
pmac_list = []
for i in range(2):
pmac_list.append(f.pop(0).rstrip('\n'))
writeline = " ".join(pmac_list)
print writeline
caput(pv.me14e_pmac_str, writeline)
sleep(0.02)
print 10*'Done '
def load_stock_map(map_choice):
print 'Please wait, adjusting lite map'
#
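    # stock map presets: block-number lists forming centred square regions (3x3, 5x5, ...) for each chip layout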
r33 = [19,18,17,26,31,32,33,24,25]
r55 = [9,10,11,12,13,16,27,30,41,40,39,38,37,34,23,20] + r33
r77 = [7,6,5,4,3,2,1,14,15,28,29,42,43,44,45,46,47,48,49,36,35,22,21,8] + r55
#
h33 = [3,2,1,6,7,8,9,4,5]
x33 = [31,32,33,40,51,50,49,42,41]
x55 = [25,24,23,22,21,34,39,52,57,58,59,60,61,48,43,30] + x33
x77 = [11,12,13,14,15,16,17,20,35,38,53,56,71,70,69,68,67,66,65,62,47,44,29,26] + x55
x99 = [9,8,7,6,5,4,3,2,1,18,19,36,37,54,55,72,73,74,75,76,77,78,79,80,81,64,63,46,45,28,27,10] + x77
x44 = [22,21,20,19,30,35,46,45,44,43,38,27,28,29,36,37]
x49 = [x+1 for x in range(49)]
x66 = [10,11,12,13,14,15,18,31,34,47,50,51,52,53,54,55,42,39,26,23] + x44
x88 = [8,7,6,5,4,3,2,1,16,17,32,33,48,49,64,63,62,61,60,59,58,57,56,41,40,25,24,9] + x66
map_dict = {}
map_dict['clear']= [1]
#
map_dict['r33'] = r33
map_dict['r55'] = r55
map_dict['r77'] = r77
#
map_dict['h33'] = h33
#
map_dict['x33'] = x33
map_dict['x44'] = x44
map_dict['x49'] = x49
map_dict['x55'] = x55
map_dict['x66'] = x66
map_dict['x77'] = x77
map_dict['x88'] = x88
map_dict['x99'] = x99
print 'Clearing'
for i in range(1, 82):
pvar = 'ME14E-MO-IOC-01:GP' + str(i + 10)
caput(pvar, 0)
sys.stdout.write('.')
sys.stdout.flush()
print '\nmap cleared'
print 'loading map_choice', map_choice
for i in map_dict[map_choice]:
pvar = 'ME14E-MO-IOC-01:GP' + str(i + 10)
caput(pvar, 1)
print 10*'Done '
def load_lite_map():
load_stock_map('clear')
toronto_block_dict = {\
'A1':'01', 'A2':'02', 'A3':'03', 'A4':'04', 'A5':'05', 'A6':'06','A7':'07', 'A8':'08', 'A9':'09'
,'B1':'18', 'B2':'17', 'B3':'16', 'B4':'15', 'B5':'14', 'B6':'13','B7':'12', 'B8':'11', 'B9':'10'
,'C1':'19', 'C2':'20', 'C3':'21', 'C4':'22', 'C5':'23', 'C6':'24','C7':'25', 'C8':'26', 'C9':'27'
,'D1':'36', 'D2':'35', 'D3':'34', 'D4':'33', 'D5':'32', 'D6':'31','D7':'30', 'D8':'29', 'D9':'28'
,'E1':'37', 'E2':'38', 'E3':'39', 'E4':'40', 'E5':'41', 'E6':'42','E7':'43', 'E8':'44', 'E9':'45'
,'F1':'54', 'F2':'53', 'F3':'52', 'F4':'51', 'F5':'50', 'F6':'49','F7':'48', 'F8':'47', 'F9':'46'
,'G1':'55', 'G2':'56', 'G3':'57', 'G4':'58', 'G5':'59', 'G6':'60','G7':'61', 'G8':'62', 'G9':'63'
,'H1':'72', 'H2':'71', 'H3':'70', 'H4':'69', 'H5':'68', 'H6':'67','H7':'66', 'H8':'65', 'H9':'64'
,'I1':'73', 'I2':'74', 'I3':'75', 'I4':'76', 'I5':'77', 'I6':'78','I7':'79', 'I8':'80', 'I9':'81'}
    #Oxford_block_dict is wrong (columns and rows need to be flipped); the script below generates the dict automatically, but this one is kept for backwards compatibility/reference
oxford_block_dict = {\
'A1':'01', 'A2':'02', 'A3':'03', 'A4':'04', 'A5':'05', 'A6':'06','A7':'07', 'A8':'08'
,'B1':'16', 'B2':'15', 'B3':'14', 'B4':'13', 'B5':'12', 'B6':'11','B7':'10', 'B8':'09'
,'C1':'17', 'C2':'18', 'C3':'19', 'C4':'20', 'C5':'21', 'C6':'22','C7':'23', 'C8':'24'
,'D1':'32', 'D2':'31', 'D3':'30', 'D4':'29', 'D5':'28', 'D6':'27','D7':'26', 'D8':'25'
,'E1':'33', 'E2':'34', 'E3':'35', 'E4':'36', 'E5':'37', 'E6':'38','E7':'39', 'E8':'40'
,'F1':'48', 'F2':'47', 'F3':'46', 'F4':'45', 'F5':'44', 'F6':'43','F7':'42', 'F8':'41'
,'G1':'49', 'G2':'50', 'G3':'51', 'G4':'52', 'G5':'53', 'G6':'54','G7':'55', 'G8':'56'
,'H1':'64', 'H2':'63', 'H3':'62', 'H4':'61', 'H5':'60', 'H6':'59','H7':'58', 'H8':'57'}
regina_block_dict = {\
'A1':'01', 'A2':'02', 'A3':'03', 'A4':'04', 'A5':'05', 'A6':'06','A7':'07'
,'B1':'14', 'B2':'13', 'B3':'12', 'B4':'11', 'B5':'10', 'B6':'09','B7':'08'
,'C1':'15', 'C2':'16', 'C3':'17', 'C4':'18', 'C5':'19', 'C6':'20','C7':'21'
,'D1':'28', 'D2':'27', 'D3':'26', 'D4':'25', 'D5':'24', 'D6':'23','D7':'22'
,'E1':'29', 'E2':'30', 'E3':'31', 'E4':'32', 'E5':'33', 'E6':'34','E7':'35'
,'F1':'42', 'F2':'41', 'F3':'40', 'F4':'39', 'F5':'38', 'F6':'37','F7':'36'
,'G1':'43', 'G2':'44', 'G3':'45', 'G4':'46', 'G5':'47', 'G6':'48','G7':'49'}
hamburg_block_dict = {\
'A1':'01', 'A2':'02', 'A3':'03'
,'B1':'06', 'B2':'05', 'B3':'04'
,'C1':'07', 'C2':'08', 'C3':'09'}
chip_type = caget(pv.me14e_gp1)
if chip_type == 0:
print 'Toronto Block Order'
block_dict = toronto_block_dict
elif chip_type == 1:
print 'Oxford Block Order'
#block_dict = oxford_block_dict
rows = ['A','B','C','D','E','F','G','H']
columns = list(range(1,9))
btn_names = {}
flip = True
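        # number blocks in serpentine order: the label direction flips on each successive column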
for x, column in enumerate(columns):
for y,row in enumerate(rows):
i=x*8+y
if i%8 == 0 and flip == False:
flip = True
z = 8 - (y+1)
elif i%8 == 0 and flip == True:
flip = False
z = y
elif flip == False:
z = y
elif flip == True:
z = 8 - (y+1)
else:
print('something is wrong with chip grid creation')
break
button_name = str(row)+str(column)
lab_num = x*8+z
                label = '%02d' % (lab_num + 1)
btn_names[button_name] = label
#print button_name, btn_names[button_name]
block_dict = btn_names
elif chip_type == 2:
print 'Hamburg Block Order'
block_dict = hamburg_block_dict
elif chip_type == 5:
print 'Regina Block Order'
block_dict = regina_block_dict
#litemap_path = '/dls_sw/i24/scripts/fastchips/litemaps/'
litemap_path = '/localhome/local/Documents/sacla/parameter_files/'
litemap_fid = str(caget(pv.me14e_gp5)) + '.lite'
print 'opening', litemap_path + litemap_fid
f = open(litemap_path + litemap_fid, 'r')
print 'please wait, loading LITE map'
for line in f.readlines():
entry = line.split()
block_name = entry[0]
yesno = entry[1]
block_num = block_dict[block_name]
pvar = 'ME14E-MO-IOC-01:GP' + str(int(block_num) + 10)
print block_name, yesno, pvar
caput(pvar, yesno)
print 10*'Done '
def load_full_map(location='SACLA'):
if location == 'i24':
chip_name, visit, sub_dir, n_exposures, chip_type, map_type = startup.scrape_parameter_file(location)
else:
chip_name, sub_dir, n_exposures, chip_type, map_type = startup.scrape_parameter_file(location)
#fullmap_path = '/dls_sw/i24/scripts/fastchips/fullmaps/'
fullmap_path = '/localhome/local/Documents/sacla/parameter_files/'
fullmap_fid = fullmap_path + str(caget(pv.me14e_gp5)) + '.spec'
print 'opening', fullmap_fid
mapping.plot_file(fullmap_fid, chip_type)
print '\n\n', 10*'PNG '
    mapping.convert_chip_to_hex(fullmap_fid, chip_type)
os.system("cp %s %s" % (fullmap_fid[:-4]+'full', fullmap_path+'currentchip.full'))
print 10*'Done ', '\n'
def moveto(place):
print 5 * (place + ' ')
chip_type = caget(pv.me14e_gp1)
print 'CHIP TYPE', chip_type
if chip_type == 0:
print 'Toronto Move'
if place == 'origin':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
if place == 'f1':
caput(pv.me14e_stage_x, +18.975)
caput(pv.me14e_stage_y, 0.0)
if place == 'f2':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, +21.375)
elif chip_type == 1:
print 'Oxford Move'
if place == 'origin':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
if place == 'f1':
caput(pv.me14e_stage_x, 25.40)
caput(pv.me14e_stage_y, 0.0)
if place == 'f2':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 25.40)
elif chip_type == 2:
print 'Hamburg Move'
if place == 'origin':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
if place == 'f1':
#caput(pv.me14e_stage_x, +17.16)
caput(pv.me14e_stage_x, +24.968)
caput(pv.me14e_stage_y, 0.0)
if place == 'f2':
caput(pv.me14e_stage_x, 0.0)
#caput(pv.me14e_stage_y, -26.49)
caput(pv.me14e_stage_y, +24.968)
elif chip_type == 3:
print 'Oxford Inner Move'
if place == 'origin':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
if place == 'f1':
caput(pv.me14e_stage_x, 24.60)
caput(pv.me14e_stage_y, 0.0)
if place == 'f2':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 24.60)
elif chip_type == 5:
print 'Regina Move'
if place == 'origin':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
if place == 'f1':
caput(pv.me14e_stage_x, +17.175)
caput(pv.me14e_stage_y, 0.0)
if place == 'f2':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, +17.175)
else:
print 'Unknown chip_type move'
# Non Chip Specific Move
if place == 'zero':
caput(pv.me14e_pmac_str, '!x0y0z0')
elif place == 'yag':
caput(pv.me14e_stage_x, 1.0)
caput(pv.me14e_stage_y, 1.0)
caput(pv.me14e_stage_z, 1.0)
elif place == 'load_position':
print 'load position'
caput(pv.me14e_filter, -25)
caput(pv.me14e_stage_x, -25.0)
caput(pv.me14e_stage_y, -25.0)
caput(pv.me14e_stage_z, 0.0)
caput(pv.me14e_pmac_str, 'M512=0 M511=1')
#caput(pv.absb_mp_select, 'Robot')
#caput(pv.ap1_mp_select, 'Robot')
#caput(pv.blight_mp_select, 'Out')
#caput(pv.det_z, 1480)
elif place == 'collect_position':
print 'collect position'
caput(pv.me14e_filter, 25)
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
caput(pv.me14e_stage_z, 0.0)
caput(pv.me14e_pmac_str, 'M512=0 M511=1')
#caput(pv.absb_mp_select, 'Data Collection')
#caput(pv.ap1_mp_select, 'In')
#caput(pv.blight_mp_select, 'In')
elif place == 'lightin':
print 'light in'
caput(pv.me14e_filter, 25)
elif place == 'lightout':
print 'light out'
caput(pv.me14e_filter, -25)
elif place == 'flipperin':
##### nb need M508=100 M509 =150 somewhere
caput(pv.me14e_pmac_str, 'M512=0 M511=1')
elif place == 'flipperout':
caput(pv.me14e_pmac_str, ' M512=1 M511=1')
def scrape_mtr_directions():
#param_path = '/dls_sw/i24/scripts/fastchips/parameter_files/'
param_path = '/localhome/local/Documents/sacla/parameter_files/'
f = open(param_path + 'motor_direction.txt', 'r')
mtr1_dir, mtr2_dir, mtr3_dir = 1,1,1
for line in f.readlines():
if line.startswith('mtr1'):
mtr1_dir = float(int(line.split('=')[1]))
elif line.startswith('mtr2'):
mtr2_dir = float(int(line.split('=')[1]))
elif line.startswith('mtr3'):
mtr3_dir = float(int(line.split('=')[1]))
else:
continue
f.close()
return mtr1_dir, mtr2_dir, mtr3_dir
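# scrape_mtr_directions() expects a simple key=value file, one motor per line.
# A hypothetical motor_direction.txt (values illustrative, not calibration data):
#   mtr1=-1
#   mtr2=1
#   mtr3=1
# Lines that do not start with mtr1/mtr2/mtr3 are skipped.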
def fiducial(point):
scale = 10000.0
#param_path = '/dls_sw/i24/scripts/fastchips/parameter_files/'
param_path = '/localhome/local/Documents/sacla/parameter_files/'
mtr1_dir, mtr2_dir, mtr3_dir = scrape_mtr_directions()
rbv_1 = caget(pv.me14e_stage_x + '.RBV')
rbv_2 = caget(pv.me14e_stage_y + '.RBV')
rbv_3 = caget(pv.me14e_stage_z + '.RBV')
raw_1 = caget(pv.me14e_stage_x + '.RRBV')
raw_2 = caget(pv.me14e_stage_y + '.RRBV')
raw_3 = caget(pv.me14e_stage_z + '.RRBV')
"""
June 8th 2017 change from this to rbv
f_x = (mtr1_dir*raw_1) / scale
f_y = (mtr2_dir*raw_2) / scale
f_z = (mtr3_dir*raw_3) / scale
"""
f_x = rbv_1
f_y = rbv_2
f_z = rbv_3
print '\nWriting Fiducial File', 20*('%s ' %point)
print 'MTR\tRBV\tRAW\tDirect.\tf_value'
print 'MTR1\t%1.4f\t%i\t%i\t%1.4f' % (rbv_1, raw_1, mtr1_dir, f_x)
print 'MTR2\t%1.4f\t%i\t%i\t%1.4f' % (rbv_2, raw_2, mtr2_dir, f_y)
print 'MTR3\t%1.4f\t%i\t%i\t%1.4f' % (rbv_3, raw_3, mtr3_dir, f_z)
print 'Writing Fiducial File', 20*('%s ' %point)
f = open(param_path + 'fiducial_%s.txt' %point, 'w')
f.write('MTR\tRBV\tRAW\tCorr\tf_value\n')
f.write('MTR1\t%1.4f\t%i\t%i\t%1.4f\n' % (rbv_1, raw_1, mtr1_dir, f_x))
f.write('MTR2\t%1.4f\t%i\t%i\t%1.4f\n' % (rbv_2, raw_2, mtr2_dir, f_y))
f.write('MTR3\t%1.4f\t%i\t%i\t%1.4f' % (rbv_3, raw_3, mtr3_dir, f_z))
f.close()
print 10*'Done '
def scrape_mtr_fiducials(point):
#param_path = '/dls_sw/i24/scripts/fastchips/parameter_files/'
param_path = '/localhome/local/Documents/sacla/parameter_files/'
f = open(param_path+'fiducial_%i.txt' %point,'r')
f_lines = f.readlines()[1:]
f_x = float(f_lines[0].rsplit()[4])
f_y = float(f_lines[1].rsplit()[4])
f_z = float(f_lines[2].rsplit()[4])
f.close()
return f_x, f_y, f_z
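# Counterpart to fiducial() above: skips the header row and returns the fifth
# whitespace-separated column (f_value) of the MTR1/MTR2/MTR3 lines.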
def cs_maker():
chip_type = caget(pv.me14e_gp1)
fiducial_dict = {}
fiducial_dict[0] = [18.975, 21.375]
fiducial_dict[1] = [25.400, 25.400]
fiducial_dict[2] = [24.968, 24.968]
fiducial_dict[3] = [24.600, 24.600]
fiducial_dict[4] = [27.500, 27.500]
fiducial_dict[5] = [17.175, 17.175]
print chip_type, fiducial_dict[chip_type]
mtr1_dir, mtr2_dir, mtr3_dir = scrape_mtr_directions()
f1_x, f1_y, f1_z = scrape_mtr_fiducials(1)
f2_x, f2_y, f2_z = scrape_mtr_fiducials(2)
print 'AAAAAAAAAAAAAAAAABBBBBBBBBBBBBB'
print 'mtr1 direction', mtr1_dir
print 'mtr2 direction', mtr2_dir
print 'mtr3 direction', mtr3_dir
"""
Theory
Rx: rotation about X-axis, pitch
Ry: rotation about Y-axis, yaw
Rz: rotation about Z-axis, roll
The order of rotation is Roll->Yaw->Pitch (Rx*Ry*Rz)
Rx Ry Rz
|1 0 0| | Cy 0 Sy| |Cz -Sz 0| | CyCz -CxSz Sy |
|0 Cx -Sx|*| 0 1 0|*|Sz Cz 0| = | SxSyCz+CxSz -SxSySz+CxCz -SxCy|
|0 Sx Cx| |-Sy 0 Cy| | 0 0 1| |-CxSyCz+SxSz CxSySz+SxCz CxCy|
BELOW iS TEST TEST (CLOCKWISE)
Rx Ry Rz
|1 0 0| | Cy 0 -Sy| |Cz Sz 0| | CyCz CxSz -Sy |
|0 Cx Sx|*| 0 1 0|*|-Sz Cz 0| = | SxSyCz-CxSz SxSySz+CxCz SxCy|
|0 -Sx Cx| | Sy 0 Cy| | 0 0 1| | CxSyCz+SxSz CxSySz-SxCz CxCy|
"""
# Rotation Around Z #
# If stages upsidedown (I24) change sign of Sz
Sz1 = f1_y / fiducial_dict[chip_type][0]
Sz2 = -1 * (f2_x / fiducial_dict[chip_type][1])
Sz = ((Sz1 + Sz2) / 2)
Cz = np.sqrt((1 - Sz**2))
print 'Sz1 , %1.4f, %1.4f' % (Sz1, np.degrees(np.arcsin(Sz1)))
print 'Sz2 , %1.4f, %1.4f' % (Sz2, np.degrees(np.arcsin(Sz2)))
print 'Sz , %1.4f, %1.4f' % (Sz, np.degrees(np.arcsin(Sz)))
print 'Cz , %1.4f, %1.4f\n' % (Cz, np.degrees(np.arccos(Cz)))
# Rotation Around Y #
Sy = f1_z / fiducial_dict[chip_type][0]
Cy = np.sqrt((1 - Sy**2))
print 'Sy , %1.4f, %1.4f' % (Sy, np.degrees(np.arcsin(Sy)))
print 'Cy , %1.4f, %1.4f\n' % (Cy, np.degrees(np.arccos(Cy)))
# Rotation Around X #
# If stages upsidedown (I24) change sign of Sx
Sx = -1* f2_z / fiducial_dict[chip_type][1]
Cx = np.sqrt((1 - Sx**2))
print 'Sx , %1.4f, %1.4f' % (Sx, np.degrees(np.arcsin(Sx)))
print 'Cx , %1.4f, %1.4f\n' % (Cx, np.degrees(np.arccos(Cx)))
# Crucifix 1: In normal orientation (sat on table facing away)
# X=0.0000 , Y=0.0000, Z=0.0001000 (mm/cts for MRES and ERES)
#scalex,scaley,scalez = 10010.0, 10000.0, 10000.0
# Crucifix 1: In beamline position (upside down facing away)
# X=0.000099896 , Y=0.000099983, Z=0.0001000 (mm/cts for MRES and ERES)
scalex, scaley, scalez = 10010.4, 10001.7, 10000.0
# Crucifix 2: In normal orientation (sat on table facing away)
# X=0.0000999 , Y=0.00009996, Z=0.0001000 (mm/cts for MRES and ERES)
#scalex,scaley,scalez = 10010.0, 10004.0, 10000.0
# Temple 1: In normal orientation (sat on table facing away)
# X=0.0000 , Y=0.0000, Z=0.0001000 (mm/cts for MRES and ERES)
#scalex,scaley,scalez = 10008.0, 10002.0, 10000.0
    #Minus signs added Aug 17 in lab 30 while preparing for SACLA
    #(applied to y1factor and x2factor)
x1factor = mtr1_dir * scalex * (Cy * Cz)
y1factor = mtr2_dir * scaley * (-1. * Cx * Sz)
z1factor = mtr3_dir * scalez * Sy
x2factor = mtr1_dir * scalex * ((Sx*Sy*Cz) + (Cx*Sz))
y2factor = mtr2_dir * scaley * ((Cx*Cz) - (Sx*Sy*Sz))
z2factor = mtr3_dir * scalez * (-1. * Sx * Cy)
x3factor = mtr1_dir * scalex * ((Sx*Sz) - (Cx*Sy*Cz))
y3factor = mtr2_dir * scaley * ((Cx*Sy*Sz) + (Sx*Cz))
z3factor = mtr3_dir * scalez * (Cx* Cy)
"""
Rx Ry Rz
|1 0 0| | Cy 0 Sy| |Cz -Sz 0| | CyCz -CxSz Sy |
|0 Cx -Sx|*| 0 1 0|*|Sz Cz 0| = | SxSyCz+CxSz -SxSySz+CxCz -SxCy|
|0 Sx Cx| |-Sy 0 Cy| | 0 0 1| |-CxSyCz+SxSz CxSySz+SxCz CxCy|
"""
    # skew is the difference between Sz1 and Sz2 after the rotation is taken out.
    # It should be measured in situ prior to the experiment: by hand, using the
    # opposite and adjacent RBVs, after the scale factors have been calibrated.
#print 10*'WARNING\n', '\nHave you calculated skew?\n\n', 10*'WARNING\n'
# Crucifix 1 on table
#skew = -0.187
# Crucifix 1 on beamline
#skew = -0.1568
skew = 0.1863
# Crucifix 2
#skew = 0.060
# Temple 1
#skew = 0.02
print 'Skew being used is: %1.4f' %skew
s1 = np.degrees(np.arcsin(Sz1))
s2 = np.degrees(np.arcsin(Sz2))
rot = np.degrees(np.arcsin((Sz1+Sz2) / 2))
calc_skew = ((s1-rot) - (s2-rot))
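    # Algebraically (s1-rot) - (s2-rot) reduces to s1 - s2; the expanded form
    # is kept to mirror the definition of skew as the residual between the two
    # fiducial angles once the common rotation is removed.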
print 's1:%1.4f s2:%1.4f rot:%1.4f' %(s1, s2, rot)
print 'Calculated rotation from current fiducials is: %1.4f' %rot
print 'Calculated skew from current fiducials is: %1.4f' %calc_skew
#skew = calc_skew
sinD = np.sin((skew/2) * (np.pi/180))
cosD = np.cos((skew/2) * (np.pi/180))
new_x1factor = (x1factor * cosD) + (y1factor * sinD)
new_y1factor = (x1factor * sinD) + (y1factor * cosD)
new_x2factor = (x2factor * cosD) + (y2factor * sinD)
new_y2factor = (x2factor * sinD) + (y2factor * cosD)
cs1 = "#1->%+1.3fX%+1.3fY%+1.3fZ" % (new_x1factor, new_y1factor, z1factor)
cs2 = "#2->%+1.3fX%+1.3fY%+1.3fZ" % (new_x2factor, new_y2factor, z2factor)
cs3 = "#3->%+1.3fX%+1.3fY%+1.3fZ" % (x3factor, y3factor, z3factor)
print '\n'.join([cs1, cs2, cs3])
    print 'Each of these should be 1: the root sum of squares of a row of factors divided by its scale'
print np.sqrt(x1factor**2 + y1factor**2 + z1factor**2) / scalex
print np.sqrt(x2factor**2 + y2factor**2 + z2factor**2) / scaley
print np.sqrt(x3factor**2 + y3factor**2 + z3factor**2) / scalez
print 'Long wait, please be patient'
caput(pv.me14e_pmac_str, '!x0y0z0')
sleep(2.5)
caput(pv.me14e_pmac_str, '&2')
caput(pv.me14e_pmac_str, cs1)
caput(pv.me14e_pmac_str, cs2)
caput(pv.me14e_pmac_str, cs3)
caput(pv.me14e_pmac_str, '!x0y0z0')
sleep(0.1)
caput(pv.me14e_pmac_str, '#1hmz#2hmz#3hmz')
sleep(0.1)
print 5*'chip_type',type(chip_type)
# NEXT THREE LINES COMMENTED OUT FOR CS TESTS 5 JUNE
if str(chip_type) =='1':
caput(pv.me14e_pmac_str, '!x0.4y0.4')
sleep(0.1)
caput(pv.me14e_pmac_str, '#1hmz#2hmz#3hmz')
print 10*'CSDone '
else:
caput(pv.me14e_pmac_str, '#1hmz#2hmz#3hmz')
print 10*'CSDone '
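# Hedged sanity-check sketch (assumption: this helper is not part of the
# original beamline procedure; it only illustrates the Theory docstring in
# cs_maker). It rebuilds Rx*Ry*Rz with numpy and checks the product is a
# proper rotation (orthonormal, det = +1).
def _check_rotation_product(Sx, Sy, Sz):
    Cx, Cy, Cz = [np.sqrt(1 - s**2) for s in (Sx, Sy, Sz)]
    Rx = np.array([[1, 0, 0], [0, Cx, -Sx], [0, Sx, Cx]])
    Ry = np.array([[Cy, 0, Sy], [0, 1, 0], [-Sy, 0, Cy]])
    Rz = np.array([[Cz, -Sz, 0], [Sz, Cz, 0], [0, 0, 1]])
    R = Rx.dot(Ry).dot(Rz)
    # Note: the exact (1,2) element of this product is -Cy*Sz, whereas the
    # factors above use -Cx*Sz (y1factor); the two agree to first order for
    # the sub-degree angles measured from the fiducials.
    print 'orthonormal:', np.allclose(R.dot(R.T), np.eye(3))
    print 'det(R) == 1:', np.allclose(np.linalg.det(R), 1.0)
    return R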
def cs_reset():
cs1 = "#1->%+10000X%+0Y%+0Z"
cs2 = "#2->%+0X%+10000Y%+0Z"
cs3 = "#3->0X+0Y+10000Z"
print '\n'.join([cs1, cs2, cs3])
caput(pv.me14e_pmac_str, '&2')
sleep(0.5)
caput(pv.me14e_pmac_str, cs1)
sleep(0.5)
caput(pv.me14e_pmac_str, cs2)
sleep(0.5)
caput(pv.me14e_pmac_str, cs3)
print 10*'CSDone '
def main(args):
if args[1] == 'initialise':
initialise()
elif args[1] == 'pvar_test':
chipid = args[2]
pvar_test(chipid)
elif args[1] == 'moveto':
moveto(args[2])
elif args[1] == 'fiducial':
fiducial(args[2])
elif args[1] == 'cs_maker':
cs_maker()
elif args[1] == 'write_parameter_file':
write_parameter_file()
startup.run()
elif args[1] == 'define_current_chip':
chipid = args[2]
define_current_chip(chipid)
elif args[1] == 'load_stock_map':
map_choice = args[2]
load_stock_map(map_choice)
elif args[1] == 'load_lite_map':
load_lite_map()
elif args[1] == 'load_full_map':
load_full_map()
elif args[1] == 'save_screen_map':
save_screen_map()
elif args[1] == 'upload_full':
upload_full()
elif args[1] == 'upload_parameters':
chipid = args[2]
upload_parameters(chipid)
elif args[1] == 'cs_reset':
cs_reset()
else:
print 'Unknown Command'
if __name__ == '__main__':
main(sys.argv)
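# Typical invocations (script name illustrative):
#   python sacla3_chip_manager.py moveto origin
#   python sacla3_chip_manager.py fiducial 1
#   python sacla3_chip_manager.py cs_maker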
| 36.952818
| 171
| 0.563043
|
import pv, os, re, sys
import math, time, string
import numpy as np
from time import sleep
from ca import caput, caget
import logging as lg
import sacla3_Chip_StartUp_v7 as startup
import sacla3_Chip_Mapping_v7 as mapping
lg.basicConfig(format='%(asctime)s %(levelname)s: \t%(message)s',level=lg.DEBUG, filename='SACLA3v7.log')
    n_exposures = caget(pv.me14e_gp3)
f.write('n_exposures \t%s\n' %n_exposures)
print 'n_exposures', n_exposures
chip_type = caget(pv.me14e_gp1)
    f.write('chip_type \t%s\n' %chip_type)
    print 'chip_type', chip_type
    map_type = caget(pv.me14e_gp2)
f.write('map_type \t%s\n' %map_type)
print 'map_type', map_type
f.close()
print '\n', 10*'set', '\n\n'
def define_current_chip(chipid):
load_stock_map('clear')
"""
Not sure what this is for:
print 'Setting Mapping Type to Lite'
caput(pv.me14e_gp2, 1)
"""
chip_type = caget(pv.me14e_gp1)
print chip_type, chipid
if chipid == 'toronto':
caput(pv.me14e_gp1, 0)
elif chipid == 'oxford':
caput(pv.me14e_gp1, 1)
elif chipid == 'hamburg':
caput(pv.me14e_gp1, 2)
elif chipid == 'hamburgfull':
caput(pv.me14e_gp1, 2)
elif chipid == 'bismuth1':
caput(pv.me14e_gp1, 3)
elif chipid == 'bismuth2':
caput(pv.me14e_gp1, 4)
elif chipid == 'regina':
caput(pv.me14e_gp1, 5)
param_path = '/localhome/local/Documents/sacla/parameter_files/'
f = open(param_path + chipid + '.pvar', 'r')
for line in f.readlines():
s = line.rstrip('\n')
print s
if line.startswith('#'):
continue
caput(pv.me14e_pmac_str, s)
print param_path + chipid + '.chip'
print 10*'Done '
def save_screen_map():
litemap_path = '/localhome/local/Documents/sacla/parameter_files/'
print '\n\nSaving', litemap_path + 'currentchip.map'
f = open(litemap_path + 'currentchip.map','w')
print 'Printing only blocks with block_val == 1'
for x in range(1, 82):
block_str = 'ME14E-MO-IOC-01:GP%i' %(x+10)
block_val = caget(block_str)
if block_val == 1:
print block_str, block_val
line = '%02dstatus P3%02d1 \t%s\n' %(x, x, block_val)
f.write(line)
f.close()
print 10*'Done '
return 0
def upload_parameters(chipid):
if chipid == 'toronto':
caput(pv.me14e_gp1, 0)
width = 9
elif chipid == 'oxford':
caput(pv.me14e_gp1, 1)
width = 8
elif chipid == 'hamburg':
caput(pv.me14e_gp1, 2)
width = 3
elif chipid == 'bismuth1':
caput(pv.me14e_gp1, 3)
width = 1
elif chipid == 'bismuth2':
caput(pv.me14e_gp1, 4)
width = 7
elif chipid == 'regina':
caput(pv.me14e_gp1, 5)
width = 7
litemap_path = '/localhome/local/Documents/sacla/parameter_files/'
f = open(litemap_path + 'currentchip.map','r')
print 'chipid', chipid
print width
x = 1
for line in f.readlines()[:width**2]:
cols = line.split( )
pvar = cols[1]
value = cols[2]
s = pvar +'='+ value
if value != '1':
s2 = pvar + ' '
sys.stdout.write(s2)
else:
sys.stdout.write(s+' ')
sys.stdout.flush()
if x == width:
print
x = 1
else:
x += 1
caput(pv.me14e_pmac_str, s)
sleep(0.02)
print
print 'Setting Mapping Type to Lite'
caput(pv.me14e_gp2, 1)
print 10*'Done '
def upload_full():
fullmap_path = '/localhome/local/Documents/sacla/parameter_files/'
f = open(fullmap_path + 'currentchip.full', 'r').readlines()
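    # currentchip.full holds one PMAC command fragment per line; consecutive
    # pairs are joined and sent as a single pmac string below.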
    for x in range(len(f) // 2):
pmac_list = []
for i in range(2):
pmac_list.append(f.pop(0).rstrip('\n'))
writeline = " ".join(pmac_list)
print writeline
caput(pv.me14e_pmac_str, writeline)
sleep(0.02)
print 10*'Done '
def load_stock_map(map_choice):
print 'Please wait, adjusting lite map'
r33 = [19,18,17,26,31,32,33,24,25]
r55 = [9,10,11,12,13,16,27,30,41,40,39,38,37,34,23,20] + r33
r77 = [7,6,5,4,3,2,1,14,15,28,29,42,43,44,45,46,47,48,49,36,35,22,21,8] + r55
h33 = [3,2,1,6,7,8,9,4,5]
x33 = [31,32,33,40,51,50,49,42,41]
x55 = [25,24,23,22,21,34,39,52,57,58,59,60,61,48,43,30] + x33
x77 = [11,12,13,14,15,16,17,20,35,38,53,56,71,70,69,68,67,66,65,62,47,44,29,26] + x55
x99 = [9,8,7,6,5,4,3,2,1,18,19,36,37,54,55,72,73,74,75,76,77,78,79,80,81,64,63,46,45,28,27,10] + x77
x44 = [22,21,20,19,30,35,46,45,44,43,38,27,28,29,36,37]
x49 = [x+1 for x in range(49)]
x66 = [10,11,12,13,14,15,18,31,34,47,50,51,52,53,54,55,42,39,26,23] + x44
x88 = [8,7,6,5,4,3,2,1,16,17,32,33,48,49,64,63,62,61,60,59,58,57,56,41,40,25,24,9] + x66
map_dict = {}
map_dict['clear']= [1]
map_dict['r33'] = r33
map_dict['r55'] = r55
map_dict['r77'] = r77
map_dict['h33'] = h33
map_dict['x33'] = x33
map_dict['x44'] = x44
map_dict['x49'] = x49
map_dict['x55'] = x55
map_dict['x66'] = x66
map_dict['x77'] = x77
map_dict['x88'] = x88
map_dict['x99'] = x99
print 'Clearing'
for i in range(1, 82):
pvar = 'ME14E-MO-IOC-01:GP' + str(i + 10)
caput(pvar, 0)
sys.stdout.write('.')
sys.stdout.flush()
print '\nmap cleared'
print 'loading map_choice', map_choice
for i in map_dict[map_choice]:
pvar = 'ME14E-MO-IOC-01:GP' + str(i + 10)
caput(pvar, 1)
print 10*'Done '
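# e.g. load_stock_map('x55') selects a 5x5 block pattern by writing 1 to the
# matching ME14E-MO-IOC-01:GP PVs (illustrative call).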
def load_lite_map():
load_stock_map('clear')
toronto_block_dict = {\
'A1':'01', 'A2':'02', 'A3':'03', 'A4':'04', 'A5':'05', 'A6':'06','A7':'07', 'A8':'08', 'A9':'09'
,'B1':'18', 'B2':'17', 'B3':'16', 'B4':'15', 'B5':'14', 'B6':'13','B7':'12', 'B8':'11', 'B9':'10'
,'C1':'19', 'C2':'20', 'C3':'21', 'C4':'22', 'C5':'23', 'C6':'24','C7':'25', 'C8':'26', 'C9':'27'
,'D1':'36', 'D2':'35', 'D3':'34', 'D4':'33', 'D5':'32', 'D6':'31','D7':'30', 'D8':'29', 'D9':'28'
,'E1':'37', 'E2':'38', 'E3':'39', 'E4':'40', 'E5':'41', 'E6':'42','E7':'43', 'E8':'44', 'E9':'45'
,'F1':'54', 'F2':'53', 'F3':'52', 'F4':'51', 'F5':'50', 'F6':'49','F7':'48', 'F8':'47', 'F9':'46'
,'G1':'55', 'G2':'56', 'G3':'57', 'G4':'58', 'G5':'59', 'G6':'60','G7':'61', 'G8':'62', 'G9':'63'
,'H1':'72', 'H2':'71', 'H3':'70', 'H4':'69', 'H5':'68', 'H6':'67','H7':'66', 'H8':'65', 'H9':'64'
,'I1':'73', 'I2':'74', 'I3':'75', 'I4':'76', 'I5':'77', 'I6':'78','I7':'79', 'I8':'80', 'I9':'81'}
oxford_block_dict = {\
'A1':'01', 'A2':'02', 'A3':'03', 'A4':'04', 'A5':'05', 'A6':'06','A7':'07', 'A8':'08'
,'B1':'16', 'B2':'15', 'B3':'14', 'B4':'13', 'B5':'12', 'B6':'11','B7':'10', 'B8':'09'
,'C1':'17', 'C2':'18', 'C3':'19', 'C4':'20', 'C5':'21', 'C6':'22','C7':'23', 'C8':'24'
,'D1':'32', 'D2':'31', 'D3':'30', 'D4':'29', 'D5':'28', 'D6':'27','D7':'26', 'D8':'25'
,'E1':'33', 'E2':'34', 'E3':'35', 'E4':'36', 'E5':'37', 'E6':'38','E7':'39', 'E8':'40'
,'F1':'48', 'F2':'47', 'F3':'46', 'F4':'45', 'F5':'44', 'F6':'43','F7':'42', 'F8':'41'
,'G1':'49', 'G2':'50', 'G3':'51', 'G4':'52', 'G5':'53', 'G6':'54','G7':'55', 'G8':'56'
,'H1':'64', 'H2':'63', 'H3':'62', 'H4':'61', 'H5':'60', 'H6':'59','H7':'58', 'H8':'57'}
regina_block_dict = {\
'A1':'01', 'A2':'02', 'A3':'03', 'A4':'04', 'A5':'05', 'A6':'06','A7':'07'
,'B1':'14', 'B2':'13', 'B3':'12', 'B4':'11', 'B5':'10', 'B6':'09','B7':'08'
,'C1':'15', 'C2':'16', 'C3':'17', 'C4':'18', 'C5':'19', 'C6':'20','C7':'21'
,'D1':'28', 'D2':'27', 'D3':'26', 'D4':'25', 'D5':'24', 'D6':'23','D7':'22'
,'E1':'29', 'E2':'30', 'E3':'31', 'E4':'32', 'E5':'33', 'E6':'34','E7':'35'
,'F1':'42', 'F2':'41', 'F3':'40', 'F4':'39', 'F5':'38', 'F6':'37','F7':'36'
,'G1':'43', 'G2':'44', 'G3':'45', 'G4':'46', 'G5':'47', 'G6':'48','G7':'49'}
hamburg_block_dict = {\
'A1':'01', 'A2':'02', 'A3':'03'
,'B1':'06', 'B2':'05', 'B3':'04'
,'C1':'07', 'C2':'08', 'C3':'09'}
chip_type = caget(pv.me14e_gp1)
if chip_type == 0:
print 'Toronto Block Order'
block_dict = toronto_block_dict
elif chip_type == 1:
print 'Oxford Block Order'
rows = ['A','B','C','D','E','F','G','H']
columns = list(range(1,9))
btn_names = {}
flip = True
for x, column in enumerate(columns):
for y,row in enumerate(rows):
i=x*8+y
if i%8 == 0 and flip == False:
flip = True
z = 8 - (y+1)
elif i%8 == 0 and flip == True:
flip = False
z = y
elif flip == False:
z = y
elif flip == True:
z = 8 - (y+1)
else:
print('something is wrong with chip grid creation')
break
button_name = str(row)+str(column)
lab_num = x*8+z
label='%02.d'%(lab_num+1)
btn_names[button_name] = label
block_dict = btn_names
elif chip_type == 2:
print 'Hamburg Block Order'
block_dict = hamburg_block_dict
elif chip_type == 5:
print 'Regina Block Order'
block_dict = regina_block_dict
litemap_path = '/localhome/local/Documents/sacla/parameter_files/'
litemap_fid = str(caget(pv.me14e_gp5)) + '.lite'
print 'opening', litemap_path + litemap_fid
f = open(litemap_path + litemap_fid, 'r')
print 'please wait, loading LITE map'
for line in f.readlines():
entry = line.split()
block_name = entry[0]
yesno = entry[1]
block_num = block_dict[block_name]
pvar = 'ME14E-MO-IOC-01:GP' + str(int(block_num) + 10)
print block_name, yesno, pvar
caput(pvar, yesno)
print 10*'Done '
def load_full_map(location ='SACLA'):
if location == 'i24':
chip_name, visit, sub_dir, n_exposures, chip_type, map_type = startup.scrape_parameter_file(location)
else:
chip_name, sub_dir, n_exposures, chip_type, map_type = startup.scrape_parameter_file(location)
fullmap_path = '/localhome/local/Documents/sacla/parameter_files/'
fullmap_fid = fullmap_path + str(caget(pv.me14e_gp5)) + '.spec'
print 'opening', fullmap_fid
mapping.plot_file(fullmap_fid, chip_type)
print '\n\n', 10*'PNG '
    mapping.convert_chip_to_hex(fullmap_fid, chip_type)
os.system("cp %s %s" % (fullmap_fid[:-4]+'full', fullmap_path+'currentchip.full'))
print 10*'Done ', '\n'
def moveto(place):
print 5 * (place + ' ')
chip_type = caget(pv.me14e_gp1)
print 'CHIP TYPE', chip_type
if chip_type == 0:
print 'Toronto Move'
if place == 'origin':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
if place == 'f1':
caput(pv.me14e_stage_x, +18.975)
caput(pv.me14e_stage_y, 0.0)
if place == 'f2':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, +21.375)
elif chip_type == 1:
print 'Oxford Move'
if place == 'origin':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
if place == 'f1':
caput(pv.me14e_stage_x, 25.40)
caput(pv.me14e_stage_y, 0.0)
if place == 'f2':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 25.40)
elif chip_type == 2:
print 'Hamburg Move'
if place == 'origin':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
if place == 'f1':
caput(pv.me14e_stage_x, +24.968)
caput(pv.me14e_stage_y, 0.0)
if place == 'f2':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, +24.968)
elif chip_type == 3:
print 'Oxford Inner Move'
if place == 'origin':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
if place == 'f1':
caput(pv.me14e_stage_x, 24.60)
caput(pv.me14e_stage_y, 0.0)
if place == 'f2':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 24.60)
elif chip_type == 5:
print 'Regina Move'
if place == 'origin':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
if place == 'f1':
caput(pv.me14e_stage_x, +17.175)
caput(pv.me14e_stage_y, 0.0)
if place == 'f2':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, +17.175)
else:
print 'Unknown chip_type move'
if place == 'zero':
caput(pv.me14e_pmac_str, '!x0y0z0')
elif place == 'yag':
caput(pv.me14e_stage_x, 1.0)
caput(pv.me14e_stage_y, 1.0)
caput(pv.me14e_stage_z, 1.0)
elif place == 'load_position':
print 'load position'
caput(pv.me14e_filter, -25)
caput(pv.me14e_stage_x, -25.0)
caput(pv.me14e_stage_y, -25.0)
caput(pv.me14e_stage_z, 0.0)
caput(pv.me14e_pmac_str, 'M512=0 M511=1')
elif place == 'collect_position':
print 'collect position'
caput(pv.me14e_filter, 25)
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
caput(pv.me14e_stage_z, 0.0)
caput(pv.me14e_pmac_str, 'M512=0 M511=1')
elif place == 'lightin':
print 'light in'
caput(pv.me14e_filter, 25)
elif place == 'lightout':
print 'light out'
caput(pv.me14e_filter, -25)
elif place == 'flipperin':
        caput(pv.me14e_pmac_str, 'M512=0 M511=1')
    elif place == 'flipperout':
        caput(pv.me14e_pmac_str, ' M512=1 M511=1')
def scrape_mtr_directions():
param_path = '/localhome/local/Documents/sacla/parameter_files/'
f = open(param_path + 'motor_direction.txt', 'r')
mtr1_dir, mtr2_dir, mtr3_dir = 1,1,1
for line in f.readlines():
if line.startswith('mtr1'):
mtr1_dir = float(int(line.split('=')[1]))
elif line.startswith('mtr2'):
mtr2_dir = float(int(line.split('=')[1]))
elif line.startswith('mtr3'):
mtr3_dir = float(int(line.split('=')[1]))
else:
continue
f.close()
return mtr1_dir, mtr2_dir, mtr3_dir
def fiducial(point):
scale = 10000.0
param_path = '/localhome/local/Documents/sacla/parameter_files/'
mtr1_dir, mtr2_dir, mtr3_dir = scrape_mtr_directions()
rbv_1 = caget(pv.me14e_stage_x + '.RBV')
rbv_2 = caget(pv.me14e_stage_y + '.RBV')
rbv_3 = caget(pv.me14e_stage_z + '.RBV')
raw_1 = caget(pv.me14e_stage_x + '.RRBV')
raw_2 = caget(pv.me14e_stage_y + '.RRBV')
raw_3 = caget(pv.me14e_stage_z + '.RRBV')
"""
June 8th 2017 change from this to rbv
f_x = (mtr1_dir*raw_1) / scale
f_y = (mtr2_dir*raw_2) / scale
f_z = (mtr3_dir*raw_3) / scale
"""
f_x = rbv_1
f_y = rbv_2
f_z = rbv_3
print '\nWriting Fiducial File', 20*('%s ' %point)
print 'MTR\tRBV\tRAW\tDirect.\tf_value'
print 'MTR1\t%1.4f\t%i\t%i\t%1.4f' % (rbv_1, raw_1, mtr1_dir, f_x)
print 'MTR2\t%1.4f\t%i\t%i\t%1.4f' % (rbv_2, raw_2, mtr2_dir, f_y)
print 'MTR3\t%1.4f\t%i\t%i\t%1.4f' % (rbv_3, raw_3, mtr3_dir, f_z)
print 'Writing Fiducial File', 20*('%s ' %point)
f = open(param_path + 'fiducial_%s.txt' %point, 'w')
f.write('MTR\tRBV\tRAW\tCorr\tf_value\n')
f.write('MTR1\t%1.4f\t%i\t%i\t%1.4f\n' % (rbv_1, raw_1, mtr1_dir, f_x))
f.write('MTR2\t%1.4f\t%i\t%i\t%1.4f\n' % (rbv_2, raw_2, mtr2_dir, f_y))
f.write('MTR3\t%1.4f\t%i\t%i\t%1.4f' % (rbv_3, raw_3, mtr3_dir, f_z))
f.close()
print 10*'Done '
def scrape_mtr_fiducials(point):
param_path = '/localhome/local/Documents/sacla/parameter_files/'
f = open(param_path+'fiducial_%i.txt' %point,'r')
f_lines = f.readlines()[1:]
f_x = float(f_lines[0].rsplit()[4])
f_y = float(f_lines[1].rsplit()[4])
f_z = float(f_lines[2].rsplit()[4])
f.close()
return f_x, f_y, f_z
def cs_maker():
chip_type = caget(pv.me14e_gp1)
fiducial_dict = {}
fiducial_dict[0] = [18.975, 21.375]
fiducial_dict[1] = [25.400, 25.400]
fiducial_dict[2] = [24.968, 24.968]
fiducial_dict[3] = [24.600, 24.600]
fiducial_dict[4] = [27.500, 27.500]
fiducial_dict[5] = [17.175, 17.175]
print chip_type, fiducial_dict[chip_type]
mtr1_dir, mtr2_dir, mtr3_dir = scrape_mtr_directions()
f1_x, f1_y, f1_z = scrape_mtr_fiducials(1)
f2_x, f2_y, f2_z = scrape_mtr_fiducials(2)
print 'AAAAAAAAAAAAAAAAABBBBBBBBBBBBBB'
print 'mtr1 direction', mtr1_dir
print 'mtr2 direction', mtr2_dir
print 'mtr3 direction', mtr3_dir
"""
Theory
Rx: rotation about X-axis, pitch
Ry: rotation about Y-axis, yaw
Rz: rotation about Z-axis, roll
The order of rotation is Roll->Yaw->Pitch (Rx*Ry*Rz)
Rx Ry Rz
|1 0 0| | Cy 0 Sy| |Cz -Sz 0| | CyCz -CxSz Sy |
|0 Cx -Sx|*| 0 1 0|*|Sz Cz 0| = | SxSyCz+CxSz -SxSySz+CxCz -SxCy|
|0 Sx Cx| |-Sy 0 Cy| | 0 0 1| |-CxSyCz+SxSz CxSySz+SxCz CxCy|
BELOW iS TEST TEST (CLOCKWISE)
Rx Ry Rz
|1 0 0| | Cy 0 -Sy| |Cz Sz 0| | CyCz CxSz -Sy |
|0 Cx Sx|*| 0 1 0|*|-Sz Cz 0| = | SxSyCz-CxSz SxSySz+CxCz SxCy|
|0 -Sx Cx| | Sy 0 Cy| | 0 0 1| | CxSyCz+SxSz CxSySz-SxCz CxCy|
"""
Sz1 = f1_y / fiducial_dict[chip_type][0]
Sz2 = -1 * (f2_x / fiducial_dict[chip_type][1])
Sz = ((Sz1 + Sz2) / 2)
Cz = np.sqrt((1 - Sz**2))
print 'Sz1 , %1.4f, %1.4f' % (Sz1, np.degrees(np.arcsin(Sz1)))
print 'Sz2 , %1.4f, %1.4f' % (Sz2, np.degrees(np.arcsin(Sz2)))
print 'Sz , %1.4f, %1.4f' % (Sz, np.degrees(np.arcsin(Sz)))
print 'Cz , %1.4f, %1.4f\n' % (Cz, np.degrees(np.arccos(Cz)))
Sy = f1_z / fiducial_dict[chip_type][0]
Cy = np.sqrt((1 - Sy**2))
print 'Sy , %1.4f, %1.4f' % (Sy, np.degrees(np.arcsin(Sy)))
print 'Cy , %1.4f, %1.4f\n' % (Cy, np.degrees(np.arccos(Cy)))
Sx = -1* f2_z / fiducial_dict[chip_type][1]
Cx = np.sqrt((1 - Sx**2))
print 'Sx , %1.4f, %1.4f' % (Sx, np.degrees(np.arcsin(Sx)))
print 'Cx , %1.4f, %1.4f\n' % (Cx, np.degrees(np.arccos(Cx)))
scalex, scaley, scalez = 10010.4, 10001.7, 10000.0
x1factor = mtr1_dir * scalex * (Cy * Cz)
y1factor = mtr2_dir * scaley * (-1. * Cx * Sz)
z1factor = mtr3_dir * scalez * Sy
x2factor = mtr1_dir * scalex * ((Sx*Sy*Cz) + (Cx*Sz))
y2factor = mtr2_dir * scaley * ((Cx*Cz) - (Sx*Sy*Sz))
z2factor = mtr3_dir * scalez * (-1. * Sx * Cy)
x3factor = mtr1_dir * scalex * ((Sx*Sz) - (Cx*Sy*Cz))
y3factor = mtr2_dir * scaley * ((Cx*Sy*Sz) + (Sx*Cz))
z3factor = mtr3_dir * scalez * (Cx* Cy)
"""
Rx Ry Rz
|1 0 0| | Cy 0 Sy| |Cz -Sz 0| | CyCz -CxSz Sy |
|0 Cx -Sx|*| 0 1 0|*|Sz Cz 0| = | SxSyCz+CxSz -SxSySz+CxCz -SxCy|
|0 Sx Cx| |-Sy 0 Cy| | 0 0 1| |-CxSyCz+SxSz CxSySz+SxCz CxCy|
"""
skew = 0.1863
print 'Skew being used is: %1.4f' %skew
s1 = np.degrees(np.arcsin(Sz1))
s2 = np.degrees(np.arcsin(Sz2))
rot = np.degrees(np.arcsin((Sz1+Sz2) / 2))
calc_skew = ((s1-rot) - (s2-rot))
print 's1:%1.4f s2:%1.4f rot:%1.4f' %(s1, s2, rot)
print 'Calculated rotation from current fiducials is: %1.4f' %rot
print 'Calculated skew from current fiducials is: %1.4f' %calc_skew
sinD = np.sin((skew/2) * (np.pi/180))
cosD = np.cos((skew/2) * (np.pi/180))
new_x1factor = (x1factor * cosD) + (y1factor * sinD)
new_y1factor = (x1factor * sinD) + (y1factor * cosD)
new_x2factor = (x2factor * cosD) + (y2factor * sinD)
new_y2factor = (x2factor * sinD) + (y2factor * cosD)
cs1 = "#1->%+1.3fX%+1.3fY%+1.3fZ" % (new_x1factor, new_y1factor, z1factor)
cs2 = "#2->%+1.3fX%+1.3fY%+1.3fZ" % (new_x2factor, new_y2factor, z2factor)
cs3 = "#3->%+1.3fX%+1.3fY%+1.3fZ" % (x3factor, y3factor, z3factor)
print '\n'.join([cs1, cs2, cs3])
    print 'Each of these should be 1: the root sum of squares of a row of factors divided by its scale'
print np.sqrt(x1factor**2 + y1factor**2 + z1factor**2) / scalex
print np.sqrt(x2factor**2 + y2factor**2 + z2factor**2) / scaley
print np.sqrt(x3factor**2 + y3factor**2 + z3factor**2) / scalez
print 'Long wait, please be patient'
caput(pv.me14e_pmac_str, '!x0y0z0')
sleep(2.5)
caput(pv.me14e_pmac_str, '&2')
caput(pv.me14e_pmac_str, cs1)
caput(pv.me14e_pmac_str, cs2)
caput(pv.me14e_pmac_str, cs3)
caput(pv.me14e_pmac_str, '!x0y0z0')
sleep(0.1)
caput(pv.me14e_pmac_str, '#1hmz#2hmz#3hmz')
sleep(0.1)
print 5*'chip_type',type(chip_type)
if str(chip_type) =='1':
caput(pv.me14e_pmac_str, '!x0.4y0.4')
sleep(0.1)
caput(pv.me14e_pmac_str, '#1hmz#2hmz#3hmz')
print 10*'CSDone '
else:
caput(pv.me14e_pmac_str, '#1hmz#2hmz#3hmz')
print 10*'CSDone '
def cs_reset():
cs1 = "#1->%+10000X%+0Y%+0Z"
cs2 = "#2->%+0X%+10000Y%+0Z"
cs3 = "#3->0X+0Y+10000Z"
print '\n'.join([cs1, cs2, cs3])
caput(pv.me14e_pmac_str, '&2')
sleep(0.5)
caput(pv.me14e_pmac_str, cs1)
sleep(0.5)
caput(pv.me14e_pmac_str, cs2)
sleep(0.5)
caput(pv.me14e_pmac_str, cs3)
print 10*'CSDone '
def main(args):
if args[1] == 'initialise':
initialise()
elif args[1] == 'pvar_test':
chipid = args[2]
pvar_test(chipid)
elif args[1] == 'moveto':
moveto(args[2])
elif args[1] == 'fiducial':
fiducial(args[2])
elif args[1] == 'cs_maker':
cs_maker()
elif args[1] == 'write_parameter_file':
write_parameter_file()
startup.run()
elif args[1] == 'define_current_chip':
chipid = args[2]
define_current_chip(chipid)
elif args[1] == 'load_stock_map':
map_choice = args[2]
load_stock_map(map_choice)
elif args[1] == 'load_lite_map':
load_lite_map()
elif args[1] == 'load_full_map':
load_full_map()
elif args[1] == 'save_screen_map':
save_screen_map()
elif args[1] == 'upload_full':
upload_full()
elif args[1] == 'upload_parameters':
chipid = args[2]
upload_parameters(chipid)
elif args[1] == 'cs_reset':
cs_reset()
else:
print 'Unknown Command'
if __name__ == '__main__':
main(sys.argv)
| false
| true
|
f71968c2bfbb4980fde3dad9d2991f5150aef9eb
| 2,841
|
py
|
Python
|
setup.py
|
blazelibs/blazeweb
|
b120a6a2e38c8b53da2b73443ff242e2d1438053
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
blazelibs/blazeweb
|
b120a6a2e38c8b53da2b73443ff242e2d1438053
|
[
"BSD-3-Clause"
] | 6
|
2016-11-01T18:42:34.000Z
|
2020-11-16T16:52:14.000Z
|
setup.py
|
blazelibs/blazeweb
|
b120a6a2e38c8b53da2b73443ff242e2d1438053
|
[
"BSD-3-Clause"
] | 1
|
2020-01-22T18:20:46.000Z
|
2020-01-22T18:20:46.000Z
|
import os
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
# pip install -e .[develop]
develop_requires = [
'WebTest',
'ScriptTest',
'coverage',
'docutils',
'minimock',
'nose',
]
cdir = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(cdir, 'readme.rst')).read()
CHANGELOG = open(os.path.join(cdir, 'changelog.rst')).read()
VERSION = open(os.path.join(cdir, 'blazeweb', 'version.txt')).read().strip()
required_packages = [
'Beaker>=1.5',
'BlazeUtils>0.3.7',
'Blinker>=1.0',
'decorator>=3.0.1',
'FormEncode>=1.2',
'html2text>=2.35',
'jinja2>=2.5',
'markdown2>=1.0.1',
'Paste>=1.7',
'PasteScript>=1.7',
'WebHelpers2',
'Werkzeug>=1.0.0',
]
try:
import json
del json
except ImportError:
required_packages.append('simplejson>=2.1.1')
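# json has been in the stdlib since Python 2.6, so simplejson is only required
# on interpreters where the import above fails.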
setup(
name="BlazeWeb",
version=VERSION,
description="A light weight WSGI framework with a pluggable architecture",
long_description='\n\n'.join((README, CHANGELOG)),
author="Randy Syring",
author_email="randy.syring@level12.io",
url='http://pypi.python.org/pypi/BlazeWeb/',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Internet :: WWW/HTTP'
],
license='BSD',
packages=find_packages(exclude=['tests']),
include_package_data=True,
install_requires=required_packages,
extras_require={'develop': develop_requires},
entry_points="""
[console_scripts]
bw = blazeweb.scripting:blazeweb_entry
[blazeweb.no_app_command]
help=paste.script.help:HelpCommand
project = blazeweb.commands:ProjectCommand
jinja-convert = blazeweb.commands:JinjaConvertCommand
[blazeweb.app_command]
serve = blazeweb.commands:ServeCommand
help = paste.script.help:HelpCommand
testrun = blazeweb.commands:TestRunCommand
tasks = blazeweb.commands:TasksCommand
shell = blazeweb.commands:ShellCommand
routes = blazeweb.commands:RoutesCommand
static-copy = blazeweb.commands:StaticCopyCommand
component-map = blazeweb.commands:ComponentMapCommand
[blazeweb.blazeweb_project_template]
minimal = blazeweb.paster_tpl:MinimalProjectTemplate
bwproject = blazeweb.paster_tpl:ProjectTemplate
[nose.plugins]
blazeweb_initapp = blazeweb.nose_plugin:InitAppPlugin
[pytest11]
blazeweb_initapp = blazeweb.pytest_plugin
""",
zip_safe=False
)
| 28.128713
| 78
| 0.67793
|
import os
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
develop_requires = [
'WebTest',
'ScriptTest',
'coverage',
'docutils',
'minimock',
'nose',
]
cdir = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(cdir, 'readme.rst')).read()
CHANGELOG = open(os.path.join(cdir, 'changelog.rst')).read()
VERSION = open(os.path.join(cdir, 'blazeweb', 'version.txt')).read().strip()
required_packages = [
'Beaker>=1.5',
'BlazeUtils>0.3.7',
'Blinker>=1.0',
'decorator>=3.0.1',
'FormEncode>=1.2',
'html2text>=2.35',
'jinja2>=2.5',
'markdown2>=1.0.1',
'Paste>=1.7',
'PasteScript>=1.7',
'WebHelpers2',
'Werkzeug>=1.0.0',
]
try:
import json
del json
except ImportError:
required_packages.append('simplejson>=2.1.1')
setup(
name="BlazeWeb",
version=VERSION,
description="A light weight WSGI framework with a pluggable architecture",
long_description='\n\n'.join((README, CHANGELOG)),
author="Randy Syring",
author_email="randy.syring@level12.io",
url='http://pypi.python.org/pypi/BlazeWeb/',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Internet :: WWW/HTTP'
],
license='BSD',
packages=find_packages(exclude=['tests']),
include_package_data=True,
install_requires=required_packages,
extras_require={'develop': develop_requires},
entry_points="""
[console_scripts]
bw = blazeweb.scripting:blazeweb_entry
[blazeweb.no_app_command]
help=paste.script.help:HelpCommand
project = blazeweb.commands:ProjectCommand
jinja-convert = blazeweb.commands:JinjaConvertCommand
[blazeweb.app_command]
serve = blazeweb.commands:ServeCommand
help = paste.script.help:HelpCommand
testrun = blazeweb.commands:TestRunCommand
tasks = blazeweb.commands:TasksCommand
shell = blazeweb.commands:ShellCommand
routes = blazeweb.commands:RoutesCommand
static-copy = blazeweb.commands:StaticCopyCommand
component-map = blazeweb.commands:ComponentMapCommand
[blazeweb.blazeweb_project_template]
minimal = blazeweb.paster_tpl:MinimalProjectTemplate
bwproject = blazeweb.paster_tpl:ProjectTemplate
[nose.plugins]
blazeweb_initapp = blazeweb.nose_plugin:InitAppPlugin
[pytest11]
blazeweb_initapp = blazeweb.pytest_plugin
""",
zip_safe=False
)
| true
| true
|
f71969a63ad11dd00ce0c7b25f5d250f148a897c
| 2,807
|
py
|
Python
|
crossbaker/samples/declarative/signals/pytoqml1/main.py
|
josephkirk/MeshBaker
|
e4f75193074cc92d12f953d6cad3a2a599f63ead
|
[
"MIT"
] | null | null | null |
crossbaker/samples/declarative/signals/pytoqml1/main.py
|
josephkirk/MeshBaker
|
e4f75193074cc92d12f953d6cad3a2a599f63ead
|
[
"MIT"
] | 5
|
2018-10-09T02:43:14.000Z
|
2018-10-12T13:00:09.000Z
|
crossbaker/samples/declarative/signals/pytoqml1/main.py
|
josephkirk/CrossBaker
|
e4f75193074cc92d12f953d6cad3a2a599f63ead
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the Qt for Python examples of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of The Qt Company Ltd nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
## $QT_END_LICENSE$
##
#############################################################################
from __future__ import print_function
import os
import sys
from PySide2.QtCore import QTimer, QUrl
from PySide2.QtGui import QGuiApplication
import PySide2.QtQml
from PySide2.QtQuick import QQuickView
if __name__ == '__main__':
app = QGuiApplication(sys.argv)
timer = QTimer()
timer.start(2000)
view = QQuickView()
qmlFile = os.path.join(os.path.dirname(__file__), 'view.qml')
view.setSource(QUrl.fromLocalFile(os.path.abspath(qmlFile)))
if view.status() == QQuickView.Error:
sys.exit(-1)
root = view.rootObject()
timer.timeout.connect(root.updateRotater)
view.show()
res = app.exec_()
# Deleting the view before it goes out of scope is required to make sure all child QML instances
# are destroyed in the correct order.
del view
sys.exit(res)
| 38.452055
| 100
| 0.695048
| true
| true
|
|
f7196a03c814613734c343483b20f67cde46b40d
| 260
|
py
|
Python
|
w02-calling-functions/checkpoint-boxes/boxes.py
|
carloswm85/2021-cs111-programming-with-functions
|
73cc376e3f0de60aa0150d33ec95568d217096ec
|
[
"Unlicense"
] | null | null | null |
w02-calling-functions/checkpoint-boxes/boxes.py
|
carloswm85/2021-cs111-programming-with-functions
|
73cc376e3f0de60aa0150d33ec95568d217096ec
|
[
"Unlicense"
] | null | null | null |
w02-calling-functions/checkpoint-boxes/boxes.py
|
carloswm85/2021-cs111-programming-with-functions
|
73cc376e3f0de60aa0150d33ec95568d217096ec
|
[
"Unlicense"
] | null | null | null |
import math
items = int(input("Enter the number of items: "))
items_box = int(input("Enter the number of items per box: "))
boxes = math.ceil(items / items_box)
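# e.g. 10 items at 3 per box -> ceil(10 / 3) = 4 boxes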
print(f"For {items} items, packing {items_box} items in each box, you will need {boxes} boxes.")
| 32.5
| 96
| 0.711538
|
import math
items = int(input("Enter the number of items: "))
items_box = int(input("Enter the number of items per box: "))
boxes = math.ceil(items / items_box)
print(f"For {items} items, packing {items_box} items in each box, you will need {boxes} boxes.")
| true
| true
|
f7196aad36071501a72c16f5e95b38ddb5f8950b
| 902
|
py
|
Python
|
turbo/turbo_encoder.py
|
DaulPavid/pyturbo
|
878e0b1b514c043f1b4ea5cd5268b23c0df5192e
|
[
"MIT"
] | 9
|
2018-10-17T17:02:05.000Z
|
2022-03-03T18:58:32.000Z
|
turbo/turbo_encoder.py
|
akshay230994/pyturbo
|
878e0b1b514c043f1b4ea5cd5268b23c0df5192e
|
[
"MIT"
] | 2
|
2018-10-16T16:57:57.000Z
|
2020-04-14T13:34:40.000Z
|
turbo/turbo_encoder.py
|
akshay230994/pyturbo
|
878e0b1b514c043f1b4ea5cd5268b23c0df5192e
|
[
"MIT"
] | 4
|
2019-12-23T18:42:29.000Z
|
2022-01-19T12:08:35.000Z
|
#
# Turbo Encoder
#
import numpy as np
from .rsc import RSC
class TurboEncoder:
def __init__(self, interleaver):
self.interleaver = interleaver
self.block_size = len(self.interleaver)
        self.encoders = [RSC() for _ in range(2)]  # two independent RSCs, not two aliases of one object
def reset(self):
for e in self.encoders:
e.reset()
def interleave(self, vector):
interleaved = np.zeros(self.block_size, dtype=int)
for i in range(0, self.block_size):
interleaved[i] = vector[self.interleaver[i]]
return interleaved
def execute(self, vector):
output_size = 3 * (len(vector) + len(self.encoders[0].registers))
output = np.zeros(output_size, dtype=int)
interleaved = self.interleave(vector)
output[1::3], output[::3] = self.encoders[0].execute(vector)
output[2::3], _ = self.encoders[1].execute(interleaved)
return output
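# Hedged usage sketch (the 4-element interleaver is illustrative, not from
# this repository):
#   enc = TurboEncoder([2, 0, 3, 1])
#   out = enc.execute(np.array([1, 0, 1, 1]))
# Per the slice assignments in execute(), out[::3] carries the systematic
# stream and out[1::3] / out[2::3] the two parity streams.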
| 25.055556
| 73
| 0.618625
|
import numpy as np
from .rsc import RSC
class TurboEncoder:
def __init__(self, interleaver):
self.interleaver = interleaver
self.block_size = len(self.interleaver)
        self.encoders = [RSC() for _ in range(2)]
def reset(self):
for e in self.encoders:
e.reset()
def interleave(self, vector):
interleaved = np.zeros(self.block_size, dtype=int)
for i in range(0, self.block_size):
interleaved[i] = vector[self.interleaver[i]]
return interleaved
def execute(self, vector):
output_size = 3 * (len(vector) + len(self.encoders[0].registers))
output = np.zeros(output_size, dtype=int)
interleaved = self.interleave(vector)
output[1::3], output[::3] = self.encoders[0].execute(vector)
output[2::3], _ = self.encoders[1].execute(interleaved)
return output
| true
| true
|
f7196b897dd7d74bfa6480c1e1542bf851614cfa
| 534
|
py
|
Python
|
src/package/scanner.py
|
buckler-project/armoury
|
3d4c1bb9e8af190ba95d60d502b39699848d1e62
|
[
"MIT"
] | 1
|
2019-02-02T06:21:21.000Z
|
2019-02-02T06:21:21.000Z
|
src/package/scanner.py
|
buckler-project/armoury
|
3d4c1bb9e8af190ba95d60d502b39699848d1e62
|
[
"MIT"
] | 5
|
2019-01-28T00:59:15.000Z
|
2019-01-31T10:35:36.000Z
|
src/package/scanner.py
|
buckler-project/armoury
|
3d4c1bb9e8af190ba95d60d502b39699848d1e62
|
[
"MIT"
] | null | null | null |
from package import package
parent_path = '.scanners'
config_path = 'scanner.yml'
class Scanner(package.Package):
def __init__(self, url, name, auther):
super().__init__(url, name, auther)
self.parent_path = parent_path
self.config_path = config_path
class ScannerFactory(package.PackageFactory):
def __init__(self):
self.parent_path = parent_path
self.config_path = config_path
def _generate(self, url, name, auther):
return Scanner(url=url, name=name, auther=auther)
| 26.7
| 57
| 0.694757
|
from package import package
parent_path = '.scanners'
config_path = 'scanner.yml'
class Scanner(package.Package):
def __init__(self, url, name, auther):
super().__init__(url, name, auther)
self.parent_path = parent_path
self.config_path = config_path
class ScannerFactory(package.PackageFactory):
def __init__(self):
self.parent_path = parent_path
self.config_path = config_path
def _generate(self, url, name, auther):
return Scanner(url=url, name=name, auther=auther)
| true
| true
|
f7196bd5fe213fbeb54a87c93b46f43e8cb1f118
| 2,742
|
py
|
Python
|
inference_speed.py
|
wmcnally/evopose2d
|
ea05b818044d8d84e9cbbee778bc465be59ebd59
|
[
"MIT"
] | 75
|
2020-11-18T05:07:42.000Z
|
2022-03-27T03:25:16.000Z
|
inference_speed.py
|
wmcnally/evopose2d
|
ea05b818044d8d84e9cbbee778bc465be59ebd59
|
[
"MIT"
] | 26
|
2020-11-29T17:45:44.000Z
|
2022-03-22T15:30:31.000Z
|
inference_speed.py
|
wmcnally/evopose2d
|
ea05b818044d8d84e9cbbee778bc465be59ebd59
|
[
"MIT"
] | 8
|
2020-11-25T02:59:53.000Z
|
2022-03-27T10:53:59.000Z
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from dataset.dataloader import load_tfds
from time import time
import argparse
from nets.simple_basline import SimpleBaseline
from nets.evopose2d import EvoPose
from nets.hrnet import HRNet
from utils import detect_hardware
def speed_test(strategy, cfg, split='val', n=1000):
with strategy.scope():
if cfg.MODEL.TYPE == 'simple_baseline':
model = SimpleBaseline(cfg)
elif cfg.MODEL.TYPE == 'hrnet':
model = HRNet(cfg)
elif cfg.MODEL.TYPE == 'evopose':
model = EvoPose(cfg)
cfg.DATASET.OUTPUT_SHAPE = model.output_shape[1:]
ds = load_tfds(cfg, split, det=cfg.VAL.DET,
predict_kp=True, drop_remainder=cfg.VAL.DROP_REMAINDER)
ds = strategy.experimental_distribute_dataset(ds)
@tf.function
def predict(imgs, flip=False):
if flip:
imgs = imgs[:, :, ::-1, :]
return model(imgs, training=False)
for count, batch in enumerate(ds):
if count == 1: # skip first pass
ti = time()
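                # timing starts at the second batch so that tf.function
                # tracing on the first pass is excluded from the FPS figure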
_, imgs, _, _, scores = batch
hms = strategy.run(predict, args=(imgs,)).numpy()
if cfg.VAL.FLIP:
flip_hms = strategy.run(predict, args=(imgs, True,)).numpy()
flip_hms = flip_hms[:, :, ::-1, :]
tmp = flip_hms.copy()
for i in range(len(cfg.DATASET.KP_FLIP)):
flip_hms[:, :, :, i] = tmp[:, :, :, cfg.DATASET.KP_FLIP[i]]
# shift to align features
flip_hms[:, :, 1:, :] = flip_hms[:, :, 0:-1, :].copy()
hms = (hms + flip_hms) / 2.
if count == n:
break
print('FPS: {:.5f}'.format((n * cfg.VAL.BATCH_SIZE) / (time() - ti)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cpu', action='store_true')
parser.add_argument('--gpu', action='store_true')
parser.add_argument('--tpu', default='')
parser.add_argument('-c', '--cfg', required=True) # yaml
parser.add_argument('-bs', '--batch-size', type=int, default=1)
parser.add_argument('-n', type=int, default=1000)
args = parser.parse_args()
from dataset.coco import cn as cfg
cfg.merge_from_file('configs/' + args.cfg)
cfg.MODEL.NAME = args.cfg.split('.')[0]
cfg.VAL.BATCH_SIZE = args.batch_size
if args.cpu:
strategy = tf.distribute.OneDeviceStrategy('/CPU:0')
elif args.gpu:
strategy = tf.distribute.OneDeviceStrategy('/GPU:0')
else:
tpu, strategy = detect_hardware(args.tpu)
tf.config.optimizer.set_experimental_options({'disable_meta_optimizer': True})
speed_test(strategy, cfg, split='val', n=args.n)
| 32.258824
| 82
| 0.610139
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from dataset.dataloader import load_tfds
from time import time
import argparse
from nets.simple_basline import SimpleBaseline
from nets.evopose2d import EvoPose
from nets.hrnet import HRNet
from utils import detect_hardware
def speed_test(strategy, cfg, split='val', n=1000):
with strategy.scope():
if cfg.MODEL.TYPE == 'simple_baseline':
model = SimpleBaseline(cfg)
elif cfg.MODEL.TYPE == 'hrnet':
model = HRNet(cfg)
elif cfg.MODEL.TYPE == 'evopose':
model = EvoPose(cfg)
cfg.DATASET.OUTPUT_SHAPE = model.output_shape[1:]
ds = load_tfds(cfg, split, det=cfg.VAL.DET,
predict_kp=True, drop_remainder=cfg.VAL.DROP_REMAINDER)
ds = strategy.experimental_distribute_dataset(ds)
@tf.function
def predict(imgs, flip=False):
if flip:
imgs = imgs[:, :, ::-1, :]
return model(imgs, training=False)
for count, batch in enumerate(ds):
if count == 1:
ti = time()
_, imgs, _, _, scores = batch
hms = strategy.run(predict, args=(imgs,)).numpy()
if cfg.VAL.FLIP:
flip_hms = strategy.run(predict, args=(imgs, True,)).numpy()
flip_hms = flip_hms[:, :, ::-1, :]
tmp = flip_hms.copy()
for i in range(len(cfg.DATASET.KP_FLIP)):
flip_hms[:, :, :, i] = tmp[:, :, :, cfg.DATASET.KP_FLIP[i]]
flip_hms[:, :, 1:, :] = flip_hms[:, :, 0:-1, :].copy()
hms = (hms + flip_hms) / 2.
if count == n:
break
print('FPS: {:.5f}'.format((n * cfg.VAL.BATCH_SIZE) / (time() - ti)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cpu', action='store_true')
parser.add_argument('--gpu', action='store_true')
parser.add_argument('--tpu', default='')
parser.add_argument('-c', '--cfg', required=True)
parser.add_argument('-bs', '--batch-size', type=int, default=1)
parser.add_argument('-n', type=int, default=1000)
args = parser.parse_args()
from dataset.coco import cn as cfg
cfg.merge_from_file('configs/' + args.cfg)
cfg.MODEL.NAME = args.cfg.split('.')[0]
cfg.VAL.BATCH_SIZE = args.batch_size
if args.cpu:
strategy = tf.distribute.OneDeviceStrategy('/CPU:0')
elif args.gpu:
strategy = tf.distribute.OneDeviceStrategy('/GPU:0')
else:
tpu, strategy = detect_hardware(args.tpu)
tf.config.optimizer.set_experimental_options({'disable_meta_optimizer': True})
speed_test(strategy, cfg, split='val', n=args.n)
| true
| true
|
f7196c255322af385bffc89c3fcffebd8bcec16e
| 8,295
|
py
|
Python
|
tango/common/util.py
|
allenai/tango
|
80c90caefae4ad1c3f8472718ddada912cd8fcf9
|
[
"Apache-2.0"
] | 52
|
2021-09-24T17:52:34.000Z
|
2022-03-29T22:55:02.000Z
|
tango/common/util.py
|
allenai/tango
|
80c90caefae4ad1c3f8472718ddada912cd8fcf9
|
[
"Apache-2.0"
] | 90
|
2021-09-29T04:23:29.000Z
|
2022-03-31T21:23:02.000Z
|
tango/common/util.py
|
allenai/tango
|
80c90caefae4ad1c3f8472718ddada912cd8fcf9
|
[
"Apache-2.0"
] | 8
|
2021-11-13T01:56:22.000Z
|
2022-02-27T03:29:42.000Z
|
import importlib
import pkgutil
import signal
import string
import sys
import traceback
from contextlib import contextmanager
from datetime import datetime, tzinfo
from pathlib import Path
from typing import Iterable, Optional, Set, Tuple, Union
import pytz
from .aliases import PathOrStr
from .exceptions import SigTermReceived
def tango_cache_dir() -> Path:
"""
Returns a directory suitable for caching things from Tango, defaulting
to ``$HOME/.cache/tango``.
"""
cache_dir = Path.home() / ".cache" / "tango"
cache_dir.mkdir(parents=True, exist_ok=True)
return cache_dir
def _handle_sigterm(sig, frame):
raise SigTermReceived
def install_sigterm_handler():
signal.signal(signal.SIGTERM, _handle_sigterm)
@contextmanager
def push_python_path(path: PathOrStr):
"""
Prepends the given path to `sys.path`.
    This method is intended to be used with `with`, so after its usage, its value will be removed from
`sys.path`.
"""
# In some environments, such as TC, it fails when sys.path contains a relative path, such as ".".
path = Path(path).resolve()
path = str(path)
sys.path.insert(0, path)
try:
yield
finally:
# Better to remove by value, in case `sys.path` was manipulated in between.
sys.path.remove(path)
_extra_imported_modules: Set[str] = set()
def get_extra_imported_modules() -> Set[str]:
return _extra_imported_modules
def import_extra_module(package_name: str) -> None:
global _extra_imported_modules
import_module_and_submodules(package_name)
_extra_imported_modules.add(package_name)
def resolve_module_name(package_name: str) -> Tuple[str, Path]:
base_path = Path(".")
package_path = Path(package_name)
if not package_path.exists():
raise ValueError(f"'{package_path}' looks like a path, but the path does not exist")
parent = package_path.parent
while parent != parent.parent:
if (parent / "__init__.py").is_file():
parent = parent.parent
else:
base_path = parent
break
package_name = str(package_path.relative_to(base_path)).replace("/", ".")
if package_path.is_file():
if package_path.name == "__init__.py":
# If `__init__.py` file, resolve to the parent module.
package_name = package_name[: -len(".__init__.py")]
elif package_name.endswith(".py"):
package_name = package_name[:-3]
if not package_name:
raise ValueError(f"invalid package path '{package_path}'")
return package_name, base_path
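# Illustrative example (hypothetical layout): resolve_module_name("my_pkg/steps/train.py")
# returns ("my_pkg.steps.train", Path(".")) when my_pkg lives in the working
# directory; base_path backs out past any parents that contain an __init__.py.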
def import_module_and_submodules(package_name: str, exclude: Optional[Set[str]] = None) -> None:
"""
Import all submodules under the given package.
Primarily useful so that people using tango can specify their own custom packages
and have their custom classes get loaded and registered.
"""
# If `package_name` is in the form of a path, convert to the module format.
if "/" in package_name or package_name.endswith(".py"):
package_name, base_path = resolve_module_name(package_name)
else:
base_path = Path(".")
if exclude and package_name in exclude:
return
importlib.invalidate_caches()
# For some reason, python doesn't always add this by default to your path, but you pretty much
# always want it when using `--include-package`. And if it's already there, adding it again at
# the end won't hurt anything.
with push_python_path(base_path):
# Import at top level
module = importlib.import_module(package_name)
path = getattr(module, "__path__", [])
path_string = "" if not path else path[0]
# walk_packages only finds immediate children, so need to recurse.
for module_finder, name, _ in pkgutil.walk_packages(path):
# Sometimes when you import third-party libraries that are on your path,
# `pkgutil.walk_packages` returns those too, so we need to skip them.
if path_string and module_finder.path != path_string: # type: ignore[union-attr]
continue
subpackage = f"{package_name}.{name}"
import_module_and_submodules(subpackage, exclude=exclude)
def _parse_bool(value: Union[bool, str]) -> bool:
if isinstance(value, bool):
return value
if value in {"1", "true", "True", "TRUE"}:
return True
return False
def _parse_optional_int(value: Optional[str]) -> Optional[int]:
if value is not None:
return int(value)
return None
def find_submodules(
module: Optional[str] = None,
match: Optional[Set[str]] = None,
exclude: Optional[Set[str]] = None,
recursive: bool = True,
) -> Iterable[str]:
"""
Find tango submodules.
"""
from fnmatch import fnmatch
root = Path(__file__).parent / ".."
if module:
if module.startswith("tango."):
module = module.replace("tango.", "", 1)
for m in module.split("."):
root = root / m
module = f"tango.{module}"
else:
module = "tango"
for path in root.iterdir():
if path.name[0] in {"_", "."}:
continue
submodule: str
if path.is_dir():
submodule = path.name
elif path.suffix == ".py":
submodule = path.name[:-3]
else:
continue
submodule = f"{module}.{submodule}"
if exclude and any((fnmatch(submodule, pat) for pat in exclude)):
continue
if match and not any((fnmatch(submodule, pat) for pat in match)):
continue
yield submodule
if recursive and path.is_dir():
yield from find_submodules(submodule, match=match, exclude=exclude)
def find_integrations() -> Iterable[str]:
"""
Find all tango integration modules.
"""
yield from find_submodules("tango.integrations", recursive=False)
SAFE_FILENAME_CHARS = frozenset("-_.%s%s" % (string.ascii_letters, string.digits))
def filename_is_safe(filename: str) -> bool:
return all(c in SAFE_FILENAME_CHARS for c in filename)
def could_be_class_name(name: str) -> bool:
if "." in name and not name.endswith("."):
return all([_is_valid_python_name(part) for part in name.split(".")])
else:
return False
def _is_valid_python_name(name: str) -> bool:
return bool(name and name[0].isalpha() and name.replace("_", "").isalnum())
def threaded_generator(g, queue_size: int = 16):
"""
Puts the generating side of a generator into its own thread
Let's say you have a generator that reads records from disk, and something that consumes the
generator that spends most of its time in PyTorch. Wouldn't it be great if you could read more
records while the PyTorch code runs? If you wrap your record-reading generator with
``threaded_generator(inner)``, that's exactly what happens. The reading code will run in a new thread,
while the consuming code runs in the main thread as normal. ``threaded_generator()`` uses a queue
to hand off items.
:param queue_size: the maximum queue size for hand-offs between the main thread and the generator thread
"""
from queue import Queue
from threading import Thread
q: Queue = Queue(maxsize=queue_size)
sentinel = object()
def fill_queue():
try:
for value in g:
q.put(value)
finally:
q.put(sentinel)
thread = Thread(name=repr(g), target=fill_queue, daemon=True)
thread.start()
yield from iter(q.get, sentinel)
thread.join()
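def _example_threaded_generator() -> None:
    # Hedged sketch: the "reader" generator runs in a background thread while
    # the main thread consumes its items. The sleep stands in for disk I/O.
    import time
    def read_records():
        for i in range(5):
            time.sleep(0.01)  # pretend this is slow disk I/O
            yield i
    total = sum(threaded_generator(read_records(), queue_size=4))
    assert total == 10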
def exception_to_string(e: BaseException) -> str:
"""
    Generates a string that contains the given exception plus its stack frames.
    This became trivial in Python 3.10, but we need to run on Python 3.7 as well.
"""
if sys.version_info >= (3, 10):
formatted = traceback.format_exception(e)
else:
formatted = traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)
return "".join(formatted)
def utc_now_datetime() -> datetime:
return datetime.utcnow().replace(tzinfo=pytz.utc)
def local_timezone() -> Optional[tzinfo]:
return datetime.now().astimezone().tzinfo
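def _example_timezones() -> None:
    # Hedged sketch: both helpers above return timezone-aware values on
    # typical systems.
    assert utc_now_datetime().tzinfo is not None
    assert local_timezone() is not None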
# File: ddq_1/lang/fol_quant.py  (repo: jadnohra/connect, license: Unlicense)
'''
Quantification rules for first-order logic.
References:
- Symbolic Logic, Copi, p.396
'''
from .fol_lang import Wff, PropVarWff, BinaryWff, PropositionalVariable, NegWff
class QuantRule:
    def applies_to(self):
        # Stub: concrete quantification rules are expected to override this.
        pass
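# Hedged illustration (not from the source): one way a concrete rule might
# subclass QuantRule. Only the class names imported above are assumed; the
# real Wff API may differ.
class _ExampleNegationRule(QuantRule):
    def applies_to(self, wff):
        # Illustrative check: this hypothetical rule applies to negated wffs.
        return isinstance(wff, NegWff)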
# File: google/ads/google_ads/v6/proto/resources/ad_group_pb2.py  (repo: arammaliachi/google-ads-python, license: Apache-2.0)
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v6/resources/ad_group.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v6.proto.common import custom_parameter_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_common_dot_custom__parameter__pb2
from google.ads.google_ads.v6.proto.common import explorer_auto_optimizer_setting_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_common_dot_explorer__auto__optimizer__setting__pb2
from google.ads.google_ads.v6.proto.common import targeting_setting_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_common_dot_targeting__setting__pb2
from google.ads.google_ads.v6.proto.enums import ad_group_ad_rotation_mode_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__ad__rotation__mode__pb2
from google.ads.google_ads.v6.proto.enums import ad_group_status_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__status__pb2
from google.ads.google_ads.v6.proto.enums import ad_group_type_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__type__pb2
from google.ads.google_ads.v6.proto.enums import bidding_source_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_bidding__source__pb2
from google.ads.google_ads.v6.proto.enums import targeting_dimension_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_targeting__dimension__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/resources/ad_group.proto',
package='google.ads.googleads.v6.resources',
syntax='proto3',
serialized_options=b'\n%com.google.ads.googleads.v6.resourcesB\014AdGroupProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V6.Resources\312\002!Google\\Ads\\GoogleAds\\V6\\Resources\352\002%Google::Ads::GoogleAds::V6::Resources',
create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n0google/ads/googleads/v6/resources/ad_group.proto\x12!google.ads.googleads.v6.resources\x1a\x35google/ads/googleads/v6/common/custom_parameter.proto\x1a\x44google/ads/googleads/v6/common/explorer_auto_optimizer_setting.proto\x1a\x36google/ads/googleads/v6/common/targeting_setting.proto\x1a=google/ads/googleads/v6/enums/ad_group_ad_rotation_mode.proto\x1a\x33google/ads/googleads/v6/enums/ad_group_status.proto\x1a\x31google/ads/googleads/v6/enums/ad_group_type.proto\x1a\x32google/ads/googleads/v6/enums/bidding_source.proto\x1a\x37google/ads/googleads/v6/enums/targeting_dimension.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1cgoogle/api/annotations.proto\"\x81\x0f\n\x07\x41\x64Group\x12?\n\rresource_name\x18\x01 \x01(\tB(\xe0\x41\x05\xfa\x41\"\n googleads.googleapis.com/AdGroup\x12\x14\n\x02id\x18\" \x01(\x03\x42\x03\xe0\x41\x03H\x00\x88\x01\x01\x12\x11\n\x04name\x18# \x01(\tH\x01\x88\x01\x01\x12N\n\x06status\x18\x05 \x01(\x0e\x32>.google.ads.googleads.v6.enums.AdGroupStatusEnum.AdGroupStatus\x12M\n\x04type\x18\x0c \x01(\x0e\x32:.google.ads.googleads.v6.enums.AdGroupTypeEnum.AdGroupTypeB\x03\xe0\x41\x05\x12h\n\x10\x61\x64_rotation_mode\x18\x16 \x01(\x0e\x32N.google.ads.googleads.v6.enums.AdGroupAdRotationModeEnum.AdGroupAdRotationMode\x12\x44\n\rbase_ad_group\x18$ \x01(\tB(\xe0\x41\x03\xfa\x41\"\n googleads.googleapis.com/AdGroupH\x02\x88\x01\x01\x12\"\n\x15tracking_url_template\x18% \x01(\tH\x03\x88\x01\x01\x12N\n\x15url_custom_parameters\x18\x06 \x03(\x0b\x32/.google.ads.googleads.v6.common.CustomParameter\x12@\n\x08\x63\x61mpaign\x18& \x01(\tB)\xe0\x41\x05\xfa\x41#\n!googleads.googleapis.com/CampaignH\x04\x88\x01\x01\x12\x1b\n\x0e\x63pc_bid_micros\x18\' \x01(\x03H\x05\x88\x01\x01\x12\x1b\n\x0e\x63pm_bid_micros\x18( \x01(\x03H\x06\x88\x01\x01\x12\x1e\n\x11target_cpa_micros\x18) \x01(\x03H\x07\x88\x01\x01\x12 \n\x0e\x63pv_bid_micros\x18* \x01(\x03\x42\x03\xe0\x41\x03H\x08\x88\x01\x01\x12\x1e\n\x11target_cpm_micros\x18+ \x01(\x03H\t\x88\x01\x01\x12\x18\n\x0btarget_roas\x18, \x01(\x01H\n\x88\x01\x01\x12#\n\x16percent_cpc_bid_micros\x18- \x01(\x03H\x0b\x88\x01\x01\x12\x65\n\x1f\x65xplorer_auto_optimizer_setting\x18\x15 \x01(\x0b\x32<.google.ads.googleads.v6.common.ExplorerAutoOptimizerSetting\x12n\n\x1c\x64isplay_custom_bid_dimension\x18\x17 \x01(\x0e\x32H.google.ads.googleads.v6.enums.TargetingDimensionEnum.TargetingDimension\x12\x1d\n\x10\x66inal_url_suffix\x18. '
  b'\x01(\tH\x0c\x88\x01\x01\x12K\n\x11targeting_setting\x18\x19 \x01(\x0b\x32\x30.google.ads.googleads.v6.common.TargetingSetting\x12-\n\x1b\x65\x66\x66\x65\x63tive_target_cpa_micros\x18/ \x01(\x03\x42\x03\xe0\x41\x03H\r\x88\x01\x01\x12h\n\x1b\x65\x66\x66\x65\x63tive_target_cpa_source\x18\x1d \x01(\x0e\x32>.google.ads.googleads.v6.enums.BiddingSourceEnum.BiddingSourceB\x03\xe0\x41\x03\x12\'\n\x15\x65\x66\x66\x65\x63tive_target_roas\x18\x30 \x01(\x01\x42\x03\xe0\x41\x03H\x0e\x88\x01\x01\x12i\n\x1c\x65\x66\x66\x65\x63tive_target_roas_source\x18 \x01(\x0e\x32>.google.ads.googleads.v6.enums.BiddingSourceEnum.BiddingSourceB\x03\xe0\x41\x03\x12=\n\x06labels\x18\x31 \x03(\tB-\xe0\x41\x03\xfa\x41\'\n%googleads.googleapis.com/AdGroupLabel:U\xea\x41R\n googleads.googleapis.com/AdGroup\x12.customers/{customer_id}/adGroups/{ad_group_id}B\x05\n\x03_idB\x07\n\x05_nameB\x10\n\x0e_base_ad_groupB\x18\n\x16_tracking_url_templateB\x0b\n\t_campaignB\x11\n\x0f_cpc_bid_microsB\x11\n\x0f_cpm_bid_microsB\x14\n\x12_target_cpa_microsB\x11\n\x0f_cpv_bid_microsB\x14\n\x12_target_cpm_microsB\x0e\n\x0c_target_roasB\x19\n\x17_percent_cpc_bid_microsB\x13\n\x11_final_url_suffixB\x1e\n\x1c_effective_target_cpa_microsB\x18\n\x16_effective_target_roasB\xf9\x01\n%com.google.ads.googleads.v6.resourcesB\x0c\x41\x64GroupProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V6.Resources\xca\x02!Google\\Ads\\GoogleAds\\V6\\Resources\xea\x02%Google::Ads::GoogleAds::V6::Resourcesb\x06proto3'
,
dependencies=[google_dot_ads_dot_googleads_dot_v6_dot_common_dot_custom__parameter__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_common_dot_explorer__auto__optimizer__setting__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_common_dot_targeting__setting__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__ad__rotation__mode__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__status__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_bidding__source__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_targeting__dimension__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_ADGROUP = _descriptor.Descriptor(
name='AdGroup',
full_name='google.ads.googleads.v6.resources.AdGroup',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v6.resources.AdGroup.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005\372A\"\n googleads.googleapis.com/AdGroup', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='id', full_name='google.ads.googleads.v6.resources.AdGroup.id', index=1,
number=34, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='google.ads.googleads.v6.resources.AdGroup.name', index=2,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status', full_name='google.ads.googleads.v6.resources.AdGroup.status', index=3,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='google.ads.googleads.v6.resources.AdGroup.type', index=4,
number=12, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ad_rotation_mode', full_name='google.ads.googleads.v6.resources.AdGroup.ad_rotation_mode', index=5,
number=22, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='base_ad_group', full_name='google.ads.googleads.v6.resources.AdGroup.base_ad_group', index=6,
number=36, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003\372A\"\n googleads.googleapis.com/AdGroup', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tracking_url_template', full_name='google.ads.googleads.v6.resources.AdGroup.tracking_url_template', index=7,
number=37, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='url_custom_parameters', full_name='google.ads.googleads.v6.resources.AdGroup.url_custom_parameters', index=8,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='campaign', full_name='google.ads.googleads.v6.resources.AdGroup.campaign', index=9,
number=38, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005\372A#\n!googleads.googleapis.com/Campaign', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cpc_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroup.cpc_bid_micros', index=10,
number=39, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cpm_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroup.cpm_bid_micros', index=11,
number=40, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='target_cpa_micros', full_name='google.ads.googleads.v6.resources.AdGroup.target_cpa_micros', index=12,
number=41, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cpv_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroup.cpv_bid_micros', index=13,
number=42, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='target_cpm_micros', full_name='google.ads.googleads.v6.resources.AdGroup.target_cpm_micros', index=14,
number=43, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='target_roas', full_name='google.ads.googleads.v6.resources.AdGroup.target_roas', index=15,
number=44, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='percent_cpc_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroup.percent_cpc_bid_micros', index=16,
number=45, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='explorer_auto_optimizer_setting', full_name='google.ads.googleads.v6.resources.AdGroup.explorer_auto_optimizer_setting', index=17,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='display_custom_bid_dimension', full_name='google.ads.googleads.v6.resources.AdGroup.display_custom_bid_dimension', index=18,
number=23, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='final_url_suffix', full_name='google.ads.googleads.v6.resources.AdGroup.final_url_suffix', index=19,
number=46, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='targeting_setting', full_name='google.ads.googleads.v6.resources.AdGroup.targeting_setting', index=20,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_target_cpa_micros', full_name='google.ads.googleads.v6.resources.AdGroup.effective_target_cpa_micros', index=21,
number=47, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_target_cpa_source', full_name='google.ads.googleads.v6.resources.AdGroup.effective_target_cpa_source', index=22,
number=29, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_target_roas', full_name='google.ads.googleads.v6.resources.AdGroup.effective_target_roas', index=23,
number=48, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_target_roas_source', full_name='google.ads.googleads.v6.resources.AdGroup.effective_target_roas_source', index=24,
number=32, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='labels', full_name='google.ads.googleads.v6.resources.AdGroup.labels', index=25,
number=49, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003\372A\'\n%googleads.googleapis.com/AdGroupLabel', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\352AR\n googleads.googleapis.com/AdGroup\022.customers/{customer_id}/adGroups/{ad_group_id}',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_id', full_name='google.ads.googleads.v6.resources.AdGroup._id',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_name', full_name='google.ads.googleads.v6.resources.AdGroup._name',
index=1, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_base_ad_group', full_name='google.ads.googleads.v6.resources.AdGroup._base_ad_group',
index=2, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_tracking_url_template', full_name='google.ads.googleads.v6.resources.AdGroup._tracking_url_template',
index=3, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_campaign', full_name='google.ads.googleads.v6.resources.AdGroup._campaign',
index=4, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_cpc_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroup._cpc_bid_micros',
index=5, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_cpm_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroup._cpm_bid_micros',
index=6, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_target_cpa_micros', full_name='google.ads.googleads.v6.resources.AdGroup._target_cpa_micros',
index=7, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_cpv_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroup._cpv_bid_micros',
index=8, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_target_cpm_micros', full_name='google.ads.googleads.v6.resources.AdGroup._target_cpm_micros',
index=9, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_target_roas', full_name='google.ads.googleads.v6.resources.AdGroup._target_roas',
index=10, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_percent_cpc_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroup._percent_cpc_bid_micros',
index=11, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_final_url_suffix', full_name='google.ads.googleads.v6.resources.AdGroup._final_url_suffix',
index=12, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_effective_target_cpa_micros', full_name='google.ads.googleads.v6.resources.AdGroup._effective_target_cpa_micros',
index=13, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_effective_target_roas', full_name='google.ads.googleads.v6.resources.AdGroup._effective_target_roas',
index=14, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=635,
serialized_end=2556,
)
_ADGROUP.fields_by_name['status'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__status__pb2._ADGROUPSTATUSENUM_ADGROUPSTATUS
_ADGROUP.fields_by_name['type'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__type__pb2._ADGROUPTYPEENUM_ADGROUPTYPE
_ADGROUP.fields_by_name['ad_rotation_mode'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__ad__rotation__mode__pb2._ADGROUPADROTATIONMODEENUM_ADGROUPADROTATIONMODE
_ADGROUP.fields_by_name['url_custom_parameters'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_custom__parameter__pb2._CUSTOMPARAMETER
_ADGROUP.fields_by_name['explorer_auto_optimizer_setting'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_explorer__auto__optimizer__setting__pb2._EXPLORERAUTOOPTIMIZERSETTING
_ADGROUP.fields_by_name['display_custom_bid_dimension'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_targeting__dimension__pb2._TARGETINGDIMENSIONENUM_TARGETINGDIMENSION
_ADGROUP.fields_by_name['targeting_setting'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_targeting__setting__pb2._TARGETINGSETTING
_ADGROUP.fields_by_name['effective_target_cpa_source'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_bidding__source__pb2._BIDDINGSOURCEENUM_BIDDINGSOURCE
_ADGROUP.fields_by_name['effective_target_roas_source'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_bidding__source__pb2._BIDDINGSOURCEENUM_BIDDINGSOURCE
_ADGROUP.oneofs_by_name['_id'].fields.append(
_ADGROUP.fields_by_name['id'])
_ADGROUP.fields_by_name['id'].containing_oneof = _ADGROUP.oneofs_by_name['_id']
_ADGROUP.oneofs_by_name['_name'].fields.append(
_ADGROUP.fields_by_name['name'])
_ADGROUP.fields_by_name['name'].containing_oneof = _ADGROUP.oneofs_by_name['_name']
_ADGROUP.oneofs_by_name['_base_ad_group'].fields.append(
_ADGROUP.fields_by_name['base_ad_group'])
_ADGROUP.fields_by_name['base_ad_group'].containing_oneof = _ADGROUP.oneofs_by_name['_base_ad_group']
_ADGROUP.oneofs_by_name['_tracking_url_template'].fields.append(
_ADGROUP.fields_by_name['tracking_url_template'])
_ADGROUP.fields_by_name['tracking_url_template'].containing_oneof = _ADGROUP.oneofs_by_name['_tracking_url_template']
_ADGROUP.oneofs_by_name['_campaign'].fields.append(
_ADGROUP.fields_by_name['campaign'])
_ADGROUP.fields_by_name['campaign'].containing_oneof = _ADGROUP.oneofs_by_name['_campaign']
_ADGROUP.oneofs_by_name['_cpc_bid_micros'].fields.append(
_ADGROUP.fields_by_name['cpc_bid_micros'])
_ADGROUP.fields_by_name['cpc_bid_micros'].containing_oneof = _ADGROUP.oneofs_by_name['_cpc_bid_micros']
_ADGROUP.oneofs_by_name['_cpm_bid_micros'].fields.append(
_ADGROUP.fields_by_name['cpm_bid_micros'])
_ADGROUP.fields_by_name['cpm_bid_micros'].containing_oneof = _ADGROUP.oneofs_by_name['_cpm_bid_micros']
_ADGROUP.oneofs_by_name['_target_cpa_micros'].fields.append(
_ADGROUP.fields_by_name['target_cpa_micros'])
_ADGROUP.fields_by_name['target_cpa_micros'].containing_oneof = _ADGROUP.oneofs_by_name['_target_cpa_micros']
_ADGROUP.oneofs_by_name['_cpv_bid_micros'].fields.append(
_ADGROUP.fields_by_name['cpv_bid_micros'])
_ADGROUP.fields_by_name['cpv_bid_micros'].containing_oneof = _ADGROUP.oneofs_by_name['_cpv_bid_micros']
_ADGROUP.oneofs_by_name['_target_cpm_micros'].fields.append(
_ADGROUP.fields_by_name['target_cpm_micros'])
_ADGROUP.fields_by_name['target_cpm_micros'].containing_oneof = _ADGROUP.oneofs_by_name['_target_cpm_micros']
_ADGROUP.oneofs_by_name['_target_roas'].fields.append(
_ADGROUP.fields_by_name['target_roas'])
_ADGROUP.fields_by_name['target_roas'].containing_oneof = _ADGROUP.oneofs_by_name['_target_roas']
_ADGROUP.oneofs_by_name['_percent_cpc_bid_micros'].fields.append(
_ADGROUP.fields_by_name['percent_cpc_bid_micros'])
_ADGROUP.fields_by_name['percent_cpc_bid_micros'].containing_oneof = _ADGROUP.oneofs_by_name['_percent_cpc_bid_micros']
_ADGROUP.oneofs_by_name['_final_url_suffix'].fields.append(
_ADGROUP.fields_by_name['final_url_suffix'])
_ADGROUP.fields_by_name['final_url_suffix'].containing_oneof = _ADGROUP.oneofs_by_name['_final_url_suffix']
_ADGROUP.oneofs_by_name['_effective_target_cpa_micros'].fields.append(
_ADGROUP.fields_by_name['effective_target_cpa_micros'])
_ADGROUP.fields_by_name['effective_target_cpa_micros'].containing_oneof = _ADGROUP.oneofs_by_name['_effective_target_cpa_micros']
_ADGROUP.oneofs_by_name['_effective_target_roas'].fields.append(
_ADGROUP.fields_by_name['effective_target_roas'])
_ADGROUP.fields_by_name['effective_target_roas'].containing_oneof = _ADGROUP.oneofs_by_name['_effective_target_roas']
DESCRIPTOR.message_types_by_name['AdGroup'] = _ADGROUP
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AdGroup = _reflection.GeneratedProtocolMessageType('AdGroup', (_message.Message,), {
'DESCRIPTOR' : _ADGROUP,
'__module__' : 'google.ads.googleads.v6.resources.ad_group_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.resources.AdGroup)
})
_sym_db.RegisterMessage(AdGroup)
DESCRIPTOR._options = None
_ADGROUP.fields_by_name['resource_name']._options = None
_ADGROUP.fields_by_name['id']._options = None
_ADGROUP.fields_by_name['type']._options = None
_ADGROUP.fields_by_name['base_ad_group']._options = None
_ADGROUP.fields_by_name['campaign']._options = None
_ADGROUP.fields_by_name['cpv_bid_micros']._options = None
_ADGROUP.fields_by_name['effective_target_cpa_micros']._options = None
_ADGROUP.fields_by_name['effective_target_cpa_source']._options = None
_ADGROUP.fields_by_name['effective_target_roas']._options = None
_ADGROUP.fields_by_name['effective_target_roas_source']._options = None
_ADGROUP.fields_by_name['labels']._options = None
_ADGROUP._options = None
# @@protoc_insertion_point(module_scope)
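# Hedged usage sketch (illustration only, not part of the generated code):
# constructing and round-tripping the AdGroup message defined above. The IDs
# and bid values below are hypothetical placeholders.
if __name__ == "__main__":
    _example = AdGroup(
        resource_name="customers/1234567890/adGroups/9876543210",  # hypothetical IDs
        name="Example ad group",
        cpc_bid_micros=1_500_000,  # 1.50 in account currency, expressed in micros
    )
    _data = _example.SerializeToString()
    assert AdGroup.FromString(_data).name == "Example ad group"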
| 73.205
| 4,007
| 0.800287
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v6.proto.common import custom_parameter_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_common_dot_custom__parameter__pb2
from google.ads.google_ads.v6.proto.common import explorer_auto_optimizer_setting_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_common_dot_explorer__auto__optimizer__setting__pb2
from google.ads.google_ads.v6.proto.common import targeting_setting_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_common_dot_targeting__setting__pb2
from google.ads.google_ads.v6.proto.enums import ad_group_ad_rotation_mode_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__ad__rotation__mode__pb2
from google.ads.google_ads.v6.proto.enums import ad_group_status_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__status__pb2
from google.ads.google_ads.v6.proto.enums import ad_group_type_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__type__pb2
from google.ads.google_ads.v6.proto.enums import bidding_source_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_bidding__source__pb2
from google.ads.google_ads.v6.proto.enums import targeting_dimension_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_targeting__dimension__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/resources/ad_group.proto',
package='google.ads.googleads.v6.resources',
syntax='proto3',
serialized_options=b'\n%com.google.ads.googleads.v6.resourcesB\014AdGroupProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V6.Resources\312\002!Google\\Ads\\GoogleAds\\V6\\Resources\352\002%Google::Ads::GoogleAds::V6::Resources',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n0google/ads/googleads/v6/resources/ad_group.proto\x12!google.ads.googleads.v6.resources\x1a\x35google/ads/googleads/v6/common/custom_parameter.proto\x1a\x44google/ads/googleads/v6/common/explorer_auto_optimizer_setting.proto\x1a\x36google/ads/googleads/v6/common/targeting_setting.proto\x1a=google/ads/googleads/v6/enums/ad_group_ad_rotation_mode.proto\x1a\x33google/ads/googleads/v6/enums/ad_group_status.proto\x1a\x31google/ads/googleads/v6/enums/ad_group_type.proto\x1a\x32google/ads/googleads/v6/enums/bidding_source.proto\x1a\x37google/ads/googleads/v6/enums/targeting_dimension.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1cgoogle/api/annotations.proto\"\x81\x0f\n\x07\x41\x64Group\x12?\n\rresource_name\x18\x01 \x01(\tB(\xe0\x41\x05\xfa\x41\"\n googleads.googleapis.com/AdGroup\x12\x14\n\x02id\x18\" \x01(\x03\x42\x03\xe0\x41\x03H\x00\x88\x01\x01\x12\x11\n\x04name\x18# \x01(\tH\x01\x88\x01\x01\x12N\n\x06status\x18\x05 \x01(\x0e\x32>.google.ads.googleads.v6.enums.AdGroupStatusEnum.AdGroupStatus\x12M\n\x04type\x18\x0c \x01(\x0e\x32:.google.ads.googleads.v6.enums.AdGroupTypeEnum.AdGroupTypeB\x03\xe0\x41\x05\x12h\n\x10\x61\x64_rotation_mode\x18\x16 \x01(\x0e\x32N.google.ads.googleads.v6.enums.AdGroupAdRotationModeEnum.AdGroupAdRotationMode\x12\x44\n\rbase_ad_group\x18$ \x01(\tB(\xe0\x41\x03\xfa\x41\"\n googleads.googleapis.com/AdGroupH\x02\x88\x01\x01\x12\"\n\x15tracking_url_template\x18% \x01(\tH\x03\x88\x01\x01\x12N\n\x15url_custom_parameters\x18\x06 \x03(\x0b\x32/.google.ads.googleads.v6.common.CustomParameter\x12@\n\x08\x63\x61mpaign\x18& \x01(\tB)\xe0\x41\x05\xfa\x41#\n!googleads.googleapis.com/CampaignH\x04\x88\x01\x01\x12\x1b\n\x0e\x63pc_bid_micros\x18\' \x01(\x03H\x05\x88\x01\x01\x12\x1b\n\x0e\x63pm_bid_micros\x18( \x01(\x03H\x06\x88\x01\x01\x12\x1e\n\x11target_cpa_micros\x18) \x01(\x03H\x07\x88\x01\x01\x12 \n\x0e\x63pv_bid_micros\x18* \x01(\x03\x42\x03\xe0\x41\x03H\x08\x88\x01\x01\x12\x1e\n\x11target_cpm_micros\x18+ \x01(\x03H\t\x88\x01\x01\x12\x18\n\x0btarget_roas\x18, \x01(\x01H\n\x88\x01\x01\x12#\n\x16percent_cpc_bid_micros\x18- \x01(\x03H\x0b\x88\x01\x01\x12\x65\n\x1f\x65xplorer_auto_optimizer_setting\x18\x15 \x01(\x0b\x32<.google.ads.googleads.v6.common.ExplorerAutoOptimizerSetting\x12n\n\x1c\x64isplay_custom_bid_dimension\x18\x17 \x01(\x0e\x32H.google.ads.googleads.v6.enums.TargetingDimensionEnum.TargetingDimension\x12\x1d\n\x10\x66inal_url_suffix\x18. 
\x01(\tH\x0c\x88\x01\x01\x12K\n\x11targeting_setting\x18\x19 \x01(\x0b\x32\x30.google.ads.googleads.v6.common.TargetingSetting\x12-\n\x1b\x65\x66\x66\x65\x63tive_target_cpa_micros\x18/ \x01(\x03\x42\x03\xe0\x41\x03H\r\x88\x01\x01\x12h\n\x1b\x65\x66\x66\x65\x63tive_target_cpa_source\x18\x1d \x01(\x0e\x32>.google.ads.googleads.v6.enums.BiddingSourceEnum.BiddingSourceB\x03\xe0\x41\x03\x12\'\n\x15\x65\x66\x66\x65\x63tive_target_roas\x18\x30 \x01(\x01\x42\x03\xe0\x41\x03H\x0e\x88\x01\x01\x12i\n\x1c\x65\x66\x66\x65\x63tive_target_roas_source\x18 \x01(\x0e\x32>.google.ads.googleads.v6.enums.BiddingSourceEnum.BiddingSourceB\x03\xe0\x41\x03\x12=\n\x06labels\x18\x31 \x03(\tB-\xe0\x41\x03\xfa\x41\'\n%googleads.googleapis.com/AdGroupLabel:U\xea\x41R\n googleads.googleapis.com/AdGroup\x12.customers/{customer_id}/adGroups/{ad_group_id}B\x05\n\x03_idB\x07\n\x05_nameB\x10\n\x0e_base_ad_groupB\x18\n\x16_tracking_url_templateB\x0b\n\t_campaignB\x11\n\x0f_cpc_bid_microsB\x11\n\x0f_cpm_bid_microsB\x14\n\x12_target_cpa_microsB\x11\n\x0f_cpv_bid_microsB\x14\n\x12_target_cpm_microsB\x0e\n\x0c_target_roasB\x19\n\x17_percent_cpc_bid_microsB\x13\n\x11_final_url_suffixB\x1e\n\x1c_effective_target_cpa_microsB\x18\n\x16_effective_target_roasB\xf9\x01\n%com.google.ads.googleads.v6.resourcesB\x0c\x41\x64GroupProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V6.Resources\xca\x02!Google\\Ads\\GoogleAds\\V6\\Resources\xea\x02%Google::Ads::GoogleAds::V6::Resourcesb\x06proto3'
,
dependencies=[google_dot_ads_dot_googleads_dot_v6_dot_common_dot_custom__parameter__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_common_dot_explorer__auto__optimizer__setting__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_common_dot_targeting__setting__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__ad__rotation__mode__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__status__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_bidding__source__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_targeting__dimension__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_ADGROUP = _descriptor.Descriptor(
name='AdGroup',
full_name='google.ads.googleads.v6.resources.AdGroup',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v6.resources.AdGroup.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005\372A\"\n googleads.googleapis.com/AdGroup', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='id', full_name='google.ads.googleads.v6.resources.AdGroup.id', index=1,
number=34, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='google.ads.googleads.v6.resources.AdGroup.name', index=2,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status', full_name='google.ads.googleads.v6.resources.AdGroup.status', index=3,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='google.ads.googleads.v6.resources.AdGroup.type', index=4,
number=12, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ad_rotation_mode', full_name='google.ads.googleads.v6.resources.AdGroup.ad_rotation_mode', index=5,
number=22, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='base_ad_group', full_name='google.ads.googleads.v6.resources.AdGroup.base_ad_group', index=6,
number=36, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003\372A\"\n googleads.googleapis.com/AdGroup', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tracking_url_template', full_name='google.ads.googleads.v6.resources.AdGroup.tracking_url_template', index=7,
number=37, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='url_custom_parameters', full_name='google.ads.googleads.v6.resources.AdGroup.url_custom_parameters', index=8,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='campaign', full_name='google.ads.googleads.v6.resources.AdGroup.campaign', index=9,
number=38, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005\372A#\n!googleads.googleapis.com/Campaign', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cpc_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroup.cpc_bid_micros', index=10,
number=39, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cpm_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroup.cpm_bid_micros', index=11,
number=40, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='target_cpa_micros', full_name='google.ads.googleads.v6.resources.AdGroup.target_cpa_micros', index=12,
number=41, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cpv_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroup.cpv_bid_micros', index=13,
number=42, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='target_cpm_micros', full_name='google.ads.googleads.v6.resources.AdGroup.target_cpm_micros', index=14,
number=43, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='target_roas', full_name='google.ads.googleads.v6.resources.AdGroup.target_roas', index=15,
number=44, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='percent_cpc_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroup.percent_cpc_bid_micros', index=16,
number=45, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='explorer_auto_optimizer_setting', full_name='google.ads.googleads.v6.resources.AdGroup.explorer_auto_optimizer_setting', index=17,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='display_custom_bid_dimension', full_name='google.ads.googleads.v6.resources.AdGroup.display_custom_bid_dimension', index=18,
number=23, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='final_url_suffix', full_name='google.ads.googleads.v6.resources.AdGroup.final_url_suffix', index=19,
number=46, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='targeting_setting', full_name='google.ads.googleads.v6.resources.AdGroup.targeting_setting', index=20,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_target_cpa_micros', full_name='google.ads.googleads.v6.resources.AdGroup.effective_target_cpa_micros', index=21,
number=47, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_target_cpa_source', full_name='google.ads.googleads.v6.resources.AdGroup.effective_target_cpa_source', index=22,
number=29, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_target_roas', full_name='google.ads.googleads.v6.resources.AdGroup.effective_target_roas', index=23,
number=48, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_target_roas_source', full_name='google.ads.googleads.v6.resources.AdGroup.effective_target_roas_source', index=24,
number=32, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='labels', full_name='google.ads.googleads.v6.resources.AdGroup.labels', index=25,
number=49, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003\372A\'\n%googleads.googleapis.com/AdGroupLabel', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\352AR\n googleads.googleapis.com/AdGroup\022.customers/{customer_id}/adGroups/{ad_group_id}',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_id', full_name='google.ads.googleads.v6.resources.AdGroup._id',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_name', full_name='google.ads.googleads.v6.resources.AdGroup._name',
index=1, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_base_ad_group', full_name='google.ads.googleads.v6.resources.AdGroup._base_ad_group',
index=2, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_tracking_url_template', full_name='google.ads.googleads.v6.resources.AdGroup._tracking_url_template',
index=3, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_campaign', full_name='google.ads.googleads.v6.resources.AdGroup._campaign',
index=4, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_cpc_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroup._cpc_bid_micros',
index=5, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_cpm_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroup._cpm_bid_micros',
index=6, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_target_cpa_micros', full_name='google.ads.googleads.v6.resources.AdGroup._target_cpa_micros',
index=7, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_cpv_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroup._cpv_bid_micros',
index=8, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_target_cpm_micros', full_name='google.ads.googleads.v6.resources.AdGroup._target_cpm_micros',
index=9, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_target_roas', full_name='google.ads.googleads.v6.resources.AdGroup._target_roas',
index=10, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_percent_cpc_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroup._percent_cpc_bid_micros',
index=11, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_final_url_suffix', full_name='google.ads.googleads.v6.resources.AdGroup._final_url_suffix',
index=12, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_effective_target_cpa_micros', full_name='google.ads.googleads.v6.resources.AdGroup._effective_target_cpa_micros',
index=13, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_effective_target_roas', full_name='google.ads.googleads.v6.resources.AdGroup._effective_target_roas',
index=14, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=635,
serialized_end=2556,
)
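# Resolve cross-module references now that all descriptors exist: enum- and
# message-typed fields are pointed at descriptors imported from the enums/
# and common/ proto modules.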
_ADGROUP.fields_by_name['status'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__status__pb2._ADGROUPSTATUSENUM_ADGROUPSTATUS
_ADGROUP.fields_by_name['type'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__type__pb2._ADGROUPTYPEENUM_ADGROUPTYPE
_ADGROUP.fields_by_name['ad_rotation_mode'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__ad__rotation__mode__pb2._ADGROUPADROTATIONMODEENUM_ADGROUPADROTATIONMODE
_ADGROUP.fields_by_name['url_custom_parameters'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_custom__parameter__pb2._CUSTOMPARAMETER
_ADGROUP.fields_by_name['explorer_auto_optimizer_setting'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_explorer__auto__optimizer__setting__pb2._EXPLORERAUTOOPTIMIZERSETTING
_ADGROUP.fields_by_name['display_custom_bid_dimension'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_targeting__dimension__pb2._TARGETINGDIMENSIONENUM_TARGETINGDIMENSION
_ADGROUP.fields_by_name['targeting_setting'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_targeting__setting__pb2._TARGETINGSETTING
_ADGROUP.fields_by_name['effective_target_cpa_source'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_bidding__source__pb2._BIDDINGSOURCEENUM_BIDDINGSOURCE
_ADGROUP.fields_by_name['effective_target_roas_source'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_bidding__source__pb2._BIDDINGSOURCEENUM_BIDDINGSOURCE
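# Wire each proto3 optional scalar field into its synthetic oneof (_id, _name,
# ...) so the generated message can report field presence via HasField().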
_ADGROUP.oneofs_by_name['_id'].fields.append(
_ADGROUP.fields_by_name['id'])
_ADGROUP.fields_by_name['id'].containing_oneof = _ADGROUP.oneofs_by_name['_id']
_ADGROUP.oneofs_by_name['_name'].fields.append(
_ADGROUP.fields_by_name['name'])
_ADGROUP.fields_by_name['name'].containing_oneof = _ADGROUP.oneofs_by_name['_name']
_ADGROUP.oneofs_by_name['_base_ad_group'].fields.append(
_ADGROUP.fields_by_name['base_ad_group'])
_ADGROUP.fields_by_name['base_ad_group'].containing_oneof = _ADGROUP.oneofs_by_name['_base_ad_group']
_ADGROUP.oneofs_by_name['_tracking_url_template'].fields.append(
_ADGROUP.fields_by_name['tracking_url_template'])
_ADGROUP.fields_by_name['tracking_url_template'].containing_oneof = _ADGROUP.oneofs_by_name['_tracking_url_template']
_ADGROUP.oneofs_by_name['_campaign'].fields.append(
_ADGROUP.fields_by_name['campaign'])
_ADGROUP.fields_by_name['campaign'].containing_oneof = _ADGROUP.oneofs_by_name['_campaign']
_ADGROUP.oneofs_by_name['_cpc_bid_micros'].fields.append(
_ADGROUP.fields_by_name['cpc_bid_micros'])
_ADGROUP.fields_by_name['cpc_bid_micros'].containing_oneof = _ADGROUP.oneofs_by_name['_cpc_bid_micros']
_ADGROUP.oneofs_by_name['_cpm_bid_micros'].fields.append(
_ADGROUP.fields_by_name['cpm_bid_micros'])
_ADGROUP.fields_by_name['cpm_bid_micros'].containing_oneof = _ADGROUP.oneofs_by_name['_cpm_bid_micros']
_ADGROUP.oneofs_by_name['_target_cpa_micros'].fields.append(
_ADGROUP.fields_by_name['target_cpa_micros'])
_ADGROUP.fields_by_name['target_cpa_micros'].containing_oneof = _ADGROUP.oneofs_by_name['_target_cpa_micros']
_ADGROUP.oneofs_by_name['_cpv_bid_micros'].fields.append(
_ADGROUP.fields_by_name['cpv_bid_micros'])
_ADGROUP.fields_by_name['cpv_bid_micros'].containing_oneof = _ADGROUP.oneofs_by_name['_cpv_bid_micros']
_ADGROUP.oneofs_by_name['_target_cpm_micros'].fields.append(
_ADGROUP.fields_by_name['target_cpm_micros'])
_ADGROUP.fields_by_name['target_cpm_micros'].containing_oneof = _ADGROUP.oneofs_by_name['_target_cpm_micros']
_ADGROUP.oneofs_by_name['_target_roas'].fields.append(
_ADGROUP.fields_by_name['target_roas'])
_ADGROUP.fields_by_name['target_roas'].containing_oneof = _ADGROUP.oneofs_by_name['_target_roas']
_ADGROUP.oneofs_by_name['_percent_cpc_bid_micros'].fields.append(
_ADGROUP.fields_by_name['percent_cpc_bid_micros'])
_ADGROUP.fields_by_name['percent_cpc_bid_micros'].containing_oneof = _ADGROUP.oneofs_by_name['_percent_cpc_bid_micros']
_ADGROUP.oneofs_by_name['_final_url_suffix'].fields.append(
_ADGROUP.fields_by_name['final_url_suffix'])
_ADGROUP.fields_by_name['final_url_suffix'].containing_oneof = _ADGROUP.oneofs_by_name['_final_url_suffix']
_ADGROUP.oneofs_by_name['_effective_target_cpa_micros'].fields.append(
_ADGROUP.fields_by_name['effective_target_cpa_micros'])
_ADGROUP.fields_by_name['effective_target_cpa_micros'].containing_oneof = _ADGROUP.oneofs_by_name['_effective_target_cpa_micros']
_ADGROUP.oneofs_by_name['_effective_target_roas'].fields.append(
_ADGROUP.fields_by_name['effective_target_roas'])
_ADGROUP.fields_by_name['effective_target_roas'].containing_oneof = _ADGROUP.oneofs_by_name['_effective_target_roas']
DESCRIPTOR.message_types_by_name['AdGroup'] = _ADGROUP
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AdGroup = _reflection.GeneratedProtocolMessageType('AdGroup', (_message.Message,), {
'DESCRIPTOR' : _ADGROUP,
'__module__' : 'google.ads.googleads.v6.resources.ad_group_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.resources.AdGroup)
})
_sym_db.RegisterMessage(AdGroup)
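# Clear cached descriptor options; they are re-parsed lazily from the
# serialized_options bytes on first access (a standard pattern in protoc
# output for descriptors that carry options).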
DESCRIPTOR._options = None
_ADGROUP.fields_by_name['resource_name']._options = None
_ADGROUP.fields_by_name['id']._options = None
_ADGROUP.fields_by_name['type']._options = None
_ADGROUP.fields_by_name['base_ad_group']._options = None
_ADGROUP.fields_by_name['campaign']._options = None
_ADGROUP.fields_by_name['cpv_bid_micros']._options = None
_ADGROUP.fields_by_name['effective_target_cpa_micros']._options = None
_ADGROUP.fields_by_name['effective_target_cpa_source']._options = None
_ADGROUP.fields_by_name['effective_target_roas']._options = None
_ADGROUP.fields_by_name['effective_target_roas_source']._options = None
_ADGROUP.fields_by_name['labels']._options = None
_ADGROUP._options = None
# @@protoc_insertion_point(module_scope)
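# Minimal usage sketch, assuming this module is importable as part of an
# installed google.ads.googleads package (the sketch is not part of the
# protoc output above). Field names come from the descriptors in this file;
# optional scalars live in synthetic oneofs, so HasField() reports presence.
if __name__ == '__main__':
    ad_group = AdGroup()
    ad_group.name = 'Example ad group'
    ad_group.cpc_bid_micros = 1500000  # monetary amounts are expressed in micros
    assert ad_group.HasField('cpc_bid_micros')
    data = ad_group.SerializeToString()  # round-trip through the wire format
    print(AdGroup.FromString(data).name)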
is_comment_constant_removed: true
is_sharp_comment_removed: true

hexsha: f719701d150a3482167aae75965a980e8a9f516b
size: 2199
ext: py
lang: Python
max_stars_repo_path: backend/wazzle_33192/urls.py
max_stars_repo_name: crowdbotics-apps/wazzle-33192
max_stars_repo_head_hexsha: 6203ab17b0c80344f1b15d1d5452bfdd2e6559bd
max_stars_repo_licenses: ["FTL", "AML", "RSA-MD"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: backend/wazzle_33192/urls.py
max_issues_repo_name: crowdbotics-apps/wazzle-33192
max_issues_repo_head_hexsha: 6203ab17b0c80344f1b15d1d5452bfdd2e6559bd
max_issues_repo_licenses: ["FTL", "AML", "RSA-MD"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: backend/wazzle_33192/urls.py
max_forks_repo_name: crowdbotics-apps/wazzle-33192
max_forks_repo_head_hexsha: 6203ab17b0c80344f1b15d1d5452bfdd2e6559bd
max_forks_repo_licenses: ["FTL", "AML", "RSA-MD"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
"""wazzle_33192 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Wazzle"
admin.site.site_title = "Wazzle Admin Portal"
admin.site.index_title = "Wazzle Admin"
# Swagger / OpenAPI schema and UI (drf_yasg); the IsAuthenticated permission
# below restricts the generated docs to logged-in users.
api_info = openapi.Info(
title="Wazzle API",
default_version="v1",
description="API documentation for Wazzle App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
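# Serve the single-page app last: the empty path and the catch-all regex both
# fall through to index.html, so they must stay at the end of urlpatterns or
# they would shadow every route registered above.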
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
avg_line_length: 34.904762
max_line_length: 87
alphanum_fraction: 0.710778
content_no_comment:
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Wazzle"
admin.site.site_title = "Wazzle Admin Portal"
admin.site.index_title = "Wazzle Admin"
api_info = openapi.Info(
title="Wazzle API",
default_version="v1",
description="API documentation for Wazzle App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
is_comment_constant_removed: true
is_sharp_comment_removed: true