Dataset schema:
- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 4 to 721)
- content_id: string (length 40)
- detected_licenses: list (length 0 to 57)
- license_type: string (2 classes)
- repo_name: string (length 5 to 91)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (321 classes)
- visit_date: timestamp[ns] (2016-08-12 09:31:09 to 2023-09-06 10:45:07)
- revision_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
- committer_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
- github_id: int64 (426 to 681M)
- star_events_count: int64 (101 to 243k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[ns] (2012-06-28 18:51:49 to 2023-09-14 21:59:16, nullable)
- gha_created_at: timestamp[ns] (2008-02-11 22:55:26 to 2023-08-10 11:14:58, nullable)
- gha_language: string (147 classes)
- src_encoding: string (26 classes)
- language: string (2 classes)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (6 to 10.2M)
- extension: string (115 classes)
- filename: string (length 3 to 113)
- content: string (length 6 to 10.2M)
9df026c59e821730f268df55618abaf9c8173f10
|
76a1601da076db21fca2061252c542b146db5d42
|
/tests/test_benchmark.py
|
e5f2ebc14f4800cc6c543fcde634abed0f22c01b
|
[
"MIT"
] |
permissive
|
kyan001/ping3
|
247a9fc8439e5c4c506d866fe3f32d1a40e00296
|
9fff91181ea605d1957643af381ee5fb7f3e558a
|
refs/heads/master
| 2023-06-07T22:20:44.020341
| 2022-12-15T11:55:18
| 2022-12-15T11:55:18
| 111,969,722
| 293
| 72
|
MIT
| 2023-04-18T16:15:11
| 2017-11-25T02:16:11
|
Python
|
UTF-8
|
Python
| false
| false
| 706
|
py
|
test_benchmark.py
|
import os
import sys
import timeit
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import ping3 # noqa: linter (pycodestyle) should not lint this line.
dev_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
stmt = "ping3.ping('127.0.0.1')"
setup = "import sys; sys.path.insert(0, '{}'); import ping3; print('ping3 version:', ping3.__version__)".format(dev_dir)
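# timeit runs `setup` once per measurement, so the import and sys.path tweak
# are excluded from the timing; only the ping call itself is measured.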
for count in (1, 10, 100, 1000, 5000):
print("Testing `{stmt}` {num} times...".format(stmt=stmt, num=count))
duration = timeit.timeit(stmt, setup=setup, number=count)
print("Duration: {drtn:.3f} seconds. {d:.1f} ms/ping".format(drtn=duration, d=duration * 1000 / count))
print()
|
a2fc00c6148574a23da0f795067dd568150e9003
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/PhysicsTools/Utilities/python/pileupFilter_cff.py
|
9b997348489bb365ba2d5acf1a6e3de34edd3de8
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,041
|
py
|
pileupFilter_cff.py
|
import FWCore.ParameterSet.Config as cms
from PhysicsTools.Utilities.pileupFilter_cfi import *
pileupFilter.pileupInfoSummaryInputTag = cms.InputTag("addPileupInfo")
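# Each clone below selects an adjacent 5-unit pileup window via its
# minPU/maxPU bounds, giving nine filters that span PU 20 to 65.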
pu20to25 = pileupFilter.clone()
pu20to25.minPU = cms.double(20)
pu20to25.maxPU = cms.double(25)
pu25to30 = pileupFilter.clone()
pu25to30.minPU = cms.double(25)
pu25to30.maxPU = cms.double(30)
pu30to35 = pileupFilter.clone()
pu30to35.minPU = cms.double(30)
pu30to35.maxPU = cms.double(35)
pu35to40 = pileupFilter.clone()
pu35to40.minPU = cms.double(35)
pu35to40.maxPU = cms.double(40)
pu40to45 = pileupFilter.clone()
pu40to45.minPU = cms.double(40)
pu40to45.maxPU = cms.double(45)
pu45to50 = pileupFilter.clone()
pu45to50.minPU = cms.double(45)
pu45to50.maxPU = cms.double(50)
pu50to55 = pileupFilter.clone()
pu50to55.minPU = cms.double(50)
pu50to55.maxPU = cms.double(55)
pu55to60 = pileupFilter.clone()
pu55to60.minPU = cms.double(55)
pu55to60.maxPU = cms.double(60)
pu60to65 = pileupFilter.clone()
pu60to65.minPU = cms.double(60)
pu60to65.maxPU = cms.double(65)
|
5bb7f587be6b14c83c23642ebac70d864c5860eb
|
66ce19daa74e0d1e796300b27f66aedea0820b13
|
/cramming/backend/optimizers/schedulers.py
|
840b63eb0be04800b6a076980585fb2b3e422f03
|
[
"MIT"
] |
permissive
|
JonasGeiping/cramming
|
5a8026858fb730660959439c47c3c5e4ebf1722a
|
1397b8c8ecf11e7a8e714d17d44f44e3664af711
|
refs/heads/main
| 2023-08-17T07:18:49.370984
| 2023-08-07T14:13:26
| 2023-08-07T14:13:26
| 583,172,165
| 1,111
| 87
|
MIT
| 2023-06-13T16:49:39
| 2022-12-29T01:29:41
|
Python
|
UTF-8
|
Python
| false
| false
| 20,537
|
py
|
schedulers.py
|
"""Misc. optimizer implementations."""
import transformers
import math
from torch.optim.lr_scheduler import LambdaLR
import time
from functools import partial
def get_schedule_fn(initial_time, cfg_train):
"""Returns a callable scheduler_fn(optimizer).
Todo: Sanitize and unify these schedulers...
"""
if (cfg_train.warmup_steps) > 0 and (cfg_train.warmup_steps < 1):
# warmup may be given as a fraction of total steps; convert it to a step count
cfg_train.warmup_steps = int(cfg_train.warmup_steps * cfg_train.steps)
if (cfg_train.cooldown_steps) > 0 and (cfg_train.cooldown_steps < 1):
# cooldown may be given as a fraction of total steps; convert it to a step count
cfg_train.cooldown_steps = int(cfg_train.cooldown_steps * cfg_train.steps)
# Load huggingface schedulers based on total steps
if cfg_train.scheduler == "polynomial-decay":
scheduler_fn = partial(
transformers.get_polynomial_decay_schedule_with_warmup,
num_warmup_steps=cfg_train.warmup_steps,
num_training_steps=cfg_train.steps,
lr_end=1e-7,
power=1.0,
)
elif cfg_train.scheduler == "cosine-decay":
scheduler_fn = partial(
transformers.get_cosine_schedule_with_warmup,
num_warmup_steps=cfg_train.warmup_steps,
num_training_steps=cfg_train.steps,
num_cycles=0.5,
)
elif cfg_train.scheduler == "inverse-sqrt":
scheduler_fn = partial(
get_inverse_sqrt_scheduler,
num_warmup_steps=cfg_train.warmup_steps,
num_cooldown_steps=cfg_train.cooldown_steps,
num_training_steps=cfg_train.steps,
)
elif cfg_train.scheduler == "one-cycle": # this is a simplified one-cycle
scheduler_fn = partial(
get_one_cycle,
num_training_steps=cfg_train.steps,
)
elif cfg_train.scheduler == "ramp": # this is a simplified one-cycle
scheduler_fn = partial(
get_ramp,
num_cooldown_steps=cfg_train.cooldown_steps,
num_training_steps=cfg_train.steps,
)
"""Budget Schedulers from here: """
elif cfg_train.scheduler == "budget-inverse-sqrt":
scheduler_fn = partial(
get_budget_inv_sqrt_scheduler,
hour_budget=cfg_train.budget,
num_warmup_steps=cfg_train.warmup_steps,
num_cooldown_steps=cfg_train.cooldown_steps,
num_training_steps=cfg_train.steps,
initial_time=initial_time,
)
elif cfg_train.scheduler == "budget-constant":
scheduler_fn = partial(
get_budget_constant_scheduler,
hour_budget=cfg_train.budget,
num_warmup_steps=cfg_train.warmup_steps,
num_cooldown_steps=cfg_train.cooldown_steps,
num_training_steps=cfg_train.steps,
initial_time=initial_time,
)
elif cfg_train.scheduler == "budget-cosine-decay":
scheduler_fn = partial(
get_budget_cosine_schedule_with_warmup,
hour_budget=cfg_train.budget,
num_warmup_steps=cfg_train.warmup_steps,
num_training_steps=cfg_train.steps,
num_cycles=0.5,
initial_time=initial_time,
)
elif cfg_train.scheduler == "budget-cosine-annealing":
scheduler_fn = partial(
get_budget_cosine_half_cycles_with_warmup,
hour_budget=cfg_train.budget,
num_warmup_steps=cfg_train.warmup_steps,
num_training_steps=cfg_train.steps,
num_cycles=4,
initial_time=initial_time,
)
elif cfg_train.scheduler == "budget-linear":
scheduler_fn = partial(
get_budget_linear_schedule_with_warmup,
hour_budget=cfg_train.budget,
num_warmup_steps=cfg_train.warmup_steps,
num_training_steps=cfg_train.steps,
initial_time=initial_time,
)
elif cfg_train.scheduler == "budget-polynomial":
scheduler_fn = partial(
get_budget_polynomial_decay_with_warmup,
hour_budget=cfg_train.budget,
num_warmup_steps=cfg_train.warmup_steps,
num_training_steps=cfg_train.steps,
initial_time=initial_time,
)
elif cfg_train.scheduler == "budget-one-cycle": # this is a simplified one-cycle
scheduler_fn = partial(
get_budget_one_cycle,
hour_budget=cfg_train.budget,
num_training_steps=cfg_train.steps,
initial_time=initial_time,
)
elif cfg_train.scheduler == "budget-multi-cycle":
scheduler_fn = partial(
get_budget_multi_cycle,
hour_budget=cfg_train.budget,
num_training_steps=cfg_train.steps,
initial_time=initial_time,
)
elif cfg_train.scheduler == "budget-ramp":
scheduler_fn = partial(
get_budget_ramp,
hour_budget=cfg_train.budget,
num_cooldown_steps=cfg_train.cooldown_steps,
num_training_steps=cfg_train.steps,
initial_time=initial_time,
)
elif cfg_train.scheduler == "budget-inv-cosine":
scheduler_fn = partial(
get_budget_inv_cosine_schedule,
hour_budget=cfg_train.budget,
num_cooldown_steps=cfg_train.cooldown_steps,
num_training_steps=cfg_train.steps,
initial_time=initial_time,
)
elif cfg_train.scheduler == "budget-dive":
scheduler_fn = partial(
get_budget_dive,
hour_budget=cfg_train.budget,
num_training_steps=cfg_train.steps,
num_warmup_steps=cfg_train.warmup_steps,
falloff=0.5,
initial_time=initial_time,
)
elif cfg_train.scheduler == "budget-dive-slow":
scheduler_fn = partial(
get_budget_dive,
hour_budget=cfg_train.budget,
num_training_steps=cfg_train.steps,
num_warmup_steps=cfg_train.warmup_steps,
falloff=0.75,
initial_time=initial_time,
)
elif cfg_train.scheduler == "budget-dive-fast":
scheduler_fn = partial(
get_budget_dive,
hour_budget=cfg_train.budget,
num_training_steps=cfg_train.steps,
num_warmup_steps=cfg_train.warmup_steps,
falloff=0.25,
initial_time=initial_time,
)
elif cfg_train.scheduler == "budget-triangle1":
scheduler_fn = partial(
get_budget_triangle,
hour_budget=cfg_train.budget,
num_training_steps=cfg_train.steps,
falloff=0.25,
base_percentage=0.5,
initial_time=initial_time,
)
elif cfg_train.scheduler == "budget-triangle2":
scheduler_fn = partial(
get_budget_triangle,
hour_budget=cfg_train.budget,
num_training_steps=cfg_train.steps,
falloff=0.25,
base_percentage=0.25,
initial_time=initial_time,
)
elif cfg_train.scheduler in [
"linear",
"cosine",
"cosine_with_restarts",
"polynomial",
"constant",
"constant_with_warmup",
"get_cosine_with_hard_restarts_schedule_with_warmup",
"get_polynomial_decay_schedule_with_warmup",
]:
def scheduler_fn(optimizer):
return transformers.get_scheduler(
name=cfg_train.scheduler,
optimizer=optimizer,
num_warmup_steps=cfg_train.warmup_steps,
num_training_steps=cfg_train.steps,
)
elif cfg_train.scheduler == "none" or cfg_train.scheduler is None:
scheduler_fn = DumbScheduler
else:
raise ValueError(f"Invalid schedule {cfg_train.scheduler} given.")
return scheduler_fn
class DumbScheduler:
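"""No-op scheduler: mimics the torch LR-scheduler interface without ever changing the learning rate."""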
def __init__(self, *args, **kwargs):
self._step_count = 0
def step(self, *args, **kwargs):
self._step_count += 1
def _initial_step(self):
self.optimizer._step_count = 0
self._step_count = 0
self.step()
def state_dict(self):
return {}
def load_state_dict(self, state_dict):
self.__dict__.update(state_dict)
def get_last_lr(self):
"""Return last computed learning rate by current scheduler."""
return float("NaN")
def get_lr(self):
return float("NaN")
def print_lr(self, is_verbose, group, lr, epoch=None):
print(float("NaN"))
"""FairSeq-like inverse-square-root scheduler:"""
def get_inverse_sqrt_scheduler(optimizer, num_warmup_steps, num_cooldown_steps, num_training_steps):
"""Decay the LR based on the inverse square root of the update number.
We also support a warmup phase where we linearly increase the learning rate
from some initial learning rate (`--warmup-init-lr`) until the configured
learning rate (`--lr`). Thereafter we decay proportional to the number of
updates, with a decay factor set to align with the configured learning rate.
During warmup:
lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)
lr = lrs[update_num]
After warmup:
lr = decay_factor / sqrt(update_num)
where
decay_factor = args.lr * sqrt(args.warmup_updates)
"""
# linearly warmup for the first args.warmup_updates
lr_step = 1 / num_warmup_steps
# then, decay prop. to the inverse square root of the update number
decay_factor = num_warmup_steps**0.5
decayed_lr = decay_factor * (num_training_steps - num_cooldown_steps) ** -0.5
def lr_lambda(current_step: int):
if current_step < num_warmup_steps:
return float(current_step * lr_step)
elif current_step > (num_training_steps - num_cooldown_steps):
return max(0.0, float(decayed_lr * (num_training_steps - current_step) / num_cooldown_steps))
else:
return float(decay_factor * current_step**-0.5)
return LambdaLR(optimizer, lr_lambda, last_epoch=-1)
def get_one_cycle(optimizer, num_training_steps):
"""Simple single-cycle scheduler. Not including paper/fastai three-phase things or asymmetry."""
def lr_lambda(current_step):
if current_step < num_training_steps / 2:
return float(current_step / (num_training_steps / 2))
else:
return float(2 - current_step / (num_training_steps / 2))
return LambdaLR(optimizer, lr_lambda, -1)
def get_ramp(optimizer, num_cooldown_steps, num_training_steps):
"""to the MOON."""
max_lr = (num_training_steps - num_cooldown_steps) / num_training_steps
def lr_lambda(current_step):
if current_step > (num_training_steps - num_cooldown_steps):
return max(0.0, float(max_lr * (num_training_steps - current_step) / num_cooldown_steps))
else:
return float(current_step / num_training_steps)
return LambdaLR(optimizer, lr_lambda, -1)
"""Wallclock time schedulers."""
def _get_fake_step(current_step, initial_time, hour_budget, num_training_steps):
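# Map elapsed wallclock time onto the nominal step axis: with a 24-hour budget
# and 10000 planned steps, 6 elapsed hours gives fake_step = 2500,
# independent of how many optimizer steps actually ran.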
elapsed_hours = (time.time() - initial_time) / 60 / 60
if current_step == 0:
fake_step = 0
else:
fake_step = int(elapsed_hours / hour_budget * num_training_steps)
return fake_step
def get_budget_inv_sqrt_scheduler(optimizer, hour_budget, num_warmup_steps, num_cooldown_steps, num_training_steps, initial_time=None):
"""Time-based scheduler as described in Iszak et al. plus inv_sqrt.
Takes in num_warmup_steps and num_training_steps as normal, but actually squeezes the planned schedule into the
budget given by hour_budget, based on wallclock measurements.
Reference: https://github.com/IntelLabs/academic-budget-bert/blob/main/pretraining/schedules.py
"""
decay_factor = num_warmup_steps**0.5
decayed_lr = decay_factor * (num_training_steps - num_cooldown_steps) ** -0.5
def lr_lambda(current_step: int):
fake_step = _get_fake_step(current_step, initial_time, hour_budget, num_training_steps)
if fake_step < num_warmup_steps:
return float(fake_step / num_warmup_steps)
elif fake_step > (num_training_steps - num_cooldown_steps):
return max(0.0, float(decayed_lr * (num_training_steps - fake_step) / num_cooldown_steps))
else:
return float(decay_factor * fake_step**-0.5)
return LambdaLR(optimizer, lr_lambda, last_epoch=-1)
def get_budget_constant_scheduler(optimizer, hour_budget, num_warmup_steps, num_cooldown_steps, num_training_steps, initial_time):
"""Time-based scheduler with optional warmup and cooldown (so technically a trapezoidal shape)"""
def lr_lambda(current_step: int):
fake_step = _get_fake_step(current_step, initial_time, hour_budget, num_training_steps)
if fake_step < num_warmup_steps:
return float(fake_step / num_warmup_steps)
elif fake_step > (num_training_steps - num_cooldown_steps):
return max(0.0, float((num_training_steps - fake_step) / num_cooldown_steps))
else:
return 1.0
return LambdaLR(optimizer, lr_lambda, last_epoch=-1)
def get_budget_linear_schedule_with_warmup(optimizer, hour_budget, num_warmup_steps, num_training_steps, initial_time, num_cycles=0.5):
"""Follows the huggingface transformers scheduler with the same name, but gets an additional arg hour_budget"""
def lr_lambda(current_step):
fake_step = _get_fake_step(current_step, initial_time, hour_budget, num_training_steps)
if fake_step < num_warmup_steps:
return float(fake_step) / float(max(1, num_warmup_steps))
return max(0.0, float(num_training_steps - fake_step) / float(max(1, num_training_steps - num_warmup_steps)))
return LambdaLR(optimizer, lr_lambda, -1)
def get_budget_cosine_schedule_with_warmup(optimizer, hour_budget, num_warmup_steps, num_training_steps, initial_time, num_cycles=0.5):
"""Follows the huggingface transformers scheduler with the same name, but gets an additional arg hour_budget"""
def lr_lambda(current_step):
fake_step = _get_fake_step(current_step, initial_time, hour_budget, num_training_steps)
if fake_step < num_warmup_steps:
return float(fake_step) / float(max(1, num_warmup_steps))
progress = float(fake_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
return LambdaLR(optimizer, lr_lambda, -1)
def get_budget_cosine_half_cycles_with_warmup(optimizer, hour_budget, num_warmup_steps, num_training_steps, initial_time, num_cycles=0.5):
"""Follows the huggingface transformers scheduler with the same name, but gets an additional arg hour_budget"""
def lr_lambda(current_step):
fake_step = _get_fake_step(current_step, initial_time, hour_budget, num_training_steps)
if fake_step < num_warmup_steps:
return float(fake_step) / float(max(1, num_warmup_steps))
progress = float(fake_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
return LambdaLR(optimizer, lr_lambda, -1)
def get_budget_one_cycle(optimizer, hour_budget, num_training_steps, initial_time):
"""Simple single-cycle scheduler. Not including paper/fastai three-phase things or asymmetry."""
def lr_lambda(current_step):
fake_step = _get_fake_step(current_step, initial_time, hour_budget, num_training_steps)
if fake_step < num_training_steps / 2:
return float(fake_step / (num_training_steps / 2))
else:
return float(2 - fake_step / (num_training_steps / 2))
return LambdaLR(optimizer, lr_lambda, -1)
def get_budget_multi_cycle(optimizer, hour_budget, num_training_steps, initial_time, num_cycles=8):
"""Simple multi-cycle scheduler. Not including paper/fastai three-phase things or asymmetry."""
cycle_length = int(num_training_steps / num_cycles)
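# The modulo below restarts the triangular ramp every cycle_length fake
# steps, producing num_cycles identical up/down sweeps over the budget.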
def lr_lambda(current_step):
fake_step = _get_fake_step(current_step, initial_time, hour_budget, num_training_steps) % cycle_length
if fake_step < cycle_length / 2:
return float(fake_step / (cycle_length / 2))
else:
return float(2 - fake_step / (cycle_length / 2))
return LambdaLR(optimizer, lr_lambda, -1)
def get_budget_ramp(optimizer, hour_budget, num_cooldown_steps, num_training_steps, initial_time):
"""to the moon."""
max_lr = (num_training_steps - num_cooldown_steps) / num_training_steps
def lr_lambda(current_step):
fake_step = _get_fake_step(current_step, initial_time, hour_budget, num_training_steps)
if fake_step > (num_training_steps - num_cooldown_steps):
return max(0.0, float(max_lr * (num_training_steps - fake_step) / num_cooldown_steps))
else:
return float(fake_step / num_training_steps)
return LambdaLR(optimizer, lr_lambda, -1)
def get_budget_inv_cosine_schedule(optimizer, hour_budget, num_cooldown_steps, num_training_steps, initial_time, num_cycles=0.5):
"""An inverse cosine schedule, with limited budget."""
ult_step = num_training_steps - num_cooldown_steps
max_lr = max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * (1 - ult_step / float(max(1, num_training_steps))))))
def lr_lambda(current_step):
fake_step = _get_fake_step(current_step, initial_time, hour_budget, num_training_steps)
progress = 1 - fake_step / float(max(1, num_training_steps))
if fake_step > (num_training_steps - num_cooldown_steps):
return max(0.0, float(max_lr * (num_training_steps - fake_step) / num_cooldown_steps))
else:
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
return LambdaLR(optimizer, lr_lambda, -1)
def get_budget_triangle(optimizer, hour_budget, num_training_steps, initial_time, base_percentage=0.5, falloff=0.5):
"""Linear increase from a percentage of the base learning rate, then linear decay.
plot min(0.5 + x * (1 - 0.5)/(1-0.25) / 1000, 1/0.25 - x / (1000 * 0.25)) from 0 to 1000 in the plot range 0 to 1
"""
def lr_lambda(current_step):
fake_step = _get_fake_step(current_step, initial_time, hour_budget, num_training_steps)
return min(
base_percentage + fake_step * (1 - base_percentage) / (1 - falloff) / num_training_steps,
float(1 / falloff - fake_step / (num_training_steps * falloff)),
)
return LambdaLR(optimizer, lr_lambda, -1)
def get_budget_dive(optimizer, hour_budget, num_training_steps, num_warmup_steps=0, falloff=0.5, initial_time=None):
"""Constant, then linear decay.
plot min(1, 1/0.5 - x / (1000 * 0.5)) from 0 to 1000 in the plot range 0 to 1
"""
def lr_lambda(current_step):
fake_step = _get_fake_step(current_step, initial_time, hour_budget, num_training_steps)
if fake_step < num_warmup_steps:
return float(fake_step) / float(max(1, num_warmup_steps))
else:
return min(1.0, float(1 / falloff - fake_step / (num_training_steps * falloff)))
return LambdaLR(optimizer, lr_lambda, -1)
def get_budget_polynomial_decay_with_warmup(optimizer, hour_budget, num_warmup_steps, num_training_steps, initial_time):
"""Follows the huggingface transformers scheduler with the same name, but gets an additional arg hour_budget"""
lr_init = optimizer.defaults["lr"]
lr_end = 0.0
power = 1.0
def lr_lambda(current_step: int):
fake_step = _get_fake_step(current_step, initial_time, hour_budget, num_training_steps)
if fake_step < num_warmup_steps:
return float(fake_step) / float(max(1, num_warmup_steps))
elif fake_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lr_range = lr_init - lr_end
decay_steps = num_training_steps - num_warmup_steps
pct_remaining = 1 - (fake_step - num_warmup_steps) / decay_steps
decay = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(optimizer, lr_lambda, -1)
|
13ac927d432c78b0aaa7dc3f1af5b05adf07ab90
|
d93f40ed89e9e393cee0d10e720783b48deccaf2
|
/porespy/metrics/_meshtools.py
|
ad5717634e7be0c25a2d9b745a4050268041ad8e
|
[
"MIT"
] |
permissive
|
PMEAL/porespy
|
05a169dd67e28c17ff63a43276640d54bc6e037e
|
e78aa70e29697a381b7f26beb4adfdde513eaa45
|
refs/heads/dev
| 2023-08-16T12:55:53.343643
| 2023-07-31T15:29:50
| 2023-07-31T15:29:50
| 35,181,546
| 232
| 102
|
MIT
| 2023-09-14T04:46:19
| 2015-05-06T20:35:41
|
Python
|
UTF-8
|
Python
| false
| false
| 10,114
|
py
|
_meshtools.py
|
import logging
import numpy as np
import scipy.ndimage as spim
from porespy.tools import extend_slice, ps_round
from porespy.tools import _check_for_singleton_axes, Results
from porespy.tools import mesh_region
from skimage import measure
from porespy.tools import get_tqdm
from porespy import settings
__all__ = [
"mesh_surface_area",
"mesh_volume",
"region_interface_areas",
"region_surface_areas",
"region_volumes",
]
tqdm = get_tqdm()
logger = logging.getLogger(__name__)
def region_volumes(regions, mode='marching_cubes'):
r"""
Compute volume of each labelled region in an image
Parameters
----------
regions : ndarray
An image with labelled regions
mode : string
Controls the method used. Options are:
'marching_cubes' (default)
Finds a mesh for each region using the marching cubes algorithm
from ``scikit-image``, then finds the volume of the mesh using the
``trimesh`` package.
'voxel'
Calculates the region volume as the sum of voxels within each
region.
Returns
-------
volumes : ndarray
An array of shape [N by 1] where N is the number of labelled regions
in the image.
Examples
--------
`Click here
<https://porespy.org/examples/metrics/reference/mesh_volumes.html>`_
to view online example.
"""
slices = spim.find_objects(regions)
vols = np.zeros([len(slices), ])
msg = "Computing region volumes".ljust(60)
for i, s in enumerate(tqdm(slices, desc=msg, **settings.tqdm)):
region = regions[s] == (i + 1)
if mode == 'marching_cubes':
vols[i] = mesh_volume(region)
elif mode.startswith('voxel'):
vols[i] = region.sum(dtype=np.int64)
return vols
def mesh_volume(region):
r"""
Compute the volume of a single region by meshing it
Parameters
----------
region : ndarray
An image with a single region labelled as ``True`` (or > 0)
Returns
-------
volume : float
The volume of the region computed by applying the marching cubes
algorithm to the region, then finding the mesh volume using the
``trimesh`` package.
Examples
--------
`Click here
<https://porespy.org/examples/metrics/reference/mesh_volume.html>`_
to view online example.
"""
try:
from trimesh import Trimesh
except ModuleNotFoundError:
msg = 'The trimesh package can be installed with pip install trimesh'
raise ModuleNotFoundError(msg)
mc = mesh_region(region > 0)
m = Trimesh(vertices=mc.verts, faces=mc.faces, vertex_normals=mc.norm)
if m.is_watertight:
vol = np.abs(m.volume)
else:
vol = np.nan
return vol
def region_surface_areas(regions, voxel_size=1, strel=None):
r"""
Extract the surface area of each region in a labeled image.
Optionally, it can also find the interfacial area between all
adjoining regions.
Parameters
----------
regions : ndarray
An image of the pore space partitioned into individual pore regions.
Note that zeros in the image will not be considered for area
calculation.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1.
strel : array_like
The structuring element used to blur the region. If not provided,
then a spherical element (or disk) with radius 1 is used. See the
docstring for ``mesh_region`` for more details, as this argument is
passed to there.
Returns
-------
areas : list
A list containing the surface area of each region, offset by 1, such
that the surface area of region 1 is stored in element 0 of the list.
Examples
--------
`Click here
<https://porespy.org/examples/metrics/reference/region_surface_areas.html>`_
to view online example.
"""
logger.info('Finding surface area of each region')
im = regions
if strel is None:
strel = ps_round(1, im.ndim, smooth=False)
# Get 'slices' into im for each pore region
slices = spim.find_objects(im)
# Initialize arrays
Ps = np.arange(1, np.amax(im) + 1)
sa = np.zeros_like(Ps, dtype=float)
# Start extracting marching cube area from im
msg = "Computing region surface area".ljust(60)
for i in tqdm(Ps, desc=msg, **settings.tqdm):
reg = i - 1
if slices[reg] is not None:
s = extend_slice(slices[reg], im.shape)
sub_im = im[s]
mask_im = sub_im == i
mesh = mesh_region(region=mask_im, strel=strel)
sa[reg] = mesh_surface_area(mesh)
result = sa * voxel_size**2
return result
def mesh_surface_area(mesh=None, verts=None, faces=None):
r"""
Calculate the surface area of a meshed region
Parameters
----------
mesh : tuple
The tuple returned from the ``mesh_region`` function
verts : array
An N-by-ND array containing the coordinates of each mesh vertex
faces : array
An N-by-ND array indicating which elements in ``verts`` form a mesh
element.
Returns
-------
surface_area : float
The surface area of the mesh, calculated by
``skimage.measure.mesh_surface_area``
Notes
-----
This function simply calls ``skimage.measure.mesh_surface_area``, but
it allows for the passing of the ``mesh`` tuple returned by the
``mesh_region`` function, entirely for convenience.
Examples
--------
`Click here
<https://porespy.org/examples/metrics/reference/mesh_surface_area.html>`_
to view online example.
"""
if mesh:
verts = mesh.verts
faces = mesh.faces
else:
if (verts is None) or (faces is None):
raise Exception('Either mesh or verts and faces must be given')
surface_area = measure.mesh_surface_area(verts, faces)
return surface_area
def region_interface_areas(regions, areas, voxel_size=1, strel=None):
r"""
Calculate the interfacial area between all pairs of adjacent regions
Parameters
----------
regions : ndarray
An image of the pore space partitioned into individual pore regions.
Note that zeros in the image will not be considered for area
calculation.
areas : array_like
A list containing the area of each region, as determined by
``region_surface_areas``. Note that the region number and list index
are offset by 1, such that the area for region 1 is stored in
``areas[0]``.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1.
strel : array_like
The structuring element used to blur the region. If not provided,
then a spherical element (or disk) with radius 1 is used. See the
docstring for ``mesh_region`` for more details, as this argument is
passed to there.
Returns
-------
areas : Results object
A custom object with the following data added as named attributes:
'conns'
An N-regions by 2 array with each row containing the region
number of an adjacent pair of regions. For instance, if
``conns[0, 0]`` is 0 and ``conns[0, 1]`` is 5, then row 0 of
``area`` contains the interfacial area shared by regions 0 and 5.
'area'
The area calculated for each pair of regions in ``conns``
Examples
--------
`Click here
<https://porespy.org/examples/metrics/reference/region_interface_areas.html>`_
to view online example.
"""
logger.info('Finding interfacial areas between each region')
im = regions
_check_for_singleton_axes(im)
ball = ps_round(1, im.ndim, smooth=False)
if strel is None:
strel = np.copy(ball)
# Get 'slices' into im for each region
slices = spim.find_objects(im)
# Initialize arrays
Ps = np.arange(1, np.amax(im) + 1)
sa = np.zeros_like(Ps, dtype=float)
sa_combined = [] # Difficult to preallocate since number of conns unknown
cn = []
# Start extracting area from im
msg = "Computing interfacial area between regions".ljust(60)
for i in tqdm(Ps, desc=msg, **settings.tqdm):
reg = i - 1
if slices[reg] is not None:
s = extend_slice(slices[reg], im.shape)
sub_im = im[s]
mask_im = sub_im == i
sa[reg] = areas[reg]
im_w_throats = spim.binary_dilation(input=mask_im,
structure=ball)
im_w_throats = im_w_throats * sub_im
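# The one-voxel dilation bleeds region i into its neighbours, so the labels
# that survive the mask (minus background, shifted to 0-based) are exactly
# the regions adjacent to i.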
Pn = np.unique(im_w_throats)[1:] - 1
for j in Pn:
if j > reg:
cn.append([reg, j])
merged_region = im[(min(slices[reg][0].start,
slices[j][0].start)):
max(slices[reg][0].stop,
slices[j][0].stop),
(min(slices[reg][1].start,
slices[j][1].start)):
max(slices[reg][1].stop,
slices[j][1].stop)]
merged_region = ((merged_region == reg + 1)
+ (merged_region == j + 1))
mesh = mesh_region(region=merged_region, strel=strel)
sa_combined.append(mesh_surface_area(mesh))
# Interfacial area calculation
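# Inclusion-exclusion on mesh areas: the merged mesh omits the shared
# interface entirely, so sa_i + sa_j - sa_merged counts the interface twice;
# the 0.5 factor recovers it once.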
cn = np.array(cn)
ia = 0.5 * (sa[cn[:, 0]] + sa[cn[:, 1]] - sa_combined)
ia[ia <= 0] = 1
result = Results()
result.conns = cn
result.area = ia * voxel_size**2
return result
|
d97102dc5ff93107494ccf1e07c30505b3de951e
|
05209150ee39fac5d470516689e126bf5d8da55b
|
/setup.py
|
ed54028b3ce03da2fc8f7d77a6526a3c4c1dbea9
|
[
"BSD-3-Clause"
] |
permissive
|
aio-libs/aiocache
|
7b694237583b233511d8ca6018d478405626b366
|
63600a6ebc39c1f4f1cd2677d8629b71cbbcf380
|
refs/heads/master
| 2023-08-21T23:18:33.890923
| 2023-08-16T18:23:56
| 2023-08-16T18:23:56
| 69,653,334
| 488
| 84
|
BSD-3-Clause
| 2023-09-13T18:16:11
| 2016-09-30T09:25:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,174
|
py
|
setup.py
|
import re
from pathlib import Path
from setuptools import setup
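# Parse __version__ out of aiocache/__init__.py with a regex instead of
# importing the package, which could fail before its dependencies are installed.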
p = Path(__file__).with_name("aiocache") / "__init__.py"
try:
version = re.findall(r"^__version__ = \"([^\"]+)\"\r?$", p.read_text(), re.M)[0]
except IndexError:
raise RuntimeError("Unable to determine version.")
readme = Path(__file__).with_name("README.rst").read_text()
setup(
name="aiocache",
version=version,
author="Manuel Miranda",
url="https://github.com/aio-libs/aiocache",
author_email="manu.mirandad@gmail.com",
description="multi backend asyncio cache",
long_description=readme,
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Framework :: AsyncIO",
],
packages=("aiocache",),
install_requires=None,
extras_require={
"redis": ["redis>=4.2.0"],
"memcached": ["aiomcache>=0.5.2"],
"msgpack": ["msgpack>=0.5.5"],
},
include_package_data=True,
)
|
9bec401442b75400dd4bf52aa20c91df88929e37
|
380face90c8169eeca9e61af566ab1ba7590c05b
|
/vimdoc/regex.py
|
8e977cd163ec74d26e34a5f2b1b6cb79dc93d804
|
[
"Apache-2.0"
] |
permissive
|
google/vimdoc
|
778d41b742e611aeea7e6e5645a360395f9c8bbe
|
ed17321a17a8cf4c21d2f7f77352fc9b0fb42e66
|
refs/heads/main
| 2023-08-26T05:16:55.444522
| 2022-11-26T18:56:21
| 2022-11-26T18:56:21
| 17,838,265
| 250
| 37
|
Apache-2.0
| 2023-04-10T00:01:12
| 2014-03-17T18:12:29
|
Python
|
UTF-8
|
Python
| false
| false
| 10,764
|
py
|
regex.py
|
# -*- coding: utf-8 -*-
"""When you gaze into the abyss, the abyss gazes also into you.
>>> comment_leader.match(' echo "string"')
>>> comment_leader.match(' " Woot') is not None
True
>>> comment_leader.match('"') is not None
True
>>> comment_leader.sub('', '" foo')
'foo'
>>> comment_leader.sub('', '"bar')
'bar'
>>> line_continuation.match(' foo')
>>> line_continuation.match(' \\ foo') is not None
True
>>> blank_comment_line.match('')
>>> blank_comment_line.match('" foo')
>>> blank_comment_line.match('"') is not None
True
>>> blank_comment_line.match(' " ') is not None
True
>>> blank_code_line.match('foo')
>>> blank_code_line.match('"')
>>> blank_code_line.match(' ') is not None
True
>>> blank_code_line.match('') is not None
True
>>> block_directive.match(' foo')
>>> block_directive.match(' " foo')
>>> block_directive.match(' " @foo').groups()
('foo', '')
>>> block_directive.match(' " @foo bar baz').groups()
('foo', 'bar baz')
>>> section_args.match('')
>>> section_args.match('Introduction').groups()
('Introduction', None)
>>> section_args.match('The Beginning, beg').groups()
('The Beginning', 'beg')
>>> parent_section_args.match('123')
>>> parent_section_args.match('foo').groups()
('foo',)
>>> backmatter_args.match('123')
>>> backmatter_args.match('foo').groups()
('foo',)
>>> dict_args.match('MyDict attr')
>>> dict_args.match('MyDict').groups()
('MyDict', None)
>>> dict_args.match('MyDict.attr').groups()
('MyDict', 'attr')
>>> usage_args.match('foo - bar - baz')
>>> usage_args.match('{foo} bar [][baz]') is not None
True
>>> usage_args.match('{foo...} bar... [][baz...]') is not None
True
>>> usage_arg.findall('{foo} bar [][baz]')
['{foo}', 'bar', '[]', '[baz]']
>>> usage_arg.match('{one..} two.. [three..]')
>>> usage_arg.findall('{one...} two... [three...]')
['{one...}', 'two...', '[three...]']
>>> no_args.match('foo')
>>> no_args.match('') is not None
True
>>> any_args.match('foo') is not None
True
>>> any_args.match('') is not None
True
>>> one_arg.match('foo') is not None
True
>>> one_arg.match('') is None
True
>>> maybe_word.match('Hello There')
>>> maybe_word.match('HelloThere') is not None
True
>>> maybe_word.match('') is not None
True
>>> throw_args.match('-@!813')
>>> throw_args.match('MyError').groups()
('MyError', None)
>>> throw_args.match('MyError on occasion').groups()
('MyError', 'on occasion')
>>> default_args.match('foo!bar')
>>> default_args.match('{foo}=bar')
>>> default_args.match('[foo]=bar').groups()
('[foo]', 'bar')
>>> default_args.match('foo=bar').groups()
('foo', 'bar')
>>> default_args.match('someVar = Some weird ==symbols==').groups()
('someVar', 'Some weird ==symbols==')
>>> order_args.match('some* weird! id"s')
>>> order_args.match('foo bar baz') is not None
True
>>> order_args.match('foo bar baz +').groups()
('foo bar baz +',)
>>> order_arg.findall('foo bar baz -')
['foo', 'bar', 'baz', '-']
>>> stylizing_args.match('Your Plugin')
>>> stylizing_args.match('MyPlugin').groups()
('MyPlugin',)
>>> stylizing_args.match('っoの').groups()
('っoの',)
>>> function_line.match('foo bar')
>>> function_line.match('fu MyFunction()').groups()
(None, 'MyFunction', '')
>>> function_line.match('funct namespace#MyFunction(foo, bar)').groups()
('namespace#', 'MyFunction', 'foo, bar')
>>> function_line.match('fu!a#b#c#D(...) abort dict range').groups()
('a#b#c#', 'D', '...')
>>> command_line.match('com -nargs=+ -bang MyCommand call #this').groups()
('-nargs=+ -bang ', 'MyCommand')
>>> setting_line.match('let s:myglobal_var = 1')
>>> setting_line.match('let g:myglobal_var = 1').groups()
('myglobal_var',)
>>> setting_line.match('let g:mysettings.var = 0').groups()
('mysettings.var',)
>>> flag_line.match("call s:plugin.Flag('myflag')")
>>> flag_line.match("call s:plugin.Flag('myflag', 0)").groups()
('myflag', None, '0')
>>> flag_line.match('cal g:my["flags"].Flag("myflag", 1)').groups()
(None, 'myflag', '1')
>>> flag_line.match("call s:plugin.Flag('Some weird '' flag', 'X')").groups()
("Some weird '' flag", None, "'X'")
>>> flag_line.match(
... r'call s:plugin.Flag("Another \\" weird flag", [])').groups()
(None, 'Another \\\\" weird flag', '[]')
>>> flag_line.match("call s:plugin.Flag('myflag', 1)").groups()
('myflag', None, '1')
>>> flag_line.match('call s:plugin.Flag("myflag", '
... "get(g:, 'foo', []) )").groups()
(None, 'myflag', "get(g:, 'foo', [])")
>>> numbers_args.match('1 two 3')
>>> numbers_args.match('1 2 3').groups()
('1 2 3',)
>>> number_arg.findall('1 2 3')
['1', '2', '3']
>>> vim_error.match('EVERYTHING')
>>> vim_error.match('E101') is not None
True
>>> inline_directive.match('@function(bar)').groups()
('function', 'bar')
>>> inline_directive.sub(
... lambda match: '[{}]'.format(match.group(2)),
... 'foo @function(bar) baz @link(quux) @this')
'foo [bar] baz [quux] [None]'
>>> function_arg.findall('foo, bar, baz, ...')
['foo', 'bar', 'baz', '...']
>>> bad_separator.search('foo, bar, baz')
>>> bad_separator.search('foo bar baz')
>>> bad_separator.search('foo, , bar, baz') is not None
True
>>> bad_separator.search('foo bar baz') is not None
True
>>> bad_separator.sub('', 'foo bar, , baz')
'foo bar, baz'
>>> vimdoc_leader.match('"" Foo') is not None
True
>>> vimdoc_leader.match('""') is not None
True
>>> vimdoc_leader.match('" " ')
>>> empty_vimdoc_leader.match(' ""') is not None
True
>>> empty_vimdoc_leader.match('"" ')
"""
import re
def _DelimitedRegex(pattern):
return re.compile(r"""
# Shouldn't follow any non-whitespace character.
(?<!\S)
# pattern
(?:{})
# Shouldn't be directly followed by alphanumeric (but "," and "." are okay).
(?!\w)
""".format(pattern), re.VERBOSE)
# Regular expression soup!
vimdoc_leader = re.compile(r'^\s*"" ?')
empty_vimdoc_leader = re.compile(r'^\s*""$')
comment_leader = re.compile(r'^\s*" ?')
line_continuation = re.compile(r'^\s*\\')
blank_comment_line = re.compile(r'^\s*"\s*$')
blank_code_line = re.compile(r'^\s*$')
block_directive = re.compile(r'^\s*"\s*@([a-zA-Z_][a-zA-Z0-9_]*)(?:\s+|$)(.*)')
section_args = re.compile(r"""
^
# MATCH GROUP 1: The Name
(
# Non-commas or escaped commas or escaped escapes.
# Must not end with a space.
(?:[^\\,]|\\.)+\S
)
# Optional identifier
(?:
# Separated by comma and whitespace.
,\s*
# MATCH GROUP 2: The identifier
([a-zA-Z_-][a-zA-Z0-9_-]*)
)?
$
""", re.VERBOSE)
parent_section_args = re.compile(r'([a-zA-Z_-][a-zA-Z0-9_-]*)')
backmatter_args = re.compile(r'([a-zA-Z_-][a-zA-Z0-9_-]*)')
dict_args = re.compile(r"""
^([a-zA-Z_][a-zA-Z0-9]*)(?:\.([a-zA-Z_][a-zA-Z0-9_]*))?$
""", re.VERBOSE)
default_args = re.compile(r"""
^( # MATCH GROUP 1: The variable name.
(?: # Any of:
# Square brackets with an identifier within.
\[[a-zA-Z_][a-zA-Z0-9_]*\]
|
# An identifier
[a-zA-Z_][a-zA-Z0-9_]*
)
) # An equals sign, optional spaces.
\s*=\s*
# MATCH GROUP 2: The value.
(.*)$
""", re.VERBOSE)
numbers_args = re.compile(r'^((?:\s|\d)*)$')
number_arg = re.compile(r'\d+')
usage_args = re.compile(r"""
^((?:
# Optional separating whitespace.
\s*
(?:
# Curly braces with an optional identifier within.
{(?:[a-zA-Z_][a-zA-Z0-9_]*(?:\.\.\.)?)?}
|
# Square brackets with an optional identifier within.
\[(?:[a-zA-Z_.][a-zA-Z0-9_.]*(?:\.\.\.)?)?\]
|
# An identifier
[a-zA-Z_][a-zA-Z0-9_]*(?:\.\.\.)?
|
# A joint argument hole
{\]
)
# Many times.
)*)$
""", re.VERBOSE)
usage_arg = re.compile(r"""
# Curly braces with an optional identifier within.
{(?:[a-zA-Z_][a-zA-Z0-9_]*(?:\.\.\.)?)?}
|
# Square brackets with an optional identifier within.
\[(?:[a-zA-Z_][a-zA-Z0-9_]*(?:\.\.\.)?)?\]
|
# The special required-followed-by-optional hole
{\]
|
# An identifier
[a-zA-Z_][a-zA-Z0-9_]*(?:\.\.\.)?
""", re.VERBOSE)
order_args = re.compile(r'^((?:\s*[a-zA-Z_][a-zA-Z0-9_-]*)+(?:\s*[+-])?)$')
order_arg = re.compile(r'([a-zA-Z_][a-zA-Z0-9_-]*|[+-])')
no_args = re.compile(r'^$')
any_args = re.compile(r'^(.*)$')
one_arg = re.compile(r'^(.+)$')
maybe_word = re.compile(r'^\s*([a-zA-Z_][a-zA-Z0-9_]*)?\s*$')
throw_args = re.compile(r'^([a-zA-Z_][a-zA-Z0-9_]*)(?:\s+(.*))?$')
vim_error = re.compile(r'^E\d+$')
stylizing_args = re.compile(r'^(\S+)$')
function_line = re.compile(r"""
# Leading whitespace.
^\s*
# fu[nction]
fu(?:n|nc|nct|ncti|nctio|nction)?
# Separation (with an optional bang)
(?:\s*!\s*|\s+)
# GROUP 1: Autocmd namespace.
((?:[a-zA-Z_][a-zA-Z0-9_]*\#)+)?
# GROUP 2: Function name.
([a-zA-Z_][a-zA-Z0-9_]*)
# Open parens
\s*\(
# GROUP 3: Parameters
# This is more permissive than it has to be. Vimdoc is not a parser.
([^\)]*)
# Close parens
\)
""", re.VERBOSE)
command_line = re.compile(r"""
# Leading whitespace.
^\s*
# com[mand]
com(?:m|ma|man|mand)?
# Optional bang.
(?:\s*!\s*|\s+)
# GROUP 1: Command arguments.
((?:-\S+\s*)*)
# GROUP 2: Command name.
([a-zA-Z_][a-zA-Z0-9_]*)
""", re.VERBOSE)
setting_line = re.compile(r"""
# Definition start.
^\s*let\s+g:
# GROUP 1: Setting name.
# May include [] (indexing), {} (interpolation), and . (dict of settings).
([a-zA-Z_][a-zA-Z0-9_{}\[\].]*)
""", re.VERBOSE)
setting_scope = re.compile(r'[a-z]:')
flag_line = re.compile(r"""
# Definition start.
^\s*call?\s*.*\.Flag\(
# Shit's about to get real.
(?:
# GROUP 1: The flag name in single quotes.
'(
# Double single quotes escapes single quotes.
(?:[^']|'')*
)'
| # GROUP 2: The flag name in double quotes.
"(
# No escapes or double quotes, or one escaped anything.
(?:[^\\"]|\\.)*
)"
),\s*
(?:
# GROUP 3: Default value.
((?:
# Any non-parenthesis character.
[^()]
| # Any non-parenthesis character inside a pair of parentheses. Doesn't
# handle nesting to arbitrary depth.
\([^()]+\)
)+?)
\s*\)
)?
""", re.VERBOSE)
inline_directive = re.compile(r'@([a-zA-Z_][a-zA-Z0-9_]*)(?:\(([^\s)]+)\))?')
name_hole = re.compile(r'<>')
arg_hole = re.compile(r'{\]')
required_hole = _DelimitedRegex(r'{}')
optional_hole = _DelimitedRegex(r'\[\]')
required_arg = _DelimitedRegex(r'{([a-zA-Z_][a-zA-Z0-9_]*(?:\.\.\.)?)}')
optional_arg = _DelimitedRegex(r'\[([a-zA-Z_][a-zA-Z0-9_]*(?:\.\.\.)?)\]')
namehole_escape = re.compile(r'<\|(\|*)>')
requiredhole_escape = re.compile(r'{\|(\|*)}')
optionalhole_escape = re.compile(r'\[\|(\|*)\]')
bad_separator = re.compile(r"""
(?:
# Extra comma-spaces
(?:,\ )+(?=,\ )
|
# Multiple spaces
\ +(?=\ )
)
""", re.VERBOSE)
function_arg = re.compile(r'([a-zA-Z_][a-zA-Z0-9_]*|\.\.\.)')
list_item = re.compile(r'^\s*([*+-]|\d+\.)\s+')
|
24c4e55bb1e841c320a2d6ddc5261194d6254e89
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/plotly/py2/plotly/validators/layout/updatemenu/__init__.py
|
df6ff4d558631535a70577513194dba767a93d50
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 13,418
|
py
|
__init__.py
|
import _plotly_utils.basevalidators
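# Each validator below pops its defaults out of **kwargs, so callers
# (including plotly's code generator) can override edit_type, role, or
# values without subclassing.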
class YanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="yanchor", parent_name="layout.updatemenu", **kwargs
):
super(YanchorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["auto", "top", "middle", "bottom"]),
**kwargs
)
import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="y", parent_name="layout.updatemenu", **kwargs):
super(YValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
max=kwargs.pop("max", 3),
min=kwargs.pop("min", -2),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class XanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="xanchor", parent_name="layout.updatemenu", **kwargs
):
super(XanchorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["auto", "left", "center", "right"]),
**kwargs
)
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="x", parent_name="layout.updatemenu", **kwargs):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
max=kwargs.pop("max", 3),
min=kwargs.pop("min", -2),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="visible", parent_name="layout.updatemenu", **kwargs
):
super(VisibleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class TypeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="type", parent_name="layout.updatemenu", **kwargs):
super(TypeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["dropdown", "buttons"]),
**kwargs
)
import _plotly_utils.basevalidators
class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="templateitemname", parent_name="layout.updatemenu", **kwargs
):
super(TemplateitemnameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ShowactiveValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="showactive", parent_name="layout.updatemenu", **kwargs
):
super(ShowactiveValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class PadValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="pad", parent_name="layout.updatemenu", **kwargs):
super(PadValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Pad"),
data_docs=kwargs.pop(
"data_docs",
"""
b
The amount of padding (in px) along the bottom
of the component.
l
The amount of padding (in px) on the left side
of the component.
r
The amount of padding (in px) on the right side
of the component.
t
The amount of padding (in px) along the top of
the component.
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="name", parent_name="layout.updatemenu", **kwargs):
super(NameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="font", parent_name="layout.updatemenu", **kwargs):
super(FontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Font"),
data_docs=kwargs.pop(
"data_docs",
"""
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class DirectionValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="direction", parent_name="layout.updatemenu", **kwargs
):
super(DirectionValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["left", "right", "up", "down"]),
**kwargs
)
import _plotly_utils.basevalidators
class ButtonValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="buttondefaults", parent_name="layout.updatemenu", **kwargs
):
super(ButtonValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Button"),
data_docs=kwargs.pop(
"data_docs",
"""
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class ButtonsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(
self, plotly_name="buttons", parent_name="layout.updatemenu", **kwargs
):
super(ButtonsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Button"),
data_docs=kwargs.pop(
"data_docs",
"""
args
Sets the arguments values to be passed to the
Plotly method set in `method` on click.
args2
Sets a 2nd set of `args`, these arguments
values are passed to the Plotly method set in
`method` when clicking this button while in the
active state. Use this to create toggle
buttons.
execute
When true, the API method is executed. When
false, all other behaviors are the same and
command execution is skipped. This may be
useful when hooking into, for example, the
`plotly_buttonclicked` method and executing the
API command manually without losing the benefit
of the updatemenu automatically binding to the
state of the plot through the specification of
`method` and `args`.
label
Sets the text label to appear on the button.
method
Sets the Plotly method to be called on click.
If the `skip` method is used, the API
updatemenu will function as normal but will
perform no API calls and will not bind
automatically to state updates. This may be
used to create a component interface and attach
to updatemenu events manually via JavaScript.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
visible
Determines whether or not this button is
visible.
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class BorderwidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="borderwidth", parent_name="layout.updatemenu", **kwargs
):
super(BorderwidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bordercolor", parent_name="layout.updatemenu", **kwargs
):
super(BordercolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bgcolor", parent_name="layout.updatemenu", **kwargs
):
super(BgcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class ActiveValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(self, plotly_name="active", parent_name="layout.updatemenu", **kwargs):
super(ActiveValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
min=kwargs.pop("min", -1),
role=kwargs.pop("role", "info"),
**kwargs
)
|
6fe5add4f2550aceb7747c3c1e436f8719081c2f
|
85c668af40853f5ee48fbe8c4045df1a5dd4104e
|
/examples/volumetric/volumeFromMesh.py
|
ac6cb09ee9159de03c4492e7e1892f47437275b5
|
[
"MIT",
"LicenseRef-scancode-public-domain",
"OFL-1.1"
] |
permissive
|
marcomusy/vedo
|
771db91bca05cda864fc7d1776d9140726676704
|
9a9f7c5e9ebf135e5c745c521c898866e3ede0ef
|
refs/heads/master
| 2023-08-21T12:56:35.545713
| 2023-08-14T14:39:37
| 2023-08-14T14:39:37
| 110,261,047
| 1,419
| 206
|
MIT
| 2023-09-02T18:38:22
| 2017-11-10T15:17:47
|
Python
|
UTF-8
|
Python
| false
| false
| 422
|
py
|
volumeFromMesh.py
|
"""Generate a Volume with the signed distance from a Mesh,
then generate the isosurface at distance -0.5"""
from vedo import *
mesh = Mesh(dataurl+"beethoven.ply").subdivide()
mesh.color('k').point_size(3) # render mesh as points
# Generate signed distance volume
vol = mesh.signed_distance(dims=(40,40,40))
# Generate an isosurface at distance -0.5
iso = vol.isosurface(-0.5)
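# The distance field is signed (assuming vedo's inside-negative convention),
# so the -0.5 isosurface is an offset surface nested inside the original mesh.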
show(mesh, iso, __doc__, axes=1).close()
|
69300c5e172f267d2cd6f0c51643b012570d2039
|
88ab2245b599c7143e22e163151866130675bf0d
|
/tests/test_encoding.py
|
560a6e5a3b15935476dc8af0a36a3c44f537b134
|
[
"MIT"
] |
permissive
|
jendrikseipp/vulture
|
0181d47f29db9a9257735faf58d2ca8b54ed1864
|
2586ee6003ea0b5822838bb2072bad9289b130d3
|
refs/heads/main
| 2023-09-01T20:02:18.612664
| 2023-08-31T07:58:17
| 2023-08-31T08:03:20
| 84,045,805
| 2,745
| 147
|
MIT
| 2023-08-31T08:03:21
| 2017-03-06T08:00:18
|
Python
|
UTF-8
|
Python
| false
| false
| 993
|
py
|
test_encoding.py
|
import codecs
from . import v
from vulture.utils import ExitCode
assert v # Silence pyflakes.
def test_encoding1(v):
v.scan(
"""\
# -*- coding: utf-8 -*-
pass
"""
)
assert v.exit_code == ExitCode.NoDeadCode
def test_encoding2(v):
v.scan(
"""\
#! /usr/bin/env python
# -*- coding: utf-8 -*-
pass
"""
)
assert v.exit_code == ExitCode.NoDeadCode
def test_non_utf8_encoding(v, tmp_path):
code = ""
name = "non_utf8"
non_utf_8_file = tmp_path / (name + ".py")
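# A UTF-16 BOM plus UTF-16 payload cannot be decoded as UTF-8, so vulture
# is expected to report invalid input rather than crash.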
with open(non_utf_8_file, mode="wb") as f:
f.write(codecs.BOM_UTF16_LE)
f.write(code.encode("utf_16_le"))
v.scavenge([non_utf_8_file])
assert v.exit_code == ExitCode.InvalidInput
def test_utf8_with_bom(v, tmp_path):
name = "utf8_bom"
filepath = tmp_path / (name + ".py")
# utf8_sig prepends the BOM to the file.
filepath.write_text("", encoding="utf-8-sig")
v.scavenge([filepath])
assert v.exit_code == ExitCode.NoDeadCode
|
49b6cd71576243aa736e24e8642000ff35287dad
|
d0cfa67c47fe18efe0d3658171b6a88174e22706
|
/tests/test_system/test_system.py
|
0f69d041c36a0b046b2759b3a5d1418fd472e991
|
[
"Apache-2.0"
] |
permissive
|
gilesknap/gphotos-sync
|
42fd20a5eef6131377ced0d3c7c669c011e5494b
|
1516059399de213ccb2be87c6abc61cb61af0e60
|
refs/heads/main
| 2023-09-02T12:18:26.157304
| 2023-08-29T20:00:36
| 2023-08-29T20:00:36
| 98,652,766
| 1,853
| 173
|
Apache-2.0
| 2023-08-05T20:51:38
| 2017-07-28T13:35:07
|
Python
|
UTF-8
|
Python
| false
| false
| 18,395
|
py
|
test_system.py
|
import os
from datetime import datetime
from pathlib import Path
from typing import List
from unittest import TestCase
from unittest.mock import Mock, patch
from requests.exceptions import HTTPError
import gphotos_sync.Utils as Utils
import tests.test_setup as ts
from gphotos_sync.BadIds import BadIds
from gphotos_sync.GooglePhotosDownload import GooglePhotosDownload # type: ignore
from gphotos_sync.LocalData import LocalData
from tests.test_account import TestAccount
photos_root = Path("photos")
albums_root = Path("albums")
comparison_root = Path("comparison")
class TestSystem(TestCase):
def test_sys_favourites_and_dates(self):
"""Download favourite images in test library.
Also Check that dates are set correctly
"""
with ts.SetupDbAndCredentials() as s:
args = ["--favourites-only", "--max-retries", "6", "--max-threads", "2"]
s.test_setup(
"test_sys_favourites", args=args, trash_files=True, trash_db=True
)
s.gp.start(s.parsed_args)
db = LocalData(s.root)
            # Total of 1 media item
db.cur.execute("SELECT COUNT() FROM SyncFiles")
count = db.cur.fetchone()
self.assertEqual(1, count[0])
name = s.root / "photos/2017/09/IMG_2117.JPG"
date = datetime.fromtimestamp(os.path.getmtime(str(name)))
expected = datetime(2017, 9, 26, 15, 29, 44)
self.assertEqual(
expected, date.replace(microsecond=0), "Modify date not set correctly"
)
# TODO: this fails on Github Actions - probably its the filesystem
# rather than the OS
#
# if os.name == "nt":
# date = datetime.fromtimestamp(os.path.getctime(name))
# expected = datetime(2017, 9, 26, 15, 29, 44)
# self.assertEqual(
# expected,
# date.replace(microsecond=0),
# "Create date not set correctly",
# )
def ____test_sys_archived(self):
        # TODO: archiving not working
"""Download archived images in test library."""
with ts.SetupDbAndCredentials() as s:
args = ["--archived", "--skip-albums", "--start-date", "2017-01-01"]
s.test_setup(
"test_sys_archived", args=args, trash_files=True, trash_db=True
)
s.gp.start(s.parsed_args)
db = LocalData(s.root)
            # Total of 1 media item
db.cur.execute("SELECT COUNT() FROM SyncFiles")
count = db.cur.fetchone()
self.assertEqual(1, count[0])
def test_shared_albums(self):
"""Download favourite images in test library."""
with ts.SetupDbAndCredentials() as s:
args = ["--skip-files"]
s.test_setup(
"test_shared_albums", args=args, trash_files=True, trash_db=True
)
s.gp.start(s.parsed_args)
t = (
TestAccount.album_image_count
+ TestAccount.album_shared_image_count
+ TestAccount.shared_album_image_count
+ TestAccount.shared_album_shared_image_count
)
with LocalData(s.root) as db:
db.cur.execute("SELECT COUNT() FROM AlbumFiles")
count = db.cur.fetchone()
self.assertEqual(
t,
count[0],
"expected {} files in all albums including shared".format(t),
)
with ts.SetupDbAndCredentials() as s:
args = ["--skip-files", "--skip-shared-albums"]
s.test_setup(
"test_shared_albums", args=args, trash_files=True, trash_db=True
)
s.gp.start(s.parsed_args)
# note that unless we use --no-album-index the shared files in the
# visible album will show up here
t = (
TestAccount.album_image_count + TestAccount.album_shared_image_count
) # see above
with LocalData(s.root) as db:
db.cur.execute("SELECT COUNT() FROM AlbumFiles")
count = db.cur.fetchone()
self.assertEqual(
t,
count[0],
"expected {} files in all albums excluding shared".format(t),
)
def test_sys_album_add_file(self):
"""tests that the album links get re-created in a new folder with
a new last-date prefix when a recent photo is added to an album,
also that the old folder is removed"""
with ts.SetupDbAndCredentials() as s:
args = ["--start-date", "2017-09-19", "--end-date", "2017-09-20"]
s.test_setup(
"test_sys_album_add_file", args=args, trash_db=True, trash_files=True
)
s.gp.start(s.parsed_args)
pat = str(albums_root / "2017" / "0923 Clones😀" / "*.*")
files = sorted(s.root.glob(pat))
self.assertEqual(4, len(files))
# spoof the album to pretend it only got 3 files up to 2017-09-20
db = LocalData(s.root)
db.cur.execute(
"UPDATE Albums SET EndDate='2017-09-20',"
"Size=3 WHERE "
"AlbumName='Clones😀'"
)
db.store()
args = [
"--start-date",
"2017-09-19",
"--end-date",
"2017-09-23",
"--index-only",
]
s.__exit__()
s.test_setup("test_sys_album_add_file", args=args)
s.gp.start(s.parsed_args)
# the rescan will reset the date so set it back
db = LocalData(s.root)
db.cur.execute(
"UPDATE Albums SET EndDate='2017-09-20' " "WHERE AlbumName='Clones😀'"
)
db.store()
args = ["--skip-index", "--skip-files"]
s.__exit__()
s.test_setup("test_sys_album_add_file", args=args)
s.gp.start(s.parsed_args)
pat = str(albums_root / "2017" / "0920 Clones😀" / "*.*")
files = sorted(s.root.glob(pat))
self.assertEqual(4, len(files))
should_be_gone = s.root / albums_root / "2017" / "0923 Clones😀"
self.assertFalse(should_be_gone.exists())
# test --album-date-by-first-photo
# force re-download of the album
db.cur.execute(
"UPDATE Albums SET Downloaded=0 " "WHERE AlbumName='Clones😀'"
)
db.store()
args = ["--skip-index", "--skip-files", "--album-date-by-first-photo"]
s.__exit__()
s.test_setup("test_sys_album_add_file", args=args)
s.gp.start(s.parsed_args)
pat = str(albums_root / "2017" / "0919 Clones😀" / "*.*")
files = sorted(s.root.glob(pat))
self.assertEqual(4, len(files))
should_be_gone = s.root / albums_root.absolute() / "2017" / "0920 Clones😀"
self.assertFalse(should_be_gone.exists())
def test_system_date_range(self):
with ts.SetupDbAndCredentials() as s:
args = [
"--start-date",
"2016-01-01",
"--end-date",
"2017-01-01",
"--skip-albums",
"--index-only",
]
s.test_setup(
"test_system_date_range", args=args, trash_db=True, trash_files=True
)
s.gp.start(s.parsed_args)
db = LocalData(s.root)
# Total of 10 images
db.cur.execute("SELECT COUNT() FROM SyncFiles")
count = db.cur.fetchone()
self.assertEqual(10, count[0])
with ts.SetupDbAndCredentials() as s:
args = [
"--start-date",
"2016-01-01",
"--end-date",
"2017-01-01",
"--use-hardlinks",
"--album",
"Clones😀",
]
s.test_setup(
"test_system_hard_link", args=args, trash_db=True, trash_files=True
)
s.gp.start(s.parsed_args)
with LocalData(s.root) as db:
# Total of 4 images
db.cur.execute("SELECT COUNT() FROM AlbumFiles")
count = db.cur.fetchone()
self.assertEqual(4, count[0])
pat = str(albums_root / "*" / "*Clones😀" / "*")
links: List[Path] = sorted(s.root.glob(pat))
self.assertEqual(4, len(links))
for link in links:
self.assertTrue(not link.is_symlink())
# verify that switching to soft links in the same folder
# overwrites all hard links
args = [
"--start-date",
"2016-01-01",
"--end-date",
"2017-01-01",
"--album",
"Clones😀",
"--flush-index",
]
            s.__exit__()
s.test_setup(
"test_system_hard_link", args=args, trash_db=False, trash_files=False
)
s.gp.start(s.parsed_args)
with LocalData(s.root) as db:
# Total of 4 images
db.cur.execute("SELECT COUNT() FROM AlbumFiles")
count = db.cur.fetchone()
self.assertEqual(4, count[0])
pat = str(albums_root / "*" / "*Clones😀" / "*")
links = sorted(s.root.glob(pat))
self.assertEqual(4, len(links))
for link in links:
self.assertTrue(link.is_symlink())
def test_system_skip_video(self):
with ts.SetupDbAndCredentials() as s:
args = [
"--start-date",
"2017-01-01",
"--end-date",
"2018-01-01",
"--skip-albums",
"--index-only",
]
s.test_setup(
"test_system_skip_video", args=args, trash_db=True, trash_files=True
)
s.gp.start(s.parsed_args)
db = LocalData(s.root)
# Total of 20 media items
db.cur.execute("SELECT COUNT() FROM SyncFiles")
count = db.cur.fetchone()
self.assertEqual(20, count[0])
db.store()
del db
with ts.SetupDbAndCredentials() as s:
args = [
"--start-date",
"2017-01-01",
"--end-date",
"2018-01-01",
"--skip-albums",
"--index-only",
"--skip-video",
]
s.test_setup(
"test_system_skip_video", args=args, trash_db=True, trash_files=True
)
s.gp.start(s.parsed_args)
db = LocalData(s.root)
# Total of 10 media items
db.cur.execute("SELECT COUNT() FROM SyncFiles")
count = db.cur.fetchone()
self.assertEqual(10, count[0])
def test_system_retry_download(self):
with ts.SetupDbAndCredentials() as s:
# note we do index albums because there was a bug on retrying
# downloads with albums enabled
args = [
"--start-date",
"2017-01-01",
"--end-date",
"2018-01-01",
"--skip-video",
]
s.test_setup(
"test_system_retry_download", args=args, trash_db=True, trash_files=True
)
s.gp.start(s.parsed_args)
pat = str(photos_root / "2017" / "??" / "*.[JjpP]*")
files = sorted(s.root.glob(pat))
self.assertEqual(15, len(files))
files[0].unlink()
files = sorted(s.root.glob(pat))
self.assertEqual(14, len(files))
# re-run should not download since file is marked as downloaded
with ts.SetupDbAndCredentials() as s:
s.test_setup("test_system_retry_download", args=args)
s.gp.start(s.parsed_args)
files = sorted(s.root.glob(pat))
self.assertEqual(14, len(files))
        # but adding --retry-download should get us back to 15 files
args.append("--retry-download")
with ts.SetupDbAndCredentials() as s:
s.test_setup("test_system_retry_download", args=args)
s.gp.start(s.parsed_args)
            files = sorted(s.root.glob(pat))
            self.assertEqual(15, len(files))
with ts.SetupDbAndCredentials() as s:
args = [
"--start-date",
"2017-01-01",
"--end-date",
"2018-01-01",
"--skip-video",
"--skip-albums",
"--do-delete",
]
s.test_setup("test_do_delete", args=args, trash_db=True, trash_files=True)
s.gp.start(s.parsed_args)
pat = str(photos_root / "2017" / "??" / "*.[JjpP]*")
files = sorted(s.root.glob(pat))
self.assertEqual(10, len(files))
db = LocalData(s.root)
# noinspection SqlWithoutWhere
db.cur.execute("DELETE FROM SyncFiles;")
db.store()
args.append("--skip-index")
with ts.SetupDbAndCredentials() as s:
s.test_setup("test_do_delete", args=args)
s.gp.start(s.parsed_args)
# should have removed all files
files = sorted(s.root.glob(pat))
self.assertEqual(0, len(files))
def test_system_incremental(self):
with ts.SetupDbAndCredentials() as s:
args = [
"--start-date",
TestAccount.start,
"--end-date",
TestAccount.end,
"--skip-albums",
"--index-only",
]
s.test_setup(
"test_system_incremental", args=args, trash_db=True, trash_files=True
)
s.gp.start(s.parsed_args)
db = LocalData(s.root)
db.cur.execute("SELECT COUNT() FROM SyncFiles")
count = db.cur.fetchone()
self.assertEqual(
TestAccount.image_count_2016,
count[0],
"expected {} items in 2016".format(TestAccount.image_count_2016),
)
# force an update to the 'most recently scanned file' record
# (this is normally only set for complete scans and was tested in
# test_sys_whole_library)
db.set_scan_date(Utils.string_to_date("2017-01-01"))
db.store()
with ts.SetupDbAndCredentials() as s:
args = ["--skip-albums", "--index-only"]
s.test_setup("test_system_incremental", args=args)
s.gp.start(s.parsed_args)
# this should add in everything in 2017 onwards (21 files)
db.cur.execute("SELECT COUNT() FROM SyncFiles")
count = db.cur.fetchone()
t = (
TestAccount.image_count_2016
+ TestAccount.item_count_2017
+ TestAccount.item_count_2020
)
self.assertEqual(
t, count[0], "expected file count from 2016 and 2017 to be {}".format(t)
)
d_date = db.get_scan_date()
self.assertEqual(d_date.date(), TestAccount.latest_date)
with ts.SetupDbAndCredentials() as s:
args = ["--skip-albums", "--index-only", "--rescan"]
s.test_setup("test_system_incremental", args=args)
s.gp.start(s.parsed_args)
# this should add in everything
db.cur.execute("SELECT COUNT() FROM SyncFiles")
count = db.cur.fetchone()
t = TestAccount.image_count + TestAccount.video_count
self.assertEqual(
t, count[0], "expected a total of {} items after full sync".format(t)
)
def test_skip_video_on_album(self):
"""verify that skip video works when syncing a specific folder"""
with ts.SetupDbAndCredentials() as s:
args = ["--skip-video", "--album", "Movies"]
s.test_setup(
"test_skip_video_on_album", args=args, trash_files=True, trash_db=True
)
s.gp.start(s.parsed_args)
with LocalData(s.root) as db:
db.cur.execute("SELECT COUNT() FROM AlbumFiles")
count = db.cur.fetchone()
self.assertEqual(0, count[0], "expected 0 video files in album Movies")
@patch.object(GooglePhotosDownload, "do_download_file")
def test_bad_ids(self, do_download_file):
do_download_file.side_effect = HTTPError(Mock(status=500), "ouch!")
with ts.SetupDbAndCredentials() as s:
args = [
"--start-date",
TestAccount.start,
"--end-date",
TestAccount.end,
"--skip-albums",
]
s.test_setup("test_bad_ids", args=args, trash_db=True, trash_files=True)
s.gp.start(s.parsed_args)
# check we tried to download 10 times
self.assertEqual(
do_download_file.call_count,
TestAccount.image_count_2016,
"Expected {} downloads".format(TestAccount.image_count_2016),
)
# this should have created a Bad IDs file
bad_ids = BadIds(s.root)
self.assertEqual(
len(bad_ids.items),
TestAccount.image_count_2016,
"Expected {} Bad IDs entries".format(TestAccount.image_count_2016),
)
do_download_file.reset_mock()
s.__exit__()
s.test_setup("test_bad_ids", args=args)
s.gp.start(s.parsed_args)
# this should have skipped the bad ids and not tried to download
self.assertEqual(
do_download_file.call_count, 0, "Expected 0 calls to do_download"
)
|
dbce2be2823b80284cff474a855423319d4b2bcf
|
fab5ffec1370326b89a04513f0362b66ed132357
|
/tests/fake_response.py
|
3ae728c29153cf3cc91bbf1058d95f541fe97bbf
|
[
"MIT"
] |
permissive
|
kmadac/bitstamp-python-client
|
a413da3167900659bb5848a4bc6a0a546e269aaa
|
c0f31b73d5e2cc4d5b29800d285b8ba777c77ad1
|
refs/heads/master
| 2022-10-31T19:14:18.249426
| 2022-10-26T18:17:48
| 2022-10-26T18:17:48
| 9,040,839
| 122
| 84
|
MIT
| 2022-10-24T07:06:08
| 2013-03-26T21:28:31
|
Python
|
UTF-8
|
Python
| false
| false
| 265
|
py
|
fake_response.py
|
import requests
class FakeResponse(requests.Response):
def __init__(self, content='', status_code=200):
super(FakeResponse, self).__init__()
self._content = content
self._content_consumed = True
self.status_code = status_code
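# Usage sketch (hypothetical, for tests only). requests stores the body in
# `_content`, so passing bytes keeps `.content` behaviour faithful:
#
#   resp = FakeResponse(content=b'{"ok": true}', status_code=200)
#   assert resp.status_code == 200 and resp.content == b'{"ok": true}'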
|
e0549c06e25d256f9769b063285b3a2ce0db3a6c
|
8d44e796eaf0c8e11bbc2a27ef093e97a25b6f4a
|
/haystack/nodes/other/__init__.py
|
a92291f4ed1382ef1b5dfc577e542ee5f8391ea7
|
[
"Apache-2.0"
] |
permissive
|
deepset-ai/haystack
|
caa5287051d1771395ea624b58097000825bad81
|
5f1256ac7e5734c2ea481e72cb7e02c34baf8c43
|
refs/heads/main
| 2023-09-01T02:41:23.490526
| 2023-08-31T15:33:12
| 2023-08-31T15:33:12
| 221,654,678
| 10,599
| 1,558
|
Apache-2.0
| 2023-09-14T17:09:42
| 2019-11-14T09:05:28
|
Python
|
UTF-8
|
Python
| false
| false
| 396
|
py
|
__init__.py
|
from haystack.nodes.other.docs2answers import Docs2Answers
from haystack.nodes.other.join_docs import JoinDocuments
from haystack.nodes.other.route_documents import RouteDocuments
from haystack.nodes.other.join_answers import JoinAnswers
from haystack.nodes.other.join import JoinNode
from haystack.nodes.other.document_merger import DocumentMerger
from haystack.nodes.other.shaper import Shaper
|
26d2d7c7e2602999dedbed71f88ac915ffd98506
|
860c31e414c4c280b70ec0872042d715a2d56978
|
/benchmarks/train_crnn_cinc2021/special_detectors.py
|
c2934284246a24abd05fd619425b0f6b2c7fb80b
|
[
"MIT"
] |
permissive
|
DeepPSP/torch_ecg
|
255e49ff436e13044a1f049141f982680e56970e
|
a40c65f4fefa83ba7d3d184072a4c05627b7e226
|
refs/heads/master
| 2023-09-01T06:47:17.153216
| 2023-08-31T18:00:47
| 2023-08-31T18:00:47
| 298,482,237
| 111
| 16
|
MIT
| 2023-08-21T11:25:07
| 2020-09-25T06:03:17
|
Python
|
UTF-8
|
Python
| false
| false
| 24,662
|
py
|
special_detectors.py
|
"""
special detectors using rules,
for (possibly auxiliary) detection of PR, Brady (including SB), LQRSV, RAD, LAD, STach
pending arrhythmia classes: LPR, LQT
NOTE:
-----
1. ALL signals are assumed to have units in mV
2. almost all the rules can be found in `utils.ecg_arrhythmia_knowledge`
3. "PR" is superior to electrical axis deviation, which should be considered in the final decision.
the co-occurrence of "PR" and "LAD" is 7; the co-occurrence of "PR" and "RAD" is 3, whose probabilities are both relatively low
TODO:
-----
currently all are binary detectors, --> detectors producing a probability?
"""
from itertools import repeat
from numbers import Real
from typing import Any, Optional, Sequence
import numpy as np
from biosppy.signals.tools import filter_signal
from deprecated import deprecated
from scipy.signal import peak_prominences
try:
import torch_ecg # noqa: F401
except ModuleNotFoundError:
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).absolute().parents[2]))
from cfg import SpecialDetectorCfg
from torch_ecg.cfg import CFG
from torch_ecg.utils.ecg_arrhythmia_knowledge import (
LimbLeads,
PrecordialLeads,
Standard12Leads,
)
from torch_ecg.utils.misc import ms2samples, samples2ms
from torch_ecg.utils.utils_data import get_mask
from torch_ecg.utils._preproc import preprocess_multi_lead_signal
from torch_ecg.utils.utils_signal import detect_peaks, get_ampl
__all__ = [
"special_detectors",
"pacing_rhythm_detector",
"electrical_axis_detector",
"brady_tachy_detector",
"LQRSV_detector",
"PRWP_detector",
]
def special_detectors(
raw_sig: np.ndarray,
fs: Real,
sig_fmt: str = "channel_first",
leads: Sequence[str] = Standard12Leads,
verbose: int = 0,
**kwargs: Any,
) -> dict:
"""
Parameters
----------
raw_sig: ndarray,
the raw multi-lead ecg signal, with units in mV
fs: real number,
sampling frequency of `sig`
sig_fmt: str, default "channel_first",
format of the multi-lead ecg signal,
"channel_last" (alias "lead_last"), or
"channel_first" (alias "lead_first", original)
leads: sequence of str,
names of the leads in the input signal
verbose: int, default 0,
print verbosity
kwargs: dict,
keyword arguments, including:
"rpeak_fn": rpeak detection method, can be one of
"seq_lab", "xqrs", "gqrs", "hamilton", "ssf", "christov", "engzee", gamboa"
the default method is "xqrs",
which has less environment issues compared to the deep learning method "seq_lab"
"axis_method": electrical axis detection method, can be one of
"2-lead", "3-lead"
the default method is "2-lead"
Returns
-------
conclusion: dict,
        probability or binary conclusion for each arrhythmia
"""
preprocess = preprocess_multi_lead_signal(
raw_sig,
fs,
sig_fmt,
rpeak_fn=kwargs.get("rpeak_fn", "xqrs"),
# rpeak_fn=kwargs.get("rpeak_fn", "seq_lab"),
verbose=verbose,
)
filtered_sig = preprocess["filtered_ecg"]
rpeaks = preprocess["rpeaks"]
is_PR = pacing_rhythm_detector(
raw_sig, fs, sig_fmt, leads, ret_prob=False, verbose=verbose
)
axis = electrical_axis_detector(
filtered_sig,
rpeaks,
fs,
sig_fmt,
leads,
method=kwargs.get("axis_method", "2-lead"),
verbose=verbose,
)
brady_tachy = brady_tachy_detector(rpeaks, fs, verbose=verbose)
is_LQRSV = LQRSV_detector(filtered_sig, rpeaks, fs, sig_fmt, leads, verbose=verbose)
is_PRWP = PRWP_detector(filtered_sig, rpeaks, fs, sig_fmt, leads, verbose=verbose)
is_LAD = axis == "LAD"
is_RAD = axis == "RAD"
is_brady = brady_tachy == "B"
is_tachy = brady_tachy == "T"
conclusion = CFG(
is_brady=is_brady,
is_tachy=is_tachy,
is_LAD=is_LAD,
is_RAD=is_RAD,
is_PR=is_PR,
is_LQRSV=is_LQRSV,
is_PRWP=is_PRWP,
)
return conclusion
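# Minimal usage sketch (illustrative; `sig` is a hypothetical 12-lead array of
# shape (12, n_samples) with units in mV, sampled at 500 Hz; the returned CFG
# behaves like a dict with attribute access):
#
#   conclusion = special_detectors(sig, fs=500, sig_fmt="channel_first")
#   if conclusion.is_PR:
#       print("pacing rhythm suspected")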
def pacing_rhythm_detector(
raw_sig: np.ndarray,
fs: Real,
sig_fmt: str = "channel_first",
leads: Sequence[str] = Standard12Leads,
ret_prob: bool = True,
verbose: int = 0,
) -> Real:
"""to be improved (fine-tuning hyper-parameters in cfg.py),
Parameters
----------
raw_sig: ndarray,
the raw multi-lead ecg signal, with units in mV
fs: real number,
sampling frequency of `sig`
sig_fmt: str, default "channel_first",
format of the multi-lead ecg signal,
"channel_last" (alias "lead_last"), or
"channel_first" (alias "lead_first", original)
leads: sequence of str,
names of the leads in the input signal
ret_prob: bool, default True,
if True, a probability will be returned,
otherwise, a binary prediction will be returned
verbose: int, default 0,
print verbosity
Returns
-------
is_PR: real number,
probability for the ecg signal to be of pacing rhythm,
        or a binary decision
"""
if sig_fmt.lower() in ["channel_first", "lead_first"]:
s = raw_sig.copy()
else:
s = raw_sig.T
data_hp = np.array(
[
filter_signal(
s[lead, ...],
ftype="butter",
band="highpass",
order=20,
frequency=SpecialDetectorCfg.pr_fs_lower_bound,
sampling_rate=fs,
)["signal"]
for lead in range(s.shape[0])
]
)
potential_spikes = []
# sig_len = data_hp.shape[-1]
n_leads, sig_len = data_hp.shape
assert n_leads == len(leads)
for ld in range(n_leads):
lead_hp = np.abs(data_hp[ld, ...])
mph = SpecialDetectorCfg.pr_spike_mph_ratio * np.sum(lead_hp) / sig_len
lead_spikes = detect_peaks(
x=lead_hp,
mph=mph,
mpd=ms2samples(SpecialDetectorCfg.pr_spike_mpd, fs),
prominence=SpecialDetectorCfg.pr_spike_prominence,
prominence_wlen=ms2samples(SpecialDetectorCfg.pr_spike_prominence_wlen, fs),
verbose=0,
)
if verbose >= 2:
print(f"for the {ld}-th lead, its spike detecting mph = {mph:.4f} mV")
print(f"lead_spikes = {lead_spikes.tolist()}")
print(
f"with prominences = {np.round(peak_prominences(lead_hp, lead_spikes, wlen=ms2samples(SpecialDetectorCfg.pr_spike_prominence_wlen, fs))[0], 5).tolist()}"
)
potential_spikes.append(lead_spikes)
# make decision using `potential_spikes`
sig_duration_ms = samples2ms(sig_len, fs)
# lead_has_enough_spikes = [False if len(potential_spikes[ld]) ==0 else sig_duration_ms / len(potential_spikes[ld]) < SpecialDetectorCfg.pr_spike_inv_density_threshold for ld in range(n_leads)]
lead_has_enough_spikes = list(repeat(0, n_leads))
for ld in range(n_leads):
if len(potential_spikes[ld]) > 0:
relative_inv_density = (
SpecialDetectorCfg.pr_spike_inv_density_threshold
- sig_duration_ms / len(potential_spikes[ld])
)
# sigmoid
lead_has_enough_spikes[ld] = 1 / (1 + np.exp(-relative_inv_density / 100))
if not ret_prob:
lead_has_enough_spikes[ld] = int(lead_has_enough_spikes[ld] >= 0.5)
if verbose >= 1:
print(f"lead_has_enough_spikes = {lead_has_enough_spikes}")
print(
f"leads spikes density (units in ms) = {[len(potential_spikes[ld]) / sig_duration_ms for ld in range(n_leads)]}"
)
_threshold = int(round(SpecialDetectorCfg.pr_spike_leads_threshold * n_leads))
if ret_prob:
# pooling (max, or avg)
is_PR = sorted(lead_has_enough_spikes, reverse=True)[:_threshold]
is_PR = np.mean(is_PR)
else:
is_PR = sum(lead_has_enough_spikes) >= _threshold
return is_PR
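# Worked example of the sigmoid mapping above: when a lead's observed inverse
# spike density (ms per spike) equals `pr_spike_inv_density_threshold` exactly,
# relative_inv_density == 0 and the lead probability is 1 / (1 + exp(0)) = 0.5,
# i.e. maximally uncertain; denser spikes push the probability towards 1.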
def electrical_axis_detector(
filtered_sig: np.ndarray,
rpeaks: np.ndarray,
fs: Real,
sig_fmt: str = "channel_first",
leads: Sequence[str] = Standard12Leads,
method: Optional[str] = None,
verbose: int = 0,
) -> str:
"""to be improved (fine-tuning hyper-parameters in cfg.py),
detector of the heart electrical axis by means of "2-lead" method or "3-lead" method,
NOTE that the extreme axis is not checked and treated as "normal"
Parameters
----------
filtered_sig: ndarray,
the filtered multi-lead ecg signal, with units in mV
rpeaks: ndarray,
array of indices of the R peaks
fs: real number,
sampling frequency of `sig`
sig_fmt: str, default "channel_first",
format of the multi-lead ecg signal,
"channel_last" (alias "lead_last"), or
"channel_first" (alias "lead_first", original)
leads: sequence of str,
names of the leads in the input signal
method: str, optional,
method for detecting electrical axis, can be "2-lead", "3-lead",
if not specified, `SpecialDetectorCfg.axis_method` will be used
verbose: int, default 0,
print verbosity
Returns
-------
axis: str,
one of "normal", "LAD", "RAD",
the heart electrical axis
"""
decision_method = method or SpecialDetectorCfg.axis_method
decision_method = decision_method.lower()
assert decision_method in [
"2-lead",
"3-lead",
], f"Method `{decision_method}` not supported!"
if sig_fmt.lower() in ["channel_first", "lead_first"]:
s = filtered_sig.copy()
else:
s = filtered_sig.T
if len(set(["I", "aVF"]).intersection(leads)) < 2:
# impossible to make decision
# return "normal" by default
axis = "normal"
return axis
lead_I = s[list(leads).index("I")]
lead_aVF = s[list(leads).index("aVF")]
    try:
        lead_II = s[list(leads).index("II")]
    except Exception:
        # no lead II, degenerate to the "2-lead" method
        lead_II = None
        decision_method = "2-lead"
    if len(rpeaks) == 0:
# degenerate case
# voltage might be too low to detect rpeaks
lead_I_positive = np.max(lead_I) > np.abs(np.min(lead_I))
        lead_II_positive = lead_II is not None and np.max(lead_II) > np.abs(np.min(lead_II))
lead_aVF_positive = np.max(lead_aVF) > np.abs(np.min(lead_aVF))
# decision making
if decision_method == "2-lead":
if lead_I_positive and not lead_aVF_positive:
axis = "LAD"
elif not lead_I_positive and lead_aVF_positive:
axis = "RAD"
else: # if `rpeaks` is empty, all conditions are False
axis = "normal" # might also include extreme axis
elif decision_method == "3-lead":
if lead_I_positive and not lead_II_positive and not lead_aVF_positive:
axis = "LAD"
elif not lead_I_positive and lead_aVF_positive:
axis = "RAD"
else:
axis = "normal" # might also include extreme axis
return axis
sig_len = s.shape[1]
radius = ms2samples(SpecialDetectorCfg.axis_qrs_mask_radius, fs)
l_qrs = []
for r in rpeaks:
l_qrs.append([max(0, r - radius), min(sig_len - 1, r + radius)])
if verbose >= 1:
print(f"qrs mask radius = {radius}, sig_len = {sig_len}")
print(f"l_qrs = {l_qrs}")
# lead I
lead_I_positive = (
sum(
[
np.max(lead_I[qrs_itv[0] : qrs_itv[1]])
> np.abs(np.min(lead_I[qrs_itv[0] : qrs_itv[1]]))
for qrs_itv in l_qrs
]
)
>= len(l_qrs) // 2 + 1
)
# lead aVF
lead_aVF_positive = (
sum(
[
np.max(lead_aVF[qrs_itv[0] : qrs_itv[1]])
> np.abs(np.min(lead_aVF[qrs_itv[0] : qrs_itv[1]]))
for qrs_itv in l_qrs
]
)
>= len(l_qrs) // 2 + 1
)
    # lead II (may be absent; only needed for the "3-lead" method)
    if lead_II is not None:
        lead_II_positive = (
            sum(
                [
                    np.max(lead_II[qrs_itv[0] : qrs_itv[1]])
                    > np.abs(np.min(lead_II[qrs_itv[0] : qrs_itv[1]]))
                    for qrs_itv in l_qrs
                ]
            )
            >= len(l_qrs) // 2 + 1
        )
    else:
        lead_II_positive = False
# decision making
if decision_method == "2-lead":
if lead_I_positive and not lead_aVF_positive:
axis = "LAD"
elif not lead_I_positive and lead_aVF_positive:
axis = "RAD"
else: # if `rpeaks` is empty, all conditions are False
axis = "normal" # might also include extreme axis
elif decision_method == "3-lead":
if lead_I_positive and not lead_II_positive and not lead_aVF_positive:
axis = "LAD"
elif not lead_I_positive and lead_aVF_positive:
axis = "RAD"
else:
axis = "normal" # might also include extreme axis
return axis
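# Worked example of the "2-lead" rule above: a predominantly positive QRS in
# lead I with a predominantly negative QRS in aVF yields "LAD"; the reversed
# polarity pattern yields "RAD"; any other combination is reported as "normal"
# (which, as noted in the code, may also absorb the extreme axis).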
def brady_tachy_detector(
rpeaks: np.ndarray,
fs: Real,
normal_rr_range: Optional[Sequence[Real]] = None,
verbose: int = 0,
) -> str:
"""to be improved (fine-tuning hyper-parameters in cfg.py),
    determine if the ecg is bradycardia or tachycardia or normal,
only by the mean rr interval.
this detector can be used alone (e.g. for the arrhythmia `Brady`),
or combined with other detectors (e.g. for the arrhythmia `STach`)
Parameters
----------
rpeaks: ndarray,
array of indices of the R peaks
fs: real number,
sampling frequency of the ecg signal
normal_rr_range: sequence of int, optional,
the range of normal rr interval, with units in ms;
if not given, default values from `SpecialDetectorCfg` will be used
verbose: int, default 0,
print verbosity
Returns
-------
conclusion: str,
one of "T" (tachycardia), "B" (bradycardia), "N" (normal)
"""
if len(rpeaks) <= 1:
# unable to make predictions
# TODO: try using spectral method
conclusion = "N"
return conclusion
rr_intervals = np.diff(rpeaks)
mean_rr = np.mean(rr_intervals)
if verbose >= 1:
if len(rr_intervals) > 0:
print(
f"mean_rr = {round(samples2ms(mean_rr, fs), 1)} ms, with detailed rr_intervals (with units in ms) = {(np.vectorize(lambda item:samples2ms(item, fs))(rr_intervals)).tolist()}"
)
else:
print("not enough r peaks for computing rr intervals")
nrr = normal_rr_range or [
SpecialDetectorCfg.tachy_threshold,
SpecialDetectorCfg.brady_threshold,
]
nrr = sorted(nrr)
assert len(nrr) >= 2
nrr = [ms2samples(nrr[0], fs), ms2samples(nrr[-1], fs)]
# if mean_rr is nan, then all conditions are False, hence the `else` branch is entered
if mean_rr < nrr[0]:
conclusion = "T"
elif mean_rr > nrr[1]:
conclusion = "B"
else:
conclusion = "N"
return conclusion
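# Worked example: with fs = 500 Hz and rpeaks spaced 250 samples apart, the
# mean RR interval is 500 ms. Assuming thresholds of 600 ms (100 bpm) for
# tachycardia and 1000 ms (60 bpm) for bradycardia in `SpecialDetectorCfg`
# (values not verified here), 500 ms < 600 ms and the detector returns "T".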
def LQRSV_detector(
filtered_sig: np.ndarray,
rpeaks: np.ndarray,
fs: Real,
sig_fmt: str = "channel_first",
leads: Sequence[str] = Standard12Leads,
verbose: int = 0,
) -> bool:
"""to be improved (fine-tuning hyper-parameters in cfg.py),
Parameters
----------
filtered_sig: ndarray,
the filtered multi-lead ecg signal, with units in mV
rpeaks: ndarray,
array of indices of the R peaks
fs: real number,
sampling frequency of the ecg signal
sig_fmt: str, default "channel_first",
format of the 12 lead ecg signal,
"channel_last" (alias "lead_last"), or
"channel_first" (alias "lead_first", original)
leads: sequence of str,
names of the leads in the input signal
verbose: int, default 0,
print verbosity
Returns
-------
is_LQRSV: bool,
the ecg signal is of arrhythmia `LQRSV` or not
"""
sig_ampl = get_ampl(
sig=filtered_sig,
fs=fs,
fmt=sig_fmt,
window=2 * SpecialDetectorCfg.lqrsv_qrs_mask_radius / 1000, # ms to s
critical_points=rpeaks,
)
limb_leads = [ld for ld in leads if ld in LimbLeads]
limb_lead_inds = [list(leads).index(ld) for ld in limb_leads]
precordial_leads = [ld for ld in leads if ld in PrecordialLeads]
precordial_lead_inds = [list(leads).index(ld) for ld in precordial_leads]
if verbose >= 1:
print(
f"limb_lead_inds = {limb_lead_inds}, precordial_lead_inds = {precordial_lead_inds}"
)
low_qrs_limb_leads = [
sig_ampl[idx] <= 0.5 + SpecialDetectorCfg.lqrsv_ampl_bias
for idx in limb_lead_inds
]
if len(low_qrs_limb_leads) > 0:
low_qrs_limb_leads = sum(low_qrs_limb_leads) / len(
low_qrs_limb_leads
) # to ratio
else: # no limb leads
        # the limb-lead and precordial-lead criteria are combined with OR,
        # hence the default ratios are set to 0 when limb or precordial leads are absent
low_qrs_limb_leads = 0
low_qrs_precordial_leads = [
sig_ampl[idx] <= 1 + SpecialDetectorCfg.lqrsv_ampl_bias
for idx in precordial_lead_inds
]
if len(low_qrs_precordial_leads) > 0:
low_qrs_precordial_leads = sum(low_qrs_precordial_leads) / len(
low_qrs_precordial_leads
)
else:
low_qrs_precordial_leads = 0
if verbose >= 2:
print(f"ratio of low qrs in limb leads = {low_qrs_limb_leads}")
print(f"ratio of low qrs in precordial leads = {low_qrs_precordial_leads}")
is_LQRSV = (low_qrs_limb_leads >= SpecialDetectorCfg.lqrsv_ratio_threshold) or (
low_qrs_precordial_leads >= SpecialDetectorCfg.lqrsv_ratio_threshold
)
return is_LQRSV
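# Note: the 0.5 mV (limb leads) and 1 mV (precordial leads) cut-offs used above
# correspond to the conventional low-QRS-voltage criteria; `lqrsv_ampl_bias`
# adds a configurable margin on top of them.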
@deprecated
def LQRSV_detector_backup(
filtered_sig: np.ndarray,
rpeaks: np.ndarray,
fs: Real,
sig_fmt: str = "channel_first",
leads: Sequence[str] = Standard12Leads,
verbose: int = 0,
) -> bool:
"""to be improved (fine-tuning hyper-parameters in cfg.py),
Parameters
----------
filtered_sig: ndarray,
the filtered 12-lead ecg signal, with units in mV
rpeaks: ndarray,
array of indices of the R peaks
fs: real number,
sampling frequency of the ecg signal
sig_fmt: str, default "channel_first",
format of the 12 lead ecg signal,
"channel_last" (alias "lead_last"), or
"channel_first" (alias "lead_first", original)
leads: sequence of str,
names of the leads in the input signal
verbose: int, default 0,
print verbosity
Returns
-------
is_LQRSV: bool,
the ecg signal is of arrhythmia `LQRSV` or not
"""
if sig_fmt.lower() in ["channel_first", "lead_first"]:
sig_ampl = filtered_sig.copy()
else:
sig_ampl = filtered_sig.T
qrs_mask_radius = ms2samples(SpecialDetectorCfg.lqrsv_qrs_mask_radius, fs)
l_qrs = get_mask(
shape=sig_ampl.shape,
critical_points=rpeaks,
left_bias=qrs_mask_radius,
right_bias=qrs_mask_radius,
return_fmt="intervals",
)
if verbose >= 2:
print(f"qrs intervals = {l_qrs}")
limb_leads = [ld for ld in leads if ld in LimbLeads]
limb_lead_inds = [list(leads).index(ld) for ld in limb_leads]
precordial_leads = [ld for ld in leads if ld in PrecordialLeads]
precordial_lead_inds = [list(leads).index(ld) for ld in precordial_leads]
l_qrs_limb_leads = []
l_qrs_precordial_leads = []
if len(l_qrs) == 0:
# no rpeaks detected
low_qrs_limb_leads = [
np.max(sig_ampl[idx]) <= 0.5 + SpecialDetectorCfg.lqrsv_ampl_bias
for idx in limb_lead_inds
]
low_qrs_limb_leads = sum(low_qrs_limb_leads) / len(
low_qrs_limb_leads
) # to ratio
low_qrs_precordial_leads = [
np.max(sig_ampl[idx]) <= 1 + SpecialDetectorCfg.lqrsv_ampl_bias
for idx in precordial_lead_inds
]
low_qrs_precordial_leads = sum(low_qrs_precordial_leads) / len(
low_qrs_precordial_leads
)
else:
for itv in l_qrs:
for idx in limb_lead_inds:
l_qrs_limb_leads.append(sig_ampl[idx, itv[0] : itv[1]].flatten())
for idx in precordial_lead_inds:
l_qrs_precordial_leads.append(sig_ampl[idx, itv[0] : itv[1]].flatten())
if verbose >= 2:
print("for limb leads, the qrs amplitudes are as follows:")
for idx, lead_name in enumerate(limb_leads):
print(
f"for limb lead {lead_name}, the qrs amplitudes are {[np.max(item) for item in l_qrs_limb_leads[idx*len(l_qrs): (idx+1)*len(l_qrs)]]}"
)
for idx, lead_name in enumerate(precordial_leads):
print(
f"for precordial lead {lead_name}, the qrs amplitudes are {[np.max(item) for item in l_qrs_limb_leads[idx*len(l_qrs): (idx+1)*len(l_qrs)]]}"
)
low_qrs_limb_leads = [
np.max(item) <= 0.5 + SpecialDetectorCfg.lqrsv_ampl_bias
for item in l_qrs_limb_leads
]
low_qrs_limb_leads = sum(low_qrs_limb_leads) / len(
low_qrs_limb_leads
) # to ratio
low_qrs_precordial_leads = [
np.max(item) <= 1 + SpecialDetectorCfg.lqrsv_ampl_bias
for item in l_qrs_precordial_leads
]
low_qrs_precordial_leads = sum(low_qrs_precordial_leads) / len(
low_qrs_precordial_leads
)
if verbose >= 2:
print(f"ratio of low qrs in limb leads = {low_qrs_limb_leads}")
print(f"ratio of low qrs in precordial leads = {low_qrs_precordial_leads}")
is_LQRSV = (low_qrs_limb_leads >= SpecialDetectorCfg.lqrsv_ratio_threshold) or (
low_qrs_precordial_leads >= SpecialDetectorCfg.lqrsv_ratio_threshold
)
return is_LQRSV
def PRWP_detector(
filtered_sig: np.ndarray,
rpeaks: np.ndarray,
fs: Real,
sig_fmt: str = "channel_first",
leads: Sequence[str] = Standard12Leads,
verbose: int = 0,
) -> bool:
"""to be improved
Parameters
----------
filtered_sig: ndarray,
the filtered multi-lead ecg signal, with units in mV
rpeaks: ndarray,
array of indices of the R peaks
fs: real number,
sampling frequency of the ecg signal
sig_fmt: str, default "channel_first",
format of the 12 lead ecg signal,
"channel_last" (alias "lead_last"), or
"channel_first" (alias "lead_first", original)
leads: sequence of str,
names of the leads in the input signal
verbose: int, default 0,
print verbosity
Returns
-------
is_PRWP: bool,
the ecg signal is of arrhythmia `PRWP` or not
"""
if sig_fmt.lower() in ["channel_first", "lead_first"]:
r_ampl = filtered_sig[..., rpeaks]
else:
# all change to lead_first
r_ampl = filtered_sig[rpeaks, ...].T
if (
len(set([f"V{n}" for n in range(1, 5)]).intersection(leads)) < 2
and "V3" not in leads
):
# leads insufficient to make decision
is_PRWP = False
return is_PRWP
limb_leads = [ld for ld in leads if ld in LimbLeads]
limb_lead_inds = [list(leads).index(ld) for ld in limb_leads]
try:
lead_V3_ind = list(leads).index("V3")
except Exception:
lead_V3_ind = None
leads_V1_4 = [ld for ld in leads if ld in ["V1", "V2", "V3", "V4"]]
leads_V1_4_inds = [list(leads).index(ld) for ld in leads_V1_4]
# condition 1: R<3mm in V3
if lead_V3_ind is not None:
cond1 = np.mean(r_ampl[lead_V3_ind, ...]) < SpecialDetectorCfg.prwp_v3_thr
if verbose >= 1:
print(
f"PRWP condition 1: R amplitude in lead V3 = {np.mean(r_ampl[lead_V3_ind, ...])}"
)
else:
cond1 = False
# condition 2: reversed R wave progression, which is defined as R in V4 < R in V3 or R in V3 < R in V2 or R in V2 < R in V1
cond2 = (np.diff(np.mean(r_ampl[leads_V1_4_inds, ...], axis=-1)) < 0).any()
if verbose >= 1:
diff = np.diff(np.mean(r_ampl[leads_V1_4_inds, ...], axis=-1))
print(
f"PRWP condition 2: reversed R wave progression, diff of mean R amplitude in V1-4 = {diff}"
)
# condition 3: delayed transition beyond V4
# currently, exact meaning of condition 3 is not clear
cond3 = False
# the or rule
is_PRWP = cond1 or cond2 or cond3
return is_PRWP
|
2d4cd451c3611fc4d048a98931df59cb9d06fba0
|
8e90a7759ec7143427823547e0fbff58e0343aaa
|
/inference_api/src/main/object_detection/core/anchor_generator.py
|
69e29d84db8817c79f00f4fdf4ee4aa14b9828a1
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
BMW-InnovationLab/BMW-TensorFlow-Training-GUI
|
646a6f86f26887e94351b4c572b7fe7f0842f75c
|
06531dae14365986c86baf735fd149317f4bb67a
|
refs/heads/master
| 2023-07-20T01:48:27.299962
| 2023-07-12T15:22:22
| 2023-07-12T15:22:22
| 227,429,492
| 1,030
| 198
|
Apache-2.0
| 2023-05-22T17:40:23
| 2019-12-11T18:06:11
|
Python
|
UTF-8
|
Python
| false
| false
| 6,512
|
py
|
anchor_generator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base anchor generator.
The job of the anchor generator is to create (or load) a collection
of bounding boxes to be used as anchors.
Generated anchors are assumed to match some convolutional grid or list of grid
shapes. For example, we might want to generate anchors matching an 8x8
feature map and a 4x4 feature map. If we place 3 anchors per grid location
on the first feature map and 6 anchors per grid location on the second feature
map, then 3*8*8 + 6*4*4 = 288 anchors are generated in total.
To support fully convolutional settings, feature map shapes are passed
dynamically at generation time. The number of anchors to place at each location
is static --- implementations of AnchorGenerator must always be able to return
the number of anchors that it uses per location for each feature map.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import six
from six.moves import zip
import tensorflow.compat.v1 as tf
class AnchorGenerator(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for anchor generators."""
@abstractmethod
def name_scope(self):
"""Name scope.
Must be defined by implementations.
Returns:
a string representing the name scope of the anchor generation operation.
"""
pass
@property
def check_num_anchors(self):
"""Whether to dynamically check the number of anchors generated.
Can be overridden by implementations that would like to disable this
behavior.
Returns:
a boolean controlling whether the Generate function should dynamically
check the number of anchors generated against the mathematically
expected number of anchors.
"""
return True
@abstractmethod
def num_anchors_per_location(self):
"""Returns the number of anchors per spatial location.
Returns:
a list of integers, one for each expected feature map to be passed to
the `generate` function.
"""
pass
def generate(self, feature_map_shape_list, **params):
"""Generates a collection of bounding boxes to be used as anchors.
TODO(rathodv): remove **params from argument list and make stride and
offsets (for multiple_grid_anchor_generator) constructor arguments.
Args:
feature_map_shape_list: list of (height, width) pairs in the format
[(height_0, width_0), (height_1, width_1), ...] that the generated
anchors must align with. Pairs can be provided as 1-dimensional
integer tensors of length 2 or simply as tuples of integers.
**params: parameters for anchor generation op
Returns:
boxes_list: a list of BoxLists each holding anchor boxes corresponding to
the input feature map shapes.
Raises:
ValueError: if the number of feature map shapes does not match the length
of NumAnchorsPerLocation.
"""
if self.check_num_anchors and (
len(feature_map_shape_list) != len(self.num_anchors_per_location())):
raise ValueError('Number of feature maps is expected to equal the length '
'of `num_anchors_per_location`.')
with tf.name_scope(self.name_scope()):
anchors_list = self._generate(feature_map_shape_list, **params)
if self.check_num_anchors:
with tf.control_dependencies([
self._assert_correct_number_of_anchors(
anchors_list, feature_map_shape_list)]):
for item in anchors_list:
item.set(tf.identity(item.get()))
return anchors_list
@abstractmethod
def _generate(self, feature_map_shape_list, **params):
"""To be overridden by implementations.
Args:
feature_map_shape_list: list of (height, width) pairs in the format
[(height_0, width_0), (height_1, width_1), ...] that the generated
anchors must align with.
**params: parameters for anchor generation op
Returns:
boxes_list: a list of BoxList, each holding a collection of N anchor
boxes.
"""
pass
def anchor_index_to_feature_map_index(self, boxlist_list):
"""Returns a 1-D array of feature map indices for each anchor.
Args:
boxlist_list: a list of Boxlist, each holding a collection of N anchor
boxes. This list is produced in self.generate().
Returns:
A [num_anchors] integer array, where each element indicates which feature
map index the anchor belongs to.
"""
feature_map_indices_list = []
for i, boxes in enumerate(boxlist_list):
feature_map_indices_list.append(
i * tf.ones([boxes.num_boxes()], dtype=tf.int32))
return tf.concat(feature_map_indices_list, axis=0)
def _assert_correct_number_of_anchors(self, anchors_list,
feature_map_shape_list):
"""Assert that correct number of anchors was generated.
Args:
anchors_list: A list of box_list.BoxList object holding anchors generated.
feature_map_shape_list: list of (height, width) pairs in the format
[(height_0, width_0), (height_1, width_1), ...] that the generated
anchors must align with.
Returns:
Op that raises InvalidArgumentError if the number of anchors does not
match the number of expected anchors.
"""
expected_num_anchors = 0
actual_num_anchors = 0
for num_anchors_per_location, feature_map_shape, anchors in zip(
self.num_anchors_per_location(), feature_map_shape_list, anchors_list):
expected_num_anchors += (num_anchors_per_location
* feature_map_shape[0]
* feature_map_shape[1])
actual_num_anchors += anchors.num_boxes()
return tf.assert_equal(expected_num_anchors, actual_num_anchors)
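# Minimal sketch of a concrete subclass (illustrative only, not part of the
# object detection API):
#
#   class OneAnchorPerCell(AnchorGenerator):
#     def name_scope(self):
#       return 'OneAnchorPerCell'
#     def num_anchors_per_location(self):
#       return [1]
#     def _generate(self, feature_map_shape_list, **params):
#       # would return a list with one BoxList holding h * w anchor boxes
#       raise NotImplementedError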
|
fe4a09cde8f36bfd6e351c4c35264defeaa8df92
|
799fee946fa3f4773cb1340bb36af5b465fdc570
|
/configs/expose/hrnet_hmr_expose_body.py
|
e9e760d1b72ed13b23bfaf8d9d5f1680e1434ae1
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmhuman3d
|
8c534d3c252f68f2d14d3e67fe67bfbccadfad36
|
9431addec32f7fbeffa1786927a854c0ab79d9ea
|
refs/heads/main
| 2023-08-31T13:30:59.894842
| 2023-07-10T02:32:20
| 2023-07-10T02:32:20
| 432,877,190
| 966
| 139
|
Apache-2.0
| 2023-08-31T08:49:16
| 2021-11-29T02:10:31
|
Python
|
UTF-8
|
Python
| false
| false
| 9,416
|
py
|
hrnet_hmr_expose_body.py
|
_base_ = ['../_base_/default_runtime.py']
use_adversarial_train = True
img_res = 256
# evaluate
evaluation = dict(interval=10, metric=['pa-mpjpe', 'mpjpe'])
optimizer = dict(
backbone=dict(type='Adam', lr=1.0e-4, weight_decay=1.0e-4),
head=dict(type='Adam', lr=1.0e-4, weight_decay=1.0e-4),
)
optimizer_config = dict(grad_clip=None)
lr_config = dict(policy='step', step=[60, 100], gamma=0.1)
runner = dict(type='EpochBasedRunner', max_epochs=100)
log_config = dict(
interval=50, hooks=[
dict(type='TextLoggerHook'),
])
checkpoint_config = dict(interval=10)
# model settings
hrnet_extra = dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(48, 96)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(48, 96, 192)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(48, 96, 192, 384)),
downsample=True,
use_conv=True,
pretrained_layers=[
'conv1',
'bn1',
'conv2',
'bn2',
'layer1',
'transition1',
'stage2',
'transition2',
'stage3',
'transition3',
'stage4',
],
final_conv_kernel=1,
return_list=False)
find_unused_parameters = True
model = dict(
type='SMPLXImageBodyModelEstimator',
backbone=dict(
type='PoseHighResolutionNetExpose',
extra=hrnet_extra,
init_cfg=dict(
type='Pretrained',
checkpoint='data/pretrained_models/hrnet_pretrain.pth')),
head=dict(
type='ExPoseBodyHead',
num_betas=10,
num_expression_coeffs=10,
mean_pose_path='data/body_models/smplx/all_means.pkl',
shape_mean_path='data/body_models/smplx/shape_mean.npy',
pose_param_conf=[
dict(
name='global_orient',
num_angles=1,
use_mean=False,
rotate_axis_x=True),
dict(
name='body_pose',
num_angles=21,
use_mean=True,
rotate_axis_x=False),
dict(
name='left_hand_pose',
num_angles=15,
use_mean=True,
rotate_axis_x=False),
dict(
name='right_hand_pose',
num_angles=15,
use_mean=True,
rotate_axis_x=False),
dict(
name='jaw_pose',
num_angles=1,
use_mean=False,
rotate_axis_x=False),
],
input_feat_dim=2048,
regressor_cfg=dict(
layers=[1024, 1024], activ_type='none', dropout=0.5, gain=0.01),
camera_cfg=dict(pos_func='softplus', mean_scale=0.9)),
body_model_train=dict(
type='SMPLXLayer',
num_expression_coeffs=10,
num_betas=10,
use_face_contour=True,
use_pca=False,
flat_hand_mean=True,
model_path='data/body_models/smplx',
keypoint_src='smplx',
keypoint_dst='smplx',
),
body_model_test=dict(
type='SMPLXLayer',
num_expression_coeffs=10,
num_betas=10,
use_face_contour=True,
use_pca=False,
flat_hand_mean=True,
model_path='data/body_models/smplx',
keypoint_src='lsp',
keypoint_dst='lsp',
joints_regressor='data/body_models/smplx/SMPLX_to_J14.npy'),
loss_keypoints3d=dict(type='L1Loss', reduction='sum', loss_weight=1),
loss_keypoints2d=dict(type='L1Loss', reduction='sum', loss_weight=1),
loss_smplx_global_orient=dict(
type='RotationDistance', reduction='sum', loss_weight=1),
loss_smplx_body_pose=dict(
type='RotationDistance', reduction='sum', loss_weight=1),
loss_smplx_jaw_pose=dict(
type='RotationDistance', reduction='sum', loss_weight=1),
loss_smplx_hand_pose=dict(
type='RotationDistance', reduction='sum', loss_weight=1),
loss_smplx_betas=dict(type='MSELoss', reduction='sum', loss_weight=0.001),
loss_smplx_expression=dict(type='MSELoss', reduction='sum', loss_weight=1),
loss_smplx_betas_prior=dict(
type='ShapeThresholdPriorLoss', margin=3.0, norm='l2', loss_weight=1),
convention='smplx')
# dataset settings
dataset_type = 'HumanImageSMPLXDataset'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data_keys = [
'has_smplx', 'has_keypoints3d', 'has_keypoints2d',
'has_smplx_global_orient', 'has_smplx_body_pose', 'has_smplx_jaw_pose',
'has_smplx_right_hand_pose', 'has_smplx_left_hand_pose', 'has_smplx_betas',
'has_smplx_expression', 'smplx_jaw_pose', 'smplx_body_pose',
'smplx_right_hand_pose', 'smplx_left_hand_pose', 'smplx_global_orient',
'smplx_betas', 'keypoints2d', 'keypoints3d', 'sample_idx',
'smplx_expression'
]
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='BBoxCenterJitter', factor=0.0, dist='normal'),
dict(type='RandomHorizontalFlip', flip_prob=0.5,
convention='smplx'), # hand = 0,head = body = 0.5
dict(
type='GetRandomScaleRotation',
rot_factor=30.0,
scale_factor=0.25,
rot_prob=0.6),
dict(type='MeshAffine', img_res=img_res), # hand = 224, body = head = 256
dict(type='RandomChannelNoise', noise_factor=0.4),
dict(
type='SimulateLowRes',
dist='categorical',
cat_factors=(1.0, ),
# head = (1.0, 1.2, 1.5, 2.0, 3.0, 4.0, 8.0)
# hand = (1.0, 1.2, 1.5, 2.0, 3.0, 4.0)
# body = (1.0,)
factor_min=1.0,
factor_max=1.0),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img', 'ori_img']),
dict(type='ToTensor', keys=data_keys),
dict(
type='Collect',
keys=['img', *data_keys],
meta_keys=['image_path', 'center', 'scale', 'rotation', 'ori_img'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='GetRandomScaleRotation', rot_factor=0, scale_factor=0),
dict(type='MeshAffine', img_res=img_res),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=data_keys),
dict(
type='Collect',
keys=['img', *data_keys],
meta_keys=['image_path', 'center', 'scale', 'rotation'])
]
inference_pipeline = [
dict(type='GetRandomScaleRotation', rot_factor=0, scale_factor=0),
dict(type='MeshAffine', img_res=img_res),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(
type='Collect',
keys=['img', 'sample_idx'],
meta_keys=['image_path', 'center', 'scale', 'rotation'])
]
cache_files = {
'curated_fits': 'data/cache/curated_fits_train_smplx.npz',
'spin_smplx': 'data/cache/spin_smplx_train.npz',
'h36m': 'data/cache/h36m_train_smplx.npz'
}
data = dict(
samples_per_gpu=48, # body 48, head = hand = 64
workers_per_gpu=8,
train=dict(
type='MixedDataset',
configs=[
dict(
type=dataset_type,
pipeline=train_pipeline,
dataset_name='',
data_prefix='data',
ann_file='curated_fits_train.npz',
convention='smplx',
num_betas=10,
num_expression=10,
cache_data_path=cache_files['curated_fits'],
),
dict(
type=dataset_type,
pipeline=train_pipeline,
dataset_name='',
data_prefix='data',
ann_file='spin_smplx_train.npz',
convention='smplx',
num_betas=10,
num_expression=10,
cache_data_path=cache_files['spin_smplx'],
),
dict(
type=dataset_type,
pipeline=train_pipeline,
dataset_name='h36m',
data_prefix='data',
ann_file='h36m_train.npz',
convention='smplx',
num_betas=10,
num_expression=10,
cache_data_path=cache_files['h36m'],
),
],
partition=[0.08, 0.12, 0.8],
),
val=dict(
type=dataset_type,
body_model=dict(
type='SMPL',
keypoint_src='h36m',
keypoint_dst='h36m',
model_path='data/body_models/smpl',
joints_regressor='data/body_models/J_regressor_h36m.npy'),
dataset_name='3DPW',
data_prefix='data',
pipeline=test_pipeline,
ann_file='pw3d_test.npz'),
test=dict(
type=dataset_type,
body_model=dict(
type='SMPL',
keypoint_src='h36m',
keypoint_dst='h36m',
model_path='data/body_models/smpl',
joints_regressor='data/body_models/J_regressor_h36m.npy'),
dataset_name='3DPW',
data_prefix='data',
pipeline=test_pipeline,
ann_file='pw3d_test.npz'),
)
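# This config is typically consumed by MMHuman3D's launcher scripts, e.g.
# (assuming the standard tools/ layout of the repository):
#   python tools/train.py configs/expose/hrnet_hmr_expose_body.py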
|
ae2d81b991d4f8475bd420f680ad4c45700d3091
|
3dc3bbe607ab7b583eb52dbaae86636eb642960a
|
/tools/data/hacs/write_feature_csv.py
|
9fb098b4bfbc0190dff2765c62d6e0496c78923f
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmaction2
|
659c36c6083fd3d9d072e074a8d4b3a50342b9bd
|
582b78fd6c3240500d5cacd292339d7d1ddbb056
|
refs/heads/main
| 2023-08-28T18:14:50.423980
| 2023-08-10T09:20:06
| 2023-08-10T09:20:06
| 278,810,244
| 3,498
| 1,028
|
Apache-2.0
| 2023-09-07T06:50:44
| 2020-07-11T07:19:10
|
Python
|
UTF-8
|
Python
| false
| false
| 689
|
py
|
write_feature_csv.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmengine
features = mmengine.load('result.pkl')
video_list = mmengine.list_from_file('hacs_data.txt')
feature_dir = '../../../data/HACS/slowonly_feature'
mmengine.mkdir_or_exist(feature_dir)
head = ','.join([f'f{i}' for i in range(700)]) + '\n'
for feature, video in zip(features, video_list):
video_id = video.split()[0].split('/')[1]
csv_file = video_id.replace('mp4', 'csv')
feat = feature['pred_scores']['item'].numpy()
feat = feat.tolist()
csv_path = f'{feature_dir}/{csv_file}'
with open(csv_path, 'w') as f:
f.write(head)
for line in feat:
f.write(str(line)[1:-1] + '\n')
|
9fce1d817bb7c256f8ab75b0f87c1b0be73ba681
|
2342b8737b9ffeb9715158b8ec74a33c7a4947f6
|
/koku/reporting_common/migrations/0029_costusagereportmanifest_operator_info.py
|
0bf172be84a8b8a7a3ccee6dc26b96f436ae884d
|
[
"Apache-2.0"
] |
permissive
|
project-koku/koku
|
444d8df05da5416c9cee606c42481c99be45f13d
|
0416e5216eb1ec4b41c8dd4999adde218b1ab2e1
|
refs/heads/main
| 2023-08-20T11:30:17.510182
| 2023-08-17T18:27:30
| 2023-08-17T18:27:30
| 126,496,611
| 225
| 94
|
Apache-2.0
| 2023-09-14T17:38:08
| 2018-03-23T14:29:23
|
Python
|
UTF-8
|
Python
| false
| false
| 913
|
py
|
0029_costusagereportmanifest_operator_info.py
|
# Generated by Django 3.1.12 on 2021-07-29 19:02
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [("reporting_common", "0028_costusagereportmanifest_operator_version")]
operations = [
migrations.AddField(
model_name="costusagereportmanifest", name="cluster_channel", field=models.TextField(null=True)
),
migrations.AddField(
model_name="costusagereportmanifest", name="operator_airgapped", field=models.BooleanField(null=True)
),
migrations.AddField(
model_name="costusagereportmanifest", name="operator_certified", field=models.BooleanField(null=True)
),
migrations.AddField(
model_name="costusagereportmanifest",
name="operator_errors",
field=models.JSONField(default=dict, null=True),
),
]
|
8ccfc748f893fc11e7befec5e50c6978eb181a30
|
7aa58030cd72c9f9c7906042e6c89693d373f065
|
/ct/py/shell_utils.py
|
96bb4f839c78f0e8a35a9dd49e8205168bd2e471
|
[
"BSD-3-Clause"
] |
permissive
|
google/skia-buildbot
|
991162a936819f2be95ae9dbc86bd7a7fc649e6a
|
9f1fad0fdc31563f6b43ec469b0550f2eeece804
|
refs/heads/main
| 2023-08-29T07:36:11.102029
| 2023-08-29T04:01:01
| 2023-08-29T04:15:54
| 15,773,235
| 131
| 63
|
BSD-3-Clause
| 2023-07-19T00:21:42
| 2014-01-09T17:10:17
|
Go
|
UTF-8
|
Python
| false
| false
| 8,697
|
py
|
shell_utils.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" This module contains tools for running commands in a shell. """
import datetime
import os
import queue
import select
import subprocess
import sys
import threading
import time
if 'nt' in os.name:
import ctypes
DEFAULT_SECS_BETWEEN_ATTEMPTS = 10
POLL_MILLIS = 250
VERBOSE = True
class CommandFailedException(Exception):
"""Exception which gets raised when a command fails."""
def __init__(self, output, *args):
"""Initialize the CommandFailedException.
Args:
output: string; output from the command.
"""
Exception.__init__(self, *args)
self._output = output
@property
def output(self):
"""Output from the command."""
return self._output
class TimeoutException(CommandFailedException):
"""CommandFailedException which gets raised when a subprocess exceeds its
timeout. """
pass
def run_async(cmd, echo=None, shell=False):
""" Run 'cmd' in a subprocess, returning a Popen class instance referring to
that process. (Non-blocking) """
if echo is None:
echo = VERBOSE
if echo:
print(' '.join(cmd) if isinstance(cmd, list) else cmd)
if 'nt' in os.name:
# Windows has a bad habit of opening a dialog when a console program
# crashes, rather than just letting it crash. Therefore, when a program
# crashes on Windows, we don't find out until the build step times out.
# This code prevents the dialog from appearing, so that we find out
# immediately and don't waste time waiting around.
SEM_NOGPFAULTERRORBOX = 0x0002
ctypes.windll.kernel32.SetErrorMode(SEM_NOGPFAULTERRORBOX)
flags = 0x8000000 # CREATE_NO_WINDOW
else:
flags = 0
return subprocess.Popen(cmd, shell=shell, stderr=subprocess.STDOUT,
stdout=subprocess.PIPE, creationflags=flags)
class EnqueueThread(threading.Thread):
""" Reads and enqueues lines from a file. """
def __init__(self, file_obj, q):
threading.Thread.__init__(self)
self._file = file_obj
self._queue = q
self._stopped = False
def run(self):
if sys.platform.startswith('linux'):
# Use a polling object to avoid the blocking call to readline().
poll = select.poll()
poll.register(self._file, select.POLLIN)
while not self._stopped:
has_output = poll.poll(POLL_MILLIS)
if has_output:
line = self._file.readline()
          if line == b'':  # proc.stdout is binary, so EOF reads as b''
self._stopped = True
self._queue.put(line)
else:
# Only Unix supports polling objects, so just read from the file,
# Python-style.
      for line in iter(self._file.readline, b''):
self._queue.put(line)
if self._stopped:
break
def stop(self):
self._stopped = True
def log_process_in_real_time(proc, echo=None, timeout=None, log_file=None,
halt_on_output=None, print_timestamps=True):
""" Log the output of proc in real time until it completes. Return a tuple
containing the exit code of proc and the contents of stdout.
proc: an instance of Popen referring to a running subprocess.
echo: boolean indicating whether to print the output received from proc.stdout
timeout: number of seconds allotted for the process to run. Raises a
TimeoutException if the run time exceeds the timeout.
log_file: an open file for writing output
halt_on_output: string; kill the process and return if this string is found
in the output stream from the process.
print_timestamps: boolean indicating whether a formatted timestamp should be
prepended to each line of output.
"""
if echo is None:
echo = VERBOSE
stdout_queue = queue.Queue()
log_thread = EnqueueThread(proc.stdout, stdout_queue)
log_thread.start()
try:
all_output = []
t_0 = time.time()
while True:
code = proc.poll()
try:
output = stdout_queue.get_nowait().decode('utf-8')
all_output.append(output)
if output and print_timestamps:
timestamp = datetime.datetime.now().strftime('%H:%M:%S.%f')
output = ''.join(['[%s] %s\n' % (timestamp, line)
for line in output.splitlines()])
if echo:
sys.stdout.write(output)
sys.stdout.flush()
if log_file:
log_file.write(output)
log_file.flush()
if halt_on_output and halt_on_output in output:
proc.terminate()
break
except queue.Empty:
        if code is not None:  # proc has finished running
break
time.sleep(0.5)
if timeout and time.time() - t_0 > timeout:
proc.terminate()
raise TimeoutException(
''.join(all_output),
'Subprocess exceeded timeout of %ds' % timeout)
finally:
log_thread.stop()
log_thread.join()
return (code, ''.join(all_output))
def log_process_after_completion(proc, echo=None, timeout=None,
log_file=None):
""" Wait for proc to complete and return a tuple containing the exit code of
proc and the contents of stdout. Unlike log_process_in_real_time, does not
attempt to read stdout from proc in real time.
proc: an instance of Popen referring to a running subprocess.
echo: boolean indicating whether to print the output received from proc.stdout
timeout: number of seconds allotted for the process to run. Raises a
TimeoutException if the run time exceeds the timeout.
  log_file: an open file for writing output
"""
if echo is None:
echo = VERBOSE
t_0 = time.time()
code = None
while code is None:
if timeout and time.time() - t_0 > timeout:
raise TimeoutException(
proc.communicate()[0],
'Subprocess exceeded timeout of %ds' % timeout)
time.sleep(0.5)
code = proc.poll()
output = proc.communicate()[0]
if echo:
print(output)
if log_file:
log_file.write(output)
log_file.flush()
return (code, output)
def run(cmd, echo=None, shell=False, timeout=None, print_timestamps=True,
log_in_real_time=True):
""" Run 'cmd' in a shell and return the combined contents of stdout and
stderr (Blocking). Throws an exception if the command exits non-zero.
cmd: list of strings (or single string, iff shell==True) indicating the
command to run
echo: boolean indicating whether we should print the command and log output
shell: boolean indicating whether we are using advanced shell features. Use
only when absolutely necessary, since this allows a lot more freedom which
could be exploited by malicious code. See the warning here:
http://docs.python.org/library/subprocess.html#popen-constructor
timeout: optional, integer indicating the maximum elapsed time in seconds
print_timestamps: boolean indicating whether a formatted timestamp should be
prepended to each line of output. Unused if echo or log_in_real_time is
False.
log_in_real_time: boolean indicating whether to read stdout from the
subprocess in real time instead of when the process finishes. If echo is
False, we never log in real time, even if log_in_real_time is True.
"""
if echo is None:
echo = VERBOSE
proc = run_async(cmd, echo=echo, shell=shell)
# If we're not printing the output, we don't care if the output shows up in
# real time, so don't bother.
if log_in_real_time and echo:
(returncode, output) = log_process_in_real_time(proc, echo=echo,
timeout=timeout, print_timestamps=print_timestamps)
else:
(returncode, output) = log_process_after_completion(proc, echo=echo,
timeout=timeout)
if returncode != 0:
raise CommandFailedException(
output,
'Command failed with code %d: %s' % (returncode, cmd))
return output
def run_retry(cmd, echo=None, shell=False, attempts=1,
secs_between_attempts=DEFAULT_SECS_BETWEEN_ATTEMPTS,
timeout=None, print_timestamps=True):
""" Wrapper for run() which makes multiple attempts until either the command
succeeds or the maximum number of attempts is reached. """
if echo is None:
echo = VERBOSE
attempt = 1
while True:
try:
return run(cmd, echo=echo, shell=shell, timeout=timeout,
print_timestamps=print_timestamps)
except CommandFailedException:
if attempt >= attempts:
raise
print('Command failed. Retrying in %d seconds...' % secs_between_attempts)
time.sleep(secs_between_attempts)
attempt += 1
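# --- Usage sketch (illustrative addition, not part of the original module) ---
# Assuming this module is run directly, a caller might combine the timeout and
# retry machinery like this. The command and numbers below are examples only;
# on Windows, `echo` is a shell builtin, so pass shell=True or a real binary.
if __name__ == '__main__':
    try:
        out = run_retry(['echo', 'hello'], echo=True, attempts=3,
                        secs_between_attempts=5, timeout=30)
        print('Command succeeded with output: %r' % out)
    except TimeoutException as exc:
        # TimeoutException subclasses CommandFailedException, so catch it first.
        print('Timed out; partial output: %r' % exc.output)
    except CommandFailedException as exc:
        print('Failed; output: %r' % exc.output)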
|
3784868bfa49dfc8364768464b0465ccae6a2a14
|
0b134572e3ac3903ebb44df6d4138cbab9d3327c
|
/app/grandchallenge/verifications/views.py
|
90c03fd20eab9ae8c5a7d1d7b37da27aee0da57a
|
[
"Apache-2.0"
] |
permissive
|
comic/grand-challenge.org
|
660de3bafaf8f4560317f1dfd9ae9585ec272896
|
dac25f93b395974b32ba2a8a5f9e19b84b49e09d
|
refs/heads/main
| 2023-09-01T15:57:14.790244
| 2023-08-31T14:23:04
| 2023-08-31T14:23:04
| 4,557,968
| 135
| 53
|
Apache-2.0
| 2023-09-14T13:41:03
| 2012-06-05T09:26:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,576
|
py
|
views.py
|
from django.contrib import messages
from django.contrib.auth.mixins import (
PermissionRequiredMixin,
UserPassesTestMixin,
)
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
from django.utils.html import format_html
from django.utils.timezone import now
from django.views.generic import CreateView, DetailView, FormView
from guardian.mixins import LoginRequiredMixin
from grandchallenge.evaluation.models import Submission
from grandchallenge.subdomains.utils import reverse
from grandchallenge.verifications.forms import (
ConfirmEmailForm,
VerificationForm,
)
from grandchallenge.verifications.models import (
Verification,
VerificationUserSet,
)
class VerificationRequiredMixin(UserPassesTestMixin):
"""Mixin for views that require verification"""
def test_func(self):
try:
verified = self.request.user.verification.is_verified
except ObjectDoesNotExist:
verified = False
if not verified:
messages.error(
self.request,
format_html(
"You need to verify your account before you can do this, "
"you can request this <a href='{}'>on this page</a>.",
reverse("verifications:create"),
),
)
return verified
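# Usage sketch (hypothetical, added for illustration): any class-based view can
# require a verified account by combining this mixin with LoginRequiredMixin.
# `Widget` and the field list below are invented placeholders, not part of this
# module:
#
#     class WidgetCreate(LoginRequiredMixin, VerificationRequiredMixin, CreateView):
#         model = Widget
#         fields = ["title"]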
class VerificationCreate(LoginRequiredMixin, CreateView):
form_class = VerificationForm
model = Verification
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update({"user": self.request.user})
return kwargs
def get_success_url(self):
return reverse("verifications:detail")
class VerificationDetail(LoginRequiredMixin, DetailView):
model = Verification
def get_object(self, queryset=None):
try:
return self.request.user.verification
except ObjectDoesNotExist:
raise Http404("User not found")
class ConfirmEmailView(LoginRequiredMixin, FormView):
form_class = ConfirmEmailForm
template_name = "verifications/confirm_email_form.html"
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update(
{"user": self.request.user, "token": self.kwargs["token"]}
)
return kwargs
def form_valid(self, form):
response = super().form_valid(form=form)
self.request.user.verification.email_is_verified = True
self.request.user.verification.email_verified_at = now()
self.request.user.verification.save()
return response
def get_success_url(self):
return reverse("verifications:detail")
class VerificationUserSetDetail(
LoginRequiredMixin, PermissionRequiredMixin, DetailView
):
model = VerificationUserSet
permission_required = "verifications.view_verificationuserset"
def get_queryset(self):
queryset = super().get_queryset()
queryset = queryset.prefetch_related(
"users__verification", "users__user_profile"
)
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update(
{
"submissions": Submission.objects.filter(
creator__in=self.object.users.all()
).select_related(
"creator__verification",
"creator__user_profile",
"phase__challenge",
)
}
)
return context
|
41c38ebc2a7a0e1ed27e9f667428698471d8683c
|
7ae27ce9a8c477855f8fd5fac54685716d868349
|
/installer/lib/installer.py
|
aaf5779801ce1cffbf056c3c96023d37555df5cb
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
invoke-ai/InvokeAI
|
5f7a2c1f19b1f686099a8cf4cec85aa9c7b6d81d
|
2bd3cf28eabff2dcf3339669be222061dd208cb8
|
refs/heads/main
| 2023-08-31T07:06:56.721576
| 2023-08-30T19:05:17
| 2023-08-30T19:05:17
| 525,592,995
| 15,987
| 1,678
|
Apache-2.0
| 2023-09-14T20:29:39
| 2022-08-17T01:04:27
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 16,886
|
py
|
installer.py
|
# Copyright (c) 2023 Eugene Brodsky (https://github.com/ebr)
"""
InvokeAI installer script
"""
import os
import platform
import shutil
import subprocess
import sys
import venv
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Union
SUPPORTED_PYTHON = ">=3.9.0,<=3.11.100"
INSTALLER_REQS = ["rich", "semver", "requests", "plumbum", "prompt-toolkit"]
BOOTSTRAP_VENV_PREFIX = "invokeai-installer-tmp"
OS = platform.uname().system
ARCH = platform.uname().machine
VERSION = "latest"
### Feature flags
# Install the virtualenv into the runtime dir
FF_VENV_IN_RUNTIME = True
# Install the wheel packaged with the installer
FF_USE_LOCAL_WHEEL = True
class Installer:
"""
Deploys an InvokeAI installation into a given path
"""
def __init__(self) -> None:
self.reqs = INSTALLER_REQS
self.preflight()
if os.getenv("VIRTUAL_ENV") is not None:
print("A virtual environment is already activated. Please 'deactivate' before installation.")
sys.exit(-1)
self.bootstrap()
def preflight(self) -> None:
"""
Preflight checks
"""
# TODO
# verify python version
# on macOS verify XCode tools are present
# verify libmesa, libglx on linux
# check that the system arch is not i386 (?)
# check that the system has a GPU, and the type of GPU
pass
def mktemp_venv(self) -> TemporaryDirectory:
"""
Creates a temporary virtual environment for the installer itself
:return: path to the created virtual environment directory
:rtype: TemporaryDirectory
"""
# Cleaning up temporary directories on Windows results in a race condition
# and a stack trace.
# `ignore_cleanup_errors` was only added in Python 3.10
# users of Python 3.9 will see a gnarly stack trace on installer exit
if OS == "Windows" and int(platform.python_version_tuple()[1]) >= 10:
venv_dir = TemporaryDirectory(prefix=BOOTSTRAP_VENV_PREFIX, ignore_cleanup_errors=True)
else:
venv_dir = TemporaryDirectory(prefix=BOOTSTRAP_VENV_PREFIX)
venv.create(venv_dir.name, with_pip=True)
self.venv_dir = venv_dir
set_sys_path(Path(venv_dir.name))
return venv_dir
def bootstrap(self, verbose: bool = False) -> TemporaryDirectory:
"""
Bootstrap the installer venv with packages required at install time
:return: path to the virtual environment directory that was bootstrapped
:rtype: TemporaryDirectory
"""
print("Initializing the installer. This may take a minute - please wait...")
venv_dir = self.mktemp_venv()
pip = get_pip_from_venv(Path(venv_dir.name))
cmd = [pip, "install", "--require-virtualenv", "--use-pep517"]
cmd.extend(self.reqs)
try:
res = subprocess.check_output(cmd).decode()
if verbose:
print(res)
return venv_dir
except subprocess.CalledProcessError as e:
print(e)
def app_venv(self, path: str = None):
"""
Create a virtualenv for the InvokeAI installation
"""
# explicit venv location
# currently unused in normal operation
# useful for testing or special cases
if path is not None:
venv_dir = Path(path)
# experimental / testing
elif not FF_VENV_IN_RUNTIME:
if OS == "Windows":
venv_dir_parent = os.getenv("APPDATA", "~/AppData/Roaming")
elif OS == "Darwin":
# there is no environment variable on macOS to find this
# TODO: confirm this is working as expected
venv_dir_parent = "~/Library/Application Support"
elif OS == "Linux":
venv_dir_parent = os.getenv("XDG_DATA_DIR", "~/.local/share")
venv_dir = Path(venv_dir_parent).expanduser().resolve() / f"InvokeAI/{VERSION}/venv"
# stable / current
else:
venv_dir = self.dest / ".venv"
# Prefer to copy python executables
# so that updates to system python don't break InvokeAI
try:
venv.create(venv_dir, with_pip=True)
# If installing over an existing environment previously created with symlinks,
# the executables will fail to copy. Keep symlinks in that case
except shutil.SameFileError:
venv.create(venv_dir, with_pip=True, symlinks=True)
# upgrade pip in Python 3.9 environments
if int(platform.python_version_tuple()[1]) == 9:
from plumbum import FG, local
pip = local[get_pip_from_venv(venv_dir)]
pip["install", "--upgrade", "pip"] & FG
return venv_dir
def install(
self, root: str = "~/invokeai", version: str = "latest", yes_to_all=False, find_links: Path = None
) -> None:
"""
Install the InvokeAI application into the given runtime path
:param root: Destination path for the installation
:type root: str
:param version: InvokeAI version to install
:type version: str
        :param yes_to_all: Accept defaults to all questions
        :type yes_to_all: bool
:param find_links: A local directory to search for requirement wheels before going to remote indexes
:type find_links: Path
"""
import messages
messages.welcome()
default_path = os.environ.get("INVOKEAI_ROOT") or Path(root).expanduser().resolve()
self.dest = default_path if yes_to_all else messages.dest_path(root)
# create the venv for the app
self.venv = self.app_venv()
self.instance = InvokeAiInstance(runtime=self.dest, venv=self.venv, version=version)
# install dependencies and the InvokeAI application
(extra_index_url, optional_modules) = get_torch_source() if not yes_to_all else (None, None)
self.instance.install(
extra_index_url,
optional_modules,
find_links,
)
# install the launch/update scripts into the runtime directory
self.instance.install_user_scripts()
# run through the configuration flow
self.instance.configure()
class InvokeAiInstance:
"""
Manages an installed instance of InvokeAI, comprising a virtual environment and a runtime directory.
The virtual environment *may* reside within the runtime directory.
A single runtime directory *may* be shared by multiple virtual environments, though this isn't currently tested or supported.
"""
def __init__(self, runtime: Path, venv: Path, version: str) -> None:
self.runtime = runtime
self.venv = venv
self.pip = get_pip_from_venv(venv)
self.version = version
set_sys_path(venv)
os.environ["INVOKEAI_ROOT"] = str(self.runtime.expanduser().resolve())
os.environ["VIRTUAL_ENV"] = str(self.venv.expanduser().resolve())
def get(self) -> tuple[Path, Path]:
"""
Get the location of the virtualenv directory for this installation
:return: Paths of the runtime and the venv directory
:rtype: tuple[Path, Path]
"""
return (self.runtime, self.venv)
def install(self, extra_index_url=None, optional_modules=None, find_links=None):
"""
Install this instance, including dependencies and the app itself
:param extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes.
:type extra_index_url: str
"""
import messages
# install torch first to ensure the correct version gets installed.
# works with either source or wheel install with negligible impact on installation times.
messages.simple_banner("Installing PyTorch :fire:")
self.install_torch(extra_index_url, find_links)
messages.simple_banner("Installing the InvokeAI Application :art:")
self.install_app(extra_index_url, optional_modules, find_links)
def install_torch(self, extra_index_url=None, find_links=None):
"""
Install PyTorch
"""
from plumbum import FG, local
pip = local[self.pip]
(
pip[
"install",
"--require-virtualenv",
"numpy~=1.24.0", # choose versions that won't be uninstalled during phase 2
"urllib3~=1.26.0",
"requests~=2.28.0",
"torch~=2.0.0",
"torchmetrics==0.11.4",
"torchvision>=0.14.1",
"--force-reinstall",
"--find-links" if find_links is not None else None,
find_links,
"--extra-index-url" if extra_index_url is not None else None,
extra_index_url,
]
& FG
)
def install_app(self, extra_index_url=None, optional_modules=None, find_links=None):
"""
Install the application with pip.
Supports installation from PyPi or from a local source directory.
:param extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes.
:type extra_index_url: str
:param optional_modules: optional modules to install using "[module1,module2]" format.
:type optional_modules: str
:param find_links: path to a directory containing wheels to be searched prior to going to the internet
:type find_links: Path
"""
## this only applies to pypi installs; TODO actually use this
if self.version == "pre":
version = None
pre = "--pre"
else:
version = self.version
pre = None
## TODO: only local wheel will be installed as of now; support for --version arg is TODO
if FF_USE_LOCAL_WHEEL:
# if no wheel, try to do a source install before giving up
try:
src = str(next(Path(__file__).parent.glob("InvokeAI-*.whl")))
except StopIteration:
try:
src = Path(__file__).parents[1].expanduser().resolve()
# if the above directory contains one of these files, we'll do a source install
next(src.glob("pyproject.toml"))
next(src.glob("invokeai"))
                except StopIteration:
                    print("Unable to find a wheel or perform a source install. Giving up.")
                    # bail out explicitly; otherwise `src` would be undefined below
                    sys.exit(-1)
elif version == "source":
# this makes an assumption about the location of the installer package in the source tree
src = Path(__file__).parents[1].expanduser().resolve()
else:
# will install from PyPi
src = f"invokeai=={version}" if version is not None else "invokeai"
from plumbum import FG, local
pip = local[self.pip]
(
pip[
"install",
"--require-virtualenv",
"--use-pep517",
str(src) + (optional_modules if optional_modules else ""),
"--find-links" if find_links is not None else None,
find_links,
"--extra-index-url" if extra_index_url is not None else None,
extra_index_url,
pre,
]
& FG
)
def configure(self):
"""
Configure the InvokeAI runtime directory
"""
# set sys.argv to a consistent state
new_argv = [sys.argv[0]]
for i in range(1, len(sys.argv)):
el = sys.argv[i]
if el in ["-r", "--root"]:
new_argv.append(el)
new_argv.append(sys.argv[i + 1])
elif el in ["-y", "--yes", "--yes-to-all"]:
new_argv.append(el)
sys.argv = new_argv
import requests # to catch download exceptions
from messages import introduction
introduction()
from invokeai.frontend.install.invokeai_configure import invokeai_configure
# NOTE: currently the config script does its own arg parsing! this means the command-line switches
# from the installer will also automatically propagate down to the config script.
# this may change in the future with config refactoring!
succeeded = False
try:
invokeai_configure()
succeeded = True
except requests.exceptions.ConnectionError as e:
print(f"\nA network error was encountered during configuration and download: {str(e)}")
except OSError as e:
print(f"\nAn OS error was encountered during configuration and download: {str(e)}")
except Exception as e:
print(f"\nA problem was encountered during the configuration and download steps: {str(e)}")
finally:
if not succeeded:
print('To try again, find the "invokeai" directory, run the script "invoke.sh" or "invoke.bat"')
print("and choose option 7 to fix a broken install, optionally followed by option 5 to install models.")
print("Alternatively you can relaunch the installer.")
def install_user_scripts(self):
"""
Copy the launch and update scripts to the runtime dir
"""
ext = "bat" if OS == "Windows" else "sh"
# scripts = ['invoke', 'update']
scripts = ["invoke"]
for script in scripts:
src = Path(__file__).parent / ".." / "templates" / f"{script}.{ext}.in"
dest = self.runtime / f"{script}.{ext}"
shutil.copy(src, dest)
os.chmod(dest, 0o0755)
def update(self):
pass
def remove(self):
pass
### Utility functions ###
def get_pip_from_venv(venv_path: Path) -> str:
"""
Given a path to a virtual environment, get the absolute path to the `pip` executable
in a cross-platform fashion. Does not validate that the pip executable
actually exists in the virtualenv.
:param venv_path: Path to the virtual environment
:type venv_path: Path
:return: Absolute path to the pip executable
:rtype: str
"""
pip = "Scripts\\pip.exe" if OS == "Windows" else "bin/pip"
return str(venv_path.expanduser().resolve() / pip)
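# Illustrative example (not part of the original): on Linux/macOS this yields
# something like '/home/user/invokeai/.venv/bin/pip', while on Windows it
# yields a path ending in '.venv\\Scripts\\pip.exe':
#
#     get_pip_from_venv(Path("~/invokeai/.venv"))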
def set_sys_path(venv_path: Path) -> None:
"""
Given a path to a virtual environment, set the sys.path, in a cross-platform fashion,
such that packages from the given venv may be imported in the current process.
Ensure that the packages from system environment are not visible (emulate
the virtual env 'activate' script) - this doesn't work on Windows yet.
:param venv_path: Path to the virtual environment
:type venv_path: Path
"""
# filter out any paths in sys.path that may be system- or user-wide
# but leave the temporary bootstrap virtualenv as it contains packages we
# temporarily need at install time
sys.path = list(filter(lambda p: not p.endswith("-packages") or p.find(BOOTSTRAP_VENV_PREFIX) != -1, sys.path))
# determine site-packages/lib directory location for the venv
lib = "Lib" if OS == "Windows" else f"lib/python{sys.version_info.major}.{sys.version_info.minor}"
    # add the venv's site-packages location to sys.path
sys.path.append(str(Path(venv_path, lib, "site-packages").expanduser().resolve()))
def get_torch_source() -> tuple[Union[str, None], str]:
    """
    Determine the extra index URL for pip to use for torch installation.
    This depends on the OS and the graphics accelerator in use.
    This is only applicable to Windows and Linux, since PyTorch does not
    offer accelerated builds for macOS.
    Prefer CUDA-enabled wheels if the user wasn't sure of their GPU, as it will fall back to CPU if possible.
    A NoneType return means just go to PyPi.
    :return: tuple consisting of (extra index url or None, optional modules to load or None)
    :rtype: tuple
    """
from messages import graphical_accelerator
# device can be one of: "cuda", "rocm", "cpu", "idk"
device = graphical_accelerator()
url = None
optional_modules = "[onnx]"
if OS == "Linux":
if device == "rocm":
url = "https://download.pytorch.org/whl/rocm5.4.2"
elif device == "cpu":
url = "https://download.pytorch.org/whl/cpu"
if device == "cuda":
url = "https://download.pytorch.org/whl/cu118"
optional_modules = "[xformers,onnx-cuda]"
if device == "cuda_and_dml":
url = "https://download.pytorch.org/whl/cu118"
optional_modules = "[xformers,onnx-directml]"
# in all other cases, Torch wheels should be coming from PyPi as of Torch 1.13
return (url, optional_modules)
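# --- Usage sketch (illustrative, not part of the original script) ---
# The real entry point lives elsewhere in the installer package; assuming this
# module were driven directly, a minimal non-interactive run might look like:
#
#     installer = Installer()
#     installer.install(root="~/invokeai", version="latest", yes_to_all=True)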
|
47b10ae853b0b97fee72dc1bcb08b81773ed79f2
|
e75c5412063078c9ea3e7c71a8dc7a2026083a34
|
/astropy/io/fits/tests/test_table.py
|
bac9d3c56b409255990304d3e18483b7d794eecf
|
[
"BSD-3-Clause"
] |
permissive
|
astropy/astropy
|
d6636f24acdf2b18fc3e413ca0c4b1162a63dd41
|
53188c39a23c33b72df5850ec59e31886f84e29d
|
refs/heads/main
| 2023-08-27T18:16:44.061375
| 2023-08-27T16:07:35
| 2023-08-27T16:07:35
| 2,081,289
| 3,922
| 1,935
|
BSD-3-Clause
| 2023-09-14T09:23:26
| 2011-07-21T01:33:49
|
Python
|
UTF-8
|
Python
| false
| false
| 153,101
|
py
|
test_table.py
|
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import copy
import gc
import pickle
import re
import sys
import warnings
import numpy as np
import pytest
from numpy import char as chararray
try:
import objgraph
HAVE_OBJGRAPH = True
except ImportError:
HAVE_OBJGRAPH = False
from astropy.io import fits
from astropy.io.fits.column import NUMPY2FITS, ColumnAttribute, Delayed
from astropy.io.fits.util import decode_ascii
from astropy.io.fits.verify import VerifyError
from astropy.table import Table
from astropy.units import Unit, UnitsWarning, UnrecognizedUnit
from astropy.utils.compat import NUMPY_LT_1_22_1
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from .conftest import FitsTestCase
def comparefloats(a, b):
"""
Compare two float scalars or arrays and see if they are consistent
Consistency is determined ensuring the difference is less than the
expected amount. Return True if consistent, False if any differences.
"""
aa = a
bb = b
# compute expected precision
if aa.dtype.name == "float32" or bb.dtype.name == "float32":
precision = 0.000001
else:
precision = 0.0000000000000001
precision = 0.00001 # until precision problem is fixed in astropy.io.fits
diff = np.absolute(aa - bb)
mask0 = aa == 0
masknz = aa != 0.0
if np.any(mask0):
if diff[mask0].max() != 0.0:
return False
if np.any(masknz):
if (diff[masknz] / np.absolute(aa[masknz])).max() > precision:
return False
return True
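# Illustrative example (not part of the test suite): with the coarse 1e-5
# threshold above, a tiny relative difference between float32 arrays passes:
#
#     comparefloats(np.array([1.0], dtype=np.float32),
#                   np.array([1.0 + 1e-7], dtype=np.float32))  # -> True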
def comparerecords(a, b):
"""
Compare two record arrays
Does this field by field, using approximation testing for float columns
(Complex not yet handled.)
Column names not compared, but column types and sizes are.
"""
nfieldsa = len(a.dtype.names)
nfieldsb = len(b.dtype.names)
if nfieldsa != nfieldsb:
print("number of fields don't match")
return False
for i in range(nfieldsa):
fielda = a.field(i)
fieldb = b.field(i)
if fielda.dtype.char == "S":
fielda = decode_ascii(fielda)
if fieldb.dtype.char == "S":
fieldb = decode_ascii(fieldb)
if not isinstance(fielda, type(fieldb)) and not isinstance(
fieldb, type(fielda)
):
print("type(fielda): ", type(fielda), " fielda: ", fielda)
print("type(fieldb): ", type(fieldb), " fieldb: ", fieldb)
print(f"field {i} type differs")
return False
if len(fielda) and isinstance(fielda[0], np.floating):
if not comparefloats(fielda, fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f"field {i} differs")
return False
elif isinstance(fielda, fits.column._VLF) or isinstance(
fieldb, fits.column._VLF
):
            for row in range(len(fielda)):
                if np.any(fielda[row] != fieldb[row]):
                    print(f"fielda[{row}]: {fielda[row]}")
                    print(f"fieldb[{row}]: {fieldb[row]}")
                    print(f"field {i} differs in row {row}")
                    return False
else:
if np.any(fielda != fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f"field {i} differs")
return False
return True
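# Illustrative example (not part of the test suite): comparerecords accepts any
# two record arrays with matching field counts, e.g.:
#
#     ra = np.rec.array([(1, 2.0)], names="a,b")
#     comparerecords(ra, ra.copy())  # -> True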
def _assert_attr_col(new_tbhdu, tbhdu):
"""
Helper function to compare column attributes
"""
# Double check that the headers are equivalent
assert tbhdu.columns.names == new_tbhdu.columns.names
attrs = [
k for k, v in fits.Column.__dict__.items() if isinstance(v, ColumnAttribute)
]
for name in tbhdu.columns.names:
col = tbhdu.columns[name]
new_col = new_tbhdu.columns[name]
for attr in attrs:
if getattr(col, attr) and getattr(new_col, attr):
assert getattr(col, attr) == getattr(new_col, attr)
class TestTableFunctions(FitsTestCase):
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
        Ensure that a header from one HDU is copied when used to initialize a
        new HDU.
This is like the test of the same name in test_image, but tests this
for tables as well.
"""
ifd = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU()])
thdr = ifd[1].header
thdr["FILENAME"] = "labq01i3q_rawtag.fits"
thdu = fits.BinTableHDU(header=thdr)
ofd = fits.HDUList(thdu)
ofd[0].header["FILENAME"] = "labq01i3q_flt.fits"
# Original header should be unchanged
assert thdr["FILENAME"] == "labq01i3q_rawtag.fits"
def test_open(self, home_is_data):
# open some existing FITS files:
tt = fits.open(self.data("tb.fits"))
fd = fits.open(self.data("test0.fits"))
# create some local arrays
a1 = chararray.array(["abc", "def", "xx"])
r1 = np.array([11.0, 12.0, 13.0], dtype=np.float32)
# create a table from scratch, using a mixture of columns from existing
# tables and locally created arrays:
# first, create individual column definitions
c1 = fits.Column(name="abc", format="3A", array=a1)
c2 = fits.Column(name="def", format="E", array=r1)
a3 = np.array([3, 4, 5], dtype="i2")
c3 = fits.Column(name="xyz", format="I", array=a3)
a4 = np.array([1, 2, 3], dtype="i2")
c4 = fits.Column(name="t1", format="I", array=a4)
a5 = np.array([3 + 3j, 4 + 4j, 5 + 5j], dtype="c8")
c5 = fits.Column(name="t2", format="C", array=a5)
        # Note that X format must be a two-D array
a6 = np.array([[0], [1], [0]], dtype=np.uint8)
c6 = fits.Column(name="t3", format="X", array=a6)
a7 = np.array([101, 102, 103], dtype="i4")
c7 = fits.Column(name="t4", format="J", array=a7)
a8 = np.array(
[
[1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
],
dtype=np.uint8,
)
c8 = fits.Column(name="t5", format="11X", array=a8)
# second, create a column-definitions object for all columns in a table
x = fits.ColDefs([c1, c2, c3, c4, c5, c6, c7, c8])
tbhdu = fits.BinTableHDU.from_columns(x)
# another way to create a table is by using existing table's
# information:
x2 = fits.ColDefs(tt[1])
t2 = fits.BinTableHDU.from_columns(x2, nrows=2)
ra = np.rec.array(
[(1, "abc", 3.7000002861022949, 0), (2, "xy ", 6.6999998092651367, 1)],
names="c1, c2, c3, c4",
)
assert comparerecords(t2.data, ra)
# the table HDU's data is a subclass of a record array, so we can
# access one row like this:
assert tbhdu.data[1][0] == a1[1]
assert tbhdu.data[1][1] == r1[1]
assert tbhdu.data[1][2] == a3[1]
assert tbhdu.data[1][3] == a4[1]
assert tbhdu.data[1][4] == a5[1]
assert (tbhdu.data[1][5] == a6[1].view("bool")).all()
assert tbhdu.data[1][6] == a7[1]
assert (tbhdu.data[1][7] == a8[1]).all()
# and a column like this:
assert str(tbhdu.data.field("abc")) == "['abc' 'def' 'xx']"
# An alternative way to create a column-definitions object is from an
# existing table.
_ = fits.ColDefs(tt[1])
# now we write out the newly created table HDU to a FITS file:
fout = fits.HDUList(fits.PrimaryHDU())
fout.append(tbhdu)
fout.writeto(self.temp("tableout1.fits"), overwrite=True)
with fits.open(self.temp("tableout1.fits")) as f2:
exp = [True, True, False, True, False, True, True, True, False, False, True]
temp = f2[1].data.field(7)
assert (temp[0] == exp).all()
# An alternative way to create an output table FITS file:
fout2 = fits.open(self.temp("tableout2.fits"), "append")
fout2.append(fd[0])
fout2.append(tbhdu)
fout2.close()
tt.close()
fd.close()
def test_binary_table(self):
# binary table:
t = fits.open(self.data("tb.fits"))
assert t[1].header["tform1"] == "1J"
info = {
"name": ["c1", "c2", "c3", "c4"],
"format": ["1J", "3A", "1E", "1L"],
"unit": ["", "", "", ""],
"null": [-2147483647, "", "", ""],
"bscale": ["", "", 3, ""],
"bzero": ["", "", 0.4, ""],
"disp": ["I11", "A3", "G15.7", "L6"],
"start": ["", "", "", ""],
"dim": ["", "", "", ""],
"coord_inc": ["", "", "", ""],
"coord_type": ["", "", "", ""],
"coord_unit": ["", "", "", ""],
"coord_ref_point": ["", "", "", ""],
"coord_ref_value": ["", "", "", ""],
"time_ref_pos": ["", "", "", ""],
}
assert t[1].columns.info(output=False) == info
ra = np.rec.array(
[(1, "abc", 3.7000002861022949, 0), (2, "xy ", 6.6999998092651367, 1)],
names="c1, c2, c3, c4",
)
assert comparerecords(t[1].data, ra[:2])
# Change scaled field and scale back to the original array
t[1].data.field("c4")[0] = 1
t[1].data._scale_back()
assert str(np.rec.recarray.field(t[1].data, "c4")) == "[84 84]"
# look at data column-wise
assert (t[1].data.field(0) == np.array([1, 2])).all()
# When there are scaled columns, the raw data are in data._parent
t.close()
def test_ascii_table(self):
# ASCII table
a = fits.open(self.data("ascii.fits"))
ra1 = np.rec.array(
[
(10.123000144958496, 37),
(5.1999998092651367, 23),
(15.609999656677246, 17),
(0.0, 0),
(345.0, 345),
],
names="c1, c2",
)
assert comparerecords(a[1].data, ra1)
# Test slicing
a2 = a[1].data[2:][2:]
ra2 = np.rec.array([(345.0, 345)], names="c1, c2")
assert comparerecords(a2, ra2)
assert (a2.field(1) == np.array([345])).all()
ra3 = np.rec.array(
[(10.123000144958496, 37), (15.609999656677246, 17), (345.0, 345)],
names="c1, c2",
)
assert comparerecords(a[1].data[::2], ra3)
# Test Start Column
a1 = chararray.array(["abcd", "def"])
r1 = np.array([11.0, 12.0])
c1 = fits.Column(name="abc", format="A3", start=19, array=a1)
c2 = fits.Column(name="def", format="E", start=3, array=r1)
c3 = fits.Column(name="t1", format="I", array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c2, c1, c3])
assert dict(hdu.data.dtype.fields) == {
"abc": (np.dtype("|S3"), 18),
"def": (np.dtype("|S15"), 2),
"t1": (np.dtype("|S10"), 21),
}
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
# Test Scaling
r1 = np.array([11.0, 12.0])
c2 = fits.Column(name="def", format="D", array=r1, bscale=2.3, bzero=0.6)
hdu = fits.TableHDU.from_columns([c2])
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with open(self.temp("toto.fits")) as f:
assert "4.95652173913043548D+00" in f.read()
with fits.open(self.temp("toto.fits")) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
# Test Integer precision according to width
c1 = fits.Column(name="t2", format="I2", array=[91, 92, 93])
c2 = fits.Column(name="t4", format="I5", array=[91, 92, 93])
c3 = fits.Column(name="t8", format="I10", array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c1, c2, c3])
assert c1.array.dtype == np.int16
assert c2.array.dtype == np.int32
assert c3.array.dtype == np.int64
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
a.close()
def test_endianness(self):
x = np.ndarray((1,), dtype=object)
channelsIn = np.array([3], dtype="uint8")
x[0] = channelsIn
col = fits.Column(name="Channels", format="PB()", array=x)
cols = fits.ColDefs([col])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.name = "RFI"
tbhdu.writeto(self.temp("testendian.fits"), overwrite=True)
hduL = fits.open(self.temp("testendian.fits"))
rfiHDU = hduL["RFI"]
data = rfiHDU.data
channelsOut = data.field("Channels")[0]
assert (channelsIn == channelsOut).all()
hduL.close()
def test_column_endianness(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/77
(Astropy doesn't preserve byte order of non-native order column arrays)
"""
a = [1.0, 2.0, 3.0, 4.0]
a1 = np.array(a, dtype="<f8")
a2 = np.array(a, dtype=">f8")
col1 = fits.Column(name="a", format="D", array=a1)
col2 = fits.Column(name="b", format="D", array=a2)
cols = fits.ColDefs([col1, col2])
tbhdu = fits.BinTableHDU.from_columns(cols)
assert (tbhdu.data["a"] == a1).all()
assert (tbhdu.data["b"] == a2).all()
# Double check that the array is converted to the correct byte-order
# for FITS (big-endian).
tbhdu.writeto(self.temp("testendian.fits"), overwrite=True)
with fits.open(self.temp("testendian.fits")) as hdul:
assert (hdul[1].data["a"] == a2).all()
assert (hdul[1].data["b"] == a2).all()
def test_recarray_to_bintablehdu(self):
bright = np.rec.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
formats="int16,a20,float32,a10",
names="order,name,mag,Sp",
)
hdu = fits.BinTableHDU(bright)
assert comparerecords(hdu.data, bright)
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
assert comparerecords(bright, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu(self):
desc = np.dtype(
{
"names": ["order", "name", "mag", "Sp"],
"formats": ["int", "S20", "float32", "S10"],
}
)
a = np.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
dtype=desc,
)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu_with_unicode(self):
desc = np.dtype(
{
"names": ["order", "name", "mag", "Sp"],
"formats": ["int", "U20", "float32", "U10"],
}
)
a = np.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
dtype=desc,
)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp("toto.fits"), overwrite=True)
hdul = fits.open(self.temp("toto.fits"))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_new_table_from_recarray(self):
bright = np.rec.array(
[
(1, "Serius", -1.45, "A1V"),
(2, "Canopys", -0.73, "F0Ib"),
(3, "Rigil Kent", -0.1, "G2V"),
],
formats="int16,a20,float64,a10",
names="order,name,mag,Sp",
)
hdu = fits.TableHDU.from_columns(bright, nrows=2)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.data._coldefs._arrays[0]
)
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.columns.columns[0].array
)
assert id(hdu.data._coldefs.columns[0].array) == id(hdu.columns._arrays[0])
        # Ensure I can change the value of one data element and it affects
        # all of the others.
hdu.data[0][0] = 213
assert hdu.data[0][0] == 213
assert hdu.data._coldefs._arrays[0][0] == 213
assert hdu.data._coldefs.columns[0].array[0] == 213
assert hdu.columns._arrays[0][0] == 213
assert hdu.columns.columns[0].array[0] == 213
hdu.data._coldefs._arrays[0][0] = 100
assert hdu.data[0][0] == 100
assert hdu.data._coldefs._arrays[0][0] == 100
assert hdu.data._coldefs.columns[0].array[0] == 100
assert hdu.columns._arrays[0][0] == 100
assert hdu.columns.columns[0].array[0] == 100
hdu.data._coldefs.columns[0].array[0] = 500
assert hdu.data[0][0] == 500
assert hdu.data._coldefs._arrays[0][0] == 500
assert hdu.data._coldefs.columns[0].array[0] == 500
assert hdu.columns._arrays[0][0] == 500
assert hdu.columns.columns[0].array[0] == 500
hdu.columns._arrays[0][0] = 600
assert hdu.data[0][0] == 600
assert hdu.data._coldefs._arrays[0][0] == 600
assert hdu.data._coldefs.columns[0].array[0] == 600
assert hdu.columns._arrays[0][0] == 600
assert hdu.columns.columns[0].array[0] == 600
hdu.columns.columns[0].array[0] = 800
assert hdu.data[0][0] == 800
assert hdu.data._coldefs._arrays[0][0] == 800
assert hdu.data._coldefs.columns[0].array[0] == 800
assert hdu.columns._arrays[0][0] == 800
assert hdu.columns.columns[0].array[0] == 800
assert (hdu.data.field(0) == np.array([800, 2], dtype=np.int16)).all()
assert hdu.data[0][1] == "Serius"
assert hdu.data[1][1] == "Canopys"
assert (hdu.data.field(2) == np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdu.data[0][3] == "A1V"
assert hdu.data[1][3] == "F0Ib"
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as hdul:
assert (hdul[1].data.field(0) == np.array([800, 2], dtype=np.int16)).all()
assert hdul[1].data[0][1] == "Serius"
assert hdul[1].data[1][1] == "Canopys"
assert (
hdul[1].data.field(2) == np.array([-1.45, -0.73], dtype=np.float64)
).all()
assert hdul[1].data[0][3] == "A1V"
assert hdul[1].data[1][3] == "F0Ib"
del hdul
hdu = fits.BinTableHDU.from_columns(bright, nrows=2)
tmp = np.rec.array(
[(1, "Serius", -1.45, "A1V"), (2, "Canopys", -0.73, "F0Ib")],
formats="int16,a20,float64,a10",
names="order,name,mag,Sp",
)
assert comparerecords(hdu.data, tmp)
hdu.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
def test_new_fitsrec(self):
"""
Tests creating a new FITS_rec object from a multi-field ndarray.
"""
with fits.open(self.data("tb.fits")) as h:
data = h[1].data
new_data = np.array([(3, "qwe", 4.5, False)], dtype=data.dtype)
appended = np.append(data, new_data).view(fits.FITS_rec)
assert repr(appended).startswith("FITS_rec(")
# This test used to check the entire string representation of FITS_rec,
# but that has problems between different numpy versions. Instead just
# check that the FITS_rec was created, and we'll let subsequent tests
# worry about checking values and such
def test_appending_a_column(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
counts = np.array([412, 434, 408, 417])
names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table2.fits"))
# Append the rows of table 2 after the rows of table 1
# The column definitions are assumed to be the same
# Open the two files we want to append
t1 = fits.open(self.temp("table1.fits"))
t2 = fits.open(self.temp("table2.fits"))
# Get the number of rows in the table from the first file
nrows1 = t1[1].data.shape[0]
# Get the total number of rows in the resulting appended table
nrows = t1[1].data.shape[0] + t2[1].data.shape[0]
assert t1[1].columns._arrays[1] is t1[1].columns.columns[1].array
# Create a new table that consists of the data from the first table
# but has enough space in the ndarray to hold the data from both tables
hdu = fits.BinTableHDU.from_columns(t1[1].columns, nrows=nrows)
# For each column in the tables append the data from table 2 after the
# data from table 1.
for i in range(len(t1[1].columns)):
hdu.data.field(i)[nrows1:] = t2[1].data.field(i)
hdu.writeto(self.temp("newtable.fits"))
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""),
(1, "", 1, "BinTableHDU", 19, "8R x 5C", "[10A, J, 10A, 5E, L]", ""),
]
assert fits.info(self.temp("newtable.fits"), output=False) == info
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True),
("NGC2", 334, "", z, False),
("NGC3", 308, "", z, True),
("NCG4", 317, "", z, True),
("NGC5", 412, "", z, False),
("NGC6", 434, "", z, True),
("NGC7", 408, "", z, False),
("NCG8", 417, "", z, False),
],
formats="a10,u4,a10,5f4,l",
)
assert comparerecords(hdu.data, array)
# Verify that all of the references to the data point to the same
# numarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
# Same verification from the file
hdul = fits.open(self.temp("newtable.fits"))
hdu = hdul[1]
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_adding_a_column(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]
coldefs1 = coldefs + c5
tbhdu1 = fits.BinTableHDU.from_columns(coldefs1)
assert tbhdu1.columns.names == ["target", "counts", "notes", "spectrum", "flag"]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True),
("NGC2", 334, "", z, False),
("NGC3", 308, "", z, True),
("NCG4", 317, "", z, True),
],
formats="a10,u4,a10,5f4,l",
)
assert comparerecords(tbhdu1.data, array)
def test_adding_a_column_inplace(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]
tbhdu.columns.add_col(c5)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum", "flag"]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True),
("NGC2", 334, "", z, False),
("NGC3", 308, "", z, True),
("NCG4", 317, "", z, True),
],
formats="a10,u4,a10,5f4,l",
)
assert comparerecords(tbhdu.data, array)
def test_adding_a_column_to_file(self):
hdul = fits.open(self.data("table.fits"))
tbhdu = hdul[1]
col = fits.Column(name="a", array=np.array([1, 2]), format="K")
tbhdu.columns.add_col(col)
assert tbhdu.columns.names == ["target", "V_mag", "a"]
array = np.rec.array(
[("NGC1001", 11.1, 1), ("NGC1002", 12.3, 2), ("NGC1003", 15.2, 0)],
formats="a20,f4,i8",
)
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_removing_a_column_inplace(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum", "flag"]
tbhdu.columns.del_col("flag")
assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z),
("NGC2", 334, "", z),
("NGC3", 308, "", z),
("NCG4", 317, "", z),
],
formats="a10,u4,a10,5f4",
)
assert comparerecords(tbhdu.data, array)
tbhdu.columns.del_col("counts")
tbhdu.columns.del_col("notes")
assert tbhdu.columns.names == ["target", "spectrum"]
array = np.rec.array(
[("NGC1", z), ("NGC2", z), ("NGC3", z), ("NCG4", z)], formats="a10,5f4"
)
assert comparerecords(tbhdu.data, array)
def test_removing_a_column_from_file(self):
hdul = fits.open(self.data("table.fits"))
tbhdu = hdul[1]
tbhdu.columns.del_col("V_mag")
assert tbhdu.columns.names == ["target"]
array = np.rec.array([("NGC1001",), ("NGC1002",), ("NGC1003",)], formats="a20")
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_merge_tables(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
counts = np.array([412, 434, 408, 417])
names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"])
c1 = fits.Column(name="target1", format="10A", array=names)
c2 = fits.Column(name="counts1", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes1", format="A10")
c4 = fits.Column(name="spectrum1", format="5E")
c5 = fits.Column(name="flag1", format="L", array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table2.fits"))
# Merge the columns of table 2 after the columns of table 1
# The column names are assumed to be different
# Open the two files we want to append
t1 = fits.open(self.temp("table1.fits"))
t2 = fits.open(self.temp("table2.fits"))
hdu = fits.BinTableHDU.from_columns(t1[1].columns + t2[1].columns)
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True, "NGC5", 412, "", z, False),
("NGC2", 334, "", z, False, "NGC6", 434, "", z, True),
("NGC3", 308, "", z, True, "NGC7", 408, "", z, False),
("NCG4", 317, "", z, True, "NCG8", 417, "", z, False),
],
formats="a10,u4,a10,5f4,l,a10,u4,a10,5f4,l",
)
assert comparerecords(hdu.data, array)
hdu.writeto(self.temp("newtable.fits"))
# Verify that all of the references to the data point to the same
# numarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
columns_info = "[10A, J, 10A, 5E, L, 10A, J, 10A, 5E, L]"
info = [
(0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""),
(1, "", 1, "BinTableHDU", 30, "4R x 10C", columns_info, ""),
]
assert fits.info(self.temp("newtable.fits"), output=False) == info
hdul = fits.open(self.temp("newtable.fits"))
hdu = hdul[1]
assert hdu.columns.names == [
"target",
"counts",
"notes",
"spectrum",
"flag",
"target1",
"counts1",
"notes1",
"spectrum1",
"flag1",
]
z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
array = np.rec.array(
[
("NGC1", 312, "", z, True, "NGC5", 412, "", z, False),
("NGC2", 334, "", z, False, "NGC6", 434, "", z, True),
("NGC3", 308, "", z, True, "NGC7", 408, "", z, False),
("NCG4", 317, "", z, True, "NCG8", 417, "", z, False),
],
formats="a10,u4,a10,5f4,l,a10,u4,a10,5f4,l",
)
assert comparerecords(hdu.data, array)
# Same verification from the file
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_modify_column_attributes(self):
"""Regression test for https://github.com/astropy/astropy/issues/996
This just tests one particular use case, but it should apply pretty
well to other similar cases.
"""
NULLS = {"a": 2, "b": "b", "c": 2.3}
data = np.array(
list(zip([1, 2, 3, 4], ["a", "b", "c", "d"], [2.3, 4.5, 6.7, 8.9])),
dtype=[("a", int), ("b", "S1"), ("c", float)],
)
b = fits.BinTableHDU(data=data)
for col in b.columns:
col.null = NULLS[col.name]
b.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul:
header = hdul[1].header
assert header["TNULL1"] == 2
assert header["TNULL2"] == "b"
assert header["TNULL3"] == 2.3
def test_multidimension_table_from_numpy_rec_columns(self):
"""Regression test for https://github.com/astropy/astropy/issues/5280
and https://github.com/astropy/astropy/issues/5287
        multidimensional tables can now be written with the correct TDIM.
Author: Stephen Bailey.
"""
dtype = [
("x", (str, 5)), # 1D column of 5-character strings
("y", (str, 3), (4,)), # 2D column; each row is four 3-char strings
]
data = np.zeros(2, dtype=dtype)
data["x"] = ["abcde", "xyz"]
data["y"][0] = ["A", "BC", "DEF", "123"]
data["y"][1] = ["X", "YZ", "PQR", "999"]
table = Table(data)
# Test convenience functions io.fits.writeto / getdata
fits.writeto(self.temp("test.fits"), data)
dx = fits.getdata(self.temp("test.fits"))
assert data["x"].dtype == dx["x"].dtype
assert data["y"].dtype == dx["y"].dtype
assert np.all(data["x"] == dx["x"]), f"x: {data['x']} != {dx['x']}"
assert np.all(data["y"] == dx["y"]), f"y: {data['y']} != {dx['y']}"
# Test fits.BinTableHDU(data) and avoid convenience functions
hdu0 = fits.PrimaryHDU()
hdu1 = fits.BinTableHDU(data)
hx = fits.HDUList([hdu0, hdu1])
hx.writeto(self.temp("test2.fits"))
fx = fits.open(self.temp("test2.fits"))
dx = fx[1].data
fx.close()
assert data["x"].dtype == dx["x"].dtype
assert data["y"].dtype == dx["y"].dtype
assert np.all(data["x"] == dx["x"]), f"x: {data['x']} != {dx['x']}"
assert np.all(data["y"] == dx["y"]), f"y: {data['y']} != {dx['y']}"
# Test Table write and read
table.write(self.temp("test3.fits"))
tx = Table.read(self.temp("test3.fits"), character_as_bytes=False)
assert table["x"].dtype == tx["x"].dtype
assert table["y"].dtype == tx["y"].dtype
assert np.all(table["x"] == tx["x"]), f"x: {table['x']} != {tx['x']}"
assert np.all(table["y"] == tx["y"]), f"y: {table['y']} != {tx['y']}"
def test_mask_array(self):
t = fits.open(self.data("table.fits"))
tbdata = t[1].data
mask = tbdata.field("V_mag") > 12
newtbdata = tbdata[mask]
hdu = fits.BinTableHDU(newtbdata)
hdu.writeto(self.temp("newtable.fits"))
hdul = fits.open(self.temp("newtable.fits"))
# match to a regex rather than a specific string.
expect = r"\[\('NGC1002',\s+12.3[0-9]*\) \(\'NGC1003\',\s+15.[0-9]+\)\]"
assert re.match(expect, str(hdu.data))
assert re.match(expect, str(hdul[1].data))
t.close()
hdul.close()
def test_slice_a_row(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
row = t1[1].data[2]
assert row["counts"] == 308
a, b, c = row[1:4]
assert a == counts[2]
assert b == ""
assert (c == np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)).all()
row["counts"] = 310
assert row["counts"] == 310
row[1] = 315
assert row["counts"] == 315
assert row[1:4]["counts"] == 315
pytest.raises(KeyError, lambda r: r[1:4]["flag"], row)
row[1:4]["counts"] = 300
assert row[1:4]["counts"] == 300
assert row["counts"] == 300
row[1:4][0] = 400
assert row[1:4]["counts"] == 400
row[1:4]["counts"] = 300
assert row[1:4]["counts"] == 300
# Test stepping for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/59
row[1:4][::-1][-1] = 500
assert row[1:4]["counts"] == 500
row[1:4:2][0] = 300
assert row[1:4]["counts"] == 300
pytest.raises(KeyError, lambda r: r[1:4]["flag"], row)
assert row[1:4].field(0) == 300
assert row[1:4].field("counts") == 300
pytest.raises(KeyError, row[1:4].field, "flag")
row[1:4].setfield("counts", 500)
assert row[1:4].field(0) == 500
pytest.raises(KeyError, row[1:4].setfield, "flag", False)
assert t1[1].data._coldefs._arrays[1][2] == 500
assert t1[1].data._coldefs.columns[1].array[2] == 500
assert t1[1].columns._arrays[1][2] == 500
assert t1[1].columns.columns[1].array[2] == 500
assert t1[1].data[2][1] == 500
t1.close()
def test_fits_record_len(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
assert len(t1[1].data[0]) == 5
assert len(t1[1].data[0][0:4]) == 4
assert len(t1[1].data[0][0:5]) == 5
assert len(t1[1].data[0][0:6]) == 5
assert len(t1[1].data[0][0:7]) == 5
assert len(t1[1].data[0][1:4]) == 3
assert len(t1[1].data[0][1:5]) == 4
assert len(t1[1].data[0][1:6]) == 4
assert len(t1[1].data[0][1:7]) == 4
t1.close()
def test_add_data_by_rows(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
c1 = fits.Column(name="target", format="10A")
c2 = fits.Column(name="counts", format="J", unit="DN")
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L")
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs, nrows=5)
        # Test assigning data to a table's row using a FITS_record
tbhdu.data[0] = tbhdu1.data[0]
tbhdu.data[4] = tbhdu1.data[3]
        # Test assigning data to a table's row using a tuple
tbhdu.data[2] = (
"NGC1",
312,
"A Note",
np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=np.float32),
True,
)
        # Test assigning data to a table's row using a list
tbhdu.data[3] = [
"JIM1",
"33",
"A Note",
np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32),
True,
]
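        # Note the string "33" above: assigning a whole row coerces each
        # value to its column's dtype, so "33" is stored as the integer 33
        # in the 'counts' column (checked by the assertions below).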
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.data._coldefs._arrays[0]
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.columns.columns[0].array
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns._arrays[0])
assert tbhdu.data[0][1] == 312
assert tbhdu.data._coldefs._arrays[1][0] == 312
assert tbhdu.data._coldefs.columns[1].array[0] == 312
assert tbhdu.columns._arrays[1][0] == 312
assert tbhdu.columns.columns[1].array[0] == 312
assert tbhdu.columns.columns[0].array[0] == "NGC1"
assert tbhdu.columns.columns[2].array[0] == ""
assert (
tbhdu.columns.columns[3].array[0]
== np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu.columns.columns[4].array[0] == np.True_), (bool, np.bool_)
)
and v
)
assert tbhdu.data[3][1] == 33
assert tbhdu.data._coldefs._arrays[1][3] == 33
assert tbhdu.data._coldefs.columns[1].array[3] == 33
assert tbhdu.columns._arrays[1][3] == 33
assert tbhdu.columns.columns[1].array[3] == 33
assert tbhdu.columns.columns[0].array[3] == "JIM1"
assert tbhdu.columns.columns[2].array[3] == "A Note"
assert (
tbhdu.columns.columns[3].array[3]
== np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu.columns.columns[4].array[3] == np.True_), (bool, np.bool_)
)
and v
)
def test_assign_multiple_rows_to_table(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
counts = np.array([112, 134, 108, 117])
names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][3] = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
tbhdu2 = fits.BinTableHDU.from_columns(tbhdu1.data, nrows=9)
        # Assign the 4 rows from the second table to rows 5 through 8 of the
# new table. Note that the last row of the new table will still be
# initialized to the default values.
tbhdu2.data[4:] = tbhdu.data
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu2.data._coldefs.columns[0].array) == id(
tbhdu2.data._coldefs._arrays[0]
)
assert id(tbhdu2.data._coldefs.columns[0].array) == id(
tbhdu2.columns.columns[0].array
)
assert id(tbhdu2.data._coldefs.columns[0].array) == id(
tbhdu2.columns._arrays[0]
)
assert tbhdu2.data[0][1] == 312
assert tbhdu2.data._coldefs._arrays[1][0] == 312
assert tbhdu2.data._coldefs.columns[1].array[0] == 312
assert tbhdu2.columns._arrays[1][0] == 312
assert tbhdu2.columns.columns[1].array[0] == 312
assert tbhdu2.columns.columns[0].array[0] == "NGC1"
assert tbhdu2.columns.columns[2].array[0] == ""
assert (
tbhdu2.columns.columns[3].array[0]
== np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu2.columns.columns[4].array[0] == np.True_), (bool, np.bool_)
)
and v
)
assert tbhdu2.data[4][1] == 112
assert tbhdu2.data._coldefs._arrays[1][4] == 112
assert tbhdu2.data._coldefs.columns[1].array[4] == 112
assert tbhdu2.columns._arrays[1][4] == 112
assert tbhdu2.columns.columns[1].array[4] == 112
assert tbhdu2.columns.columns[0].array[4] == "NGC5"
assert tbhdu2.columns.columns[2].array[4] == ""
assert (
tbhdu2.columns.columns[3].array[4]
== np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu2.columns.columns[4].array[4] == np.False_), (bool, np.bool_)
)
and v
)
assert tbhdu2.columns.columns[1].array[8] == 0
assert tbhdu2.columns.columns[0].array[8] == ""
assert tbhdu2.columns.columns[2].array[8] == ""
assert (
tbhdu2.columns.columns[3].array[8]
== np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
).all()
assert (
isinstance(
v := (tbhdu2.columns.columns[4].array[8] == np.False_), (bool, np.bool_)
)
and v
)
def test_verify_data_references(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
# Verify that original ColDefs object has independent Column
# objects.
assert id(coldefs.columns[0]) != id(c1)
# Verify that original ColDefs object has independent ndarray
# objects.
assert id(coldefs.columns[0].array) != id(names)
# Verify that original ColDefs object references the same data
# object as the original Column object.
assert id(coldefs.columns[0].array) == id(c1.array)
assert id(coldefs.columns[0].array) == id(coldefs._arrays[0])
# Verify new HDU has an independent ColDefs object.
assert id(coldefs) != id(tbhdu.columns)
# Verify new HDU has independent Column objects.
assert id(coldefs.columns[0]) != id(tbhdu.columns.columns[0])
# Verify new HDU has independent ndarray objects.
assert id(coldefs.columns[0].array) != id(tbhdu.columns.columns[0].array)
# Verify that both ColDefs objects in the HDU reference the same
# Coldefs object.
assert id(tbhdu.columns) == id(tbhdu.data._coldefs)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.data._coldefs._arrays[0]
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(
tbhdu.columns.columns[0].array
)
assert id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns._arrays[0])
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_ndarray(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu1 = fits.BinTableHDU.from_columns(tbhdu.data.view(np.ndarray))
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
tbhdu1.data._coldefs._arrays[0]
)
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
tbhdu1.columns.columns[0].array
)
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
tbhdu1.columns._arrays[0]
)
        # Ensure I can change the value of one data element and it affects
        # all of the others.
tbhdu1.data[0][1] = 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
tbhdu1.data._coldefs.columns[1].array[0] = 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
tbhdu1.columns._arrays[1][0] = 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
tbhdu1.columns.columns[1].array[0] = 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
tbhdu1.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_fits_rec(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][1] = 213
assert tbhdu.data[0][1] == 213
assert tbhdu.data._coldefs._arrays[1][0] == 213
assert tbhdu.data._coldefs.columns[1].array[0] == 213
assert tbhdu.columns._arrays[1][0] == 213
assert tbhdu.columns.columns[1].array[0] == 213
tbhdu.data._coldefs._arrays[1][0] = 100
assert tbhdu.data[0][1] == 100
assert tbhdu.data._coldefs._arrays[1][0] == 100
assert tbhdu.data._coldefs.columns[1].array[0] == 100
assert tbhdu.columns._arrays[1][0] == 100
assert tbhdu.columns.columns[1].array[0] == 100
tbhdu.data._coldefs.columns[1].array[0] = 500
assert tbhdu.data[0][1] == 500
assert tbhdu.data._coldefs._arrays[1][0] == 500
assert tbhdu.data._coldefs.columns[1].array[0] == 500
assert tbhdu.columns._arrays[1][0] == 500
assert tbhdu.columns.columns[1].array[0] == 500
tbhdu.columns._arrays[1][0] = 600
assert tbhdu.data[0][1] == 600
assert tbhdu.data._coldefs._arrays[1][0] == 600
assert tbhdu.data._coldefs.columns[1].array[0] == 600
assert tbhdu.columns._arrays[1][0] == 600
assert tbhdu.columns.columns[1].array[0] == 600
tbhdu.columns.columns[1].array[0] = 800
assert tbhdu.data[0][1] == 800
assert tbhdu.data._coldefs._arrays[1][0] == 800
assert tbhdu.data._coldefs.columns[1].array[0] == 800
assert tbhdu.columns._arrays[1][0] == 800
assert tbhdu.columns.columns[1].array[0] == 800
tbhdu.columns.columns[1].array[0] = 312
tbhdu.writeto(self.temp("table1.fits"))
t1 = fits.open(self.temp("table1.fits"))
t1[1].data[0][1] = 1
fr = t1[1].data
assert t1[1].data[0][1] == 1
assert t1[1].data._coldefs._arrays[1][0] == 1
assert t1[1].data._coldefs.columns[1].array[0] == 1
assert t1[1].columns._arrays[1][0] == 1
assert t1[1].columns.columns[1].array[0] == 1
assert fr[0][1] == 1
assert fr._coldefs._arrays[1][0] == 1
assert fr._coldefs.columns[1].array[0] == 1
fr._coldefs.columns[1].array[0] = 312
tbhdu1 = fits.BinTableHDU.from_columns(fr)
i = 0
for row in tbhdu1.data:
for j in range(len(row)):
if isinstance(row[j], np.ndarray):
assert (row[j] == tbhdu.data[i][j]).all()
else:
assert row[j] == tbhdu.data[i][j]
i = i + 1
tbhdu1.data[0][1] = 213
assert t1[1].data[0][1] == 312
assert t1[1].data._coldefs._arrays[1][0] == 312
assert t1[1].data._coldefs.columns[1].array[0] == 312
assert t1[1].columns._arrays[1][0] == 312
assert t1[1].columns.columns[1].array[0] == 312
assert fr[0][1] == 312
assert fr._coldefs._arrays[1][0] == 312
assert fr._coldefs.columns[1].array[0] == 312
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
t1[1].data[0][1] = 10
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 666
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 666
assert tbhdu1.data._coldefs._arrays[1][0] == 666
assert tbhdu1.data._coldefs.columns[1].array[0] == 666
assert tbhdu1.columns._arrays[1][0] == 666
assert tbhdu1.columns.columns[1].array[0] == 666
t1.close()
def test_bin_table_hdu_constructor(self):
counts = np.array([312, 334, 308, 317])
names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
c1 = fits.Column(name="target", format="10A", array=names)
c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
c3 = fits.Column(name="notes", format="A10")
c4 = fits.Column(name="spectrum", format="5E")
c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
hdu = fits.BinTableHDU(tbhdu1.data)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.data._coldefs._arrays[0]
)
assert id(hdu.data._coldefs.columns[0].array) == id(
hdu.columns.columns[0].array
)
assert id(hdu.data._coldefs.columns[0].array) == id(hdu.columns._arrays[0])
# Verify that the references in the original HDU are the same as the
# references in the new HDU.
assert id(tbhdu1.data._coldefs.columns[0].array) == id(
hdu.data._coldefs._arrays[0]
)
# Verify that a change in the new HDU is reflected in both the new
# and original HDU.
hdu.data[0][1] = 213
assert hdu.data[0][1] == 213
assert hdu.data._coldefs._arrays[1][0] == 213
assert hdu.data._coldefs.columns[1].array[0] == 213
assert hdu.columns._arrays[1][0] == 213
assert hdu.columns.columns[1].array[0] == 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
hdu.data._coldefs._arrays[1][0] = 100
assert hdu.data[0][1] == 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
hdu.data._coldefs.columns[1].array[0] = 500
assert hdu.data[0][1] == 500
assert hdu.data._coldefs._arrays[1][0] == 500
assert hdu.data._coldefs.columns[1].array[0] == 500
assert hdu.columns._arrays[1][0] == 500
assert hdu.columns.columns[1].array[0] == 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
hdu.columns._arrays[1][0] = 600
assert hdu.data[0][1] == 600
assert hdu.data._coldefs._arrays[1][0] == 600
assert hdu.data._coldefs.columns[1].array[0] == 600
assert hdu.columns._arrays[1][0] == 600
assert hdu.columns.columns[1].array[0] == 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
hdu.columns.columns[1].array[0] = 800
assert hdu.data[0][1] == 800
assert hdu.data._coldefs._arrays[1][0] == 800
assert hdu.data._coldefs.columns[1].array[0] == 800
assert hdu.columns._arrays[1][0] == 800
assert hdu.columns.columns[1].array[0] == 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
def test_constructor_name_arg(self):
"""testConstructorNameArg
Passing name='...' to the BinTableHDU and TableHDU constructors
should set the .name attribute and 'EXTNAME' header keyword, and
override any name in an existing 'EXTNAME' value.
"""
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.name == ""
assert "EXTNAME" not in hdu.header
hdu.name = "FOO"
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# Passing name to constructor
hdu = hducls(name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# And overriding a header with a different extname
hdr = fits.Header()
hdr["EXTNAME"] = "EVENTS"
hdu = hducls(header=hdr, name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
def test_constructor_ver_arg(self):
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.ver == 1
assert "EXTVER" not in hdu.header
hdu.ver = 2
assert hdu.ver == 2
assert hdu.header["EXTVER"] == 2
# Passing name to constructor
hdu = hducls(ver=3)
assert hdu.ver == 3
assert hdu.header["EXTVER"] == 3
# And overriding a header with a different extver
hdr = fits.Header()
hdr["EXTVER"] = 4
hdu = hducls(header=hdr, ver=5)
assert hdu.ver == 5
assert hdu.header["EXTVER"] == 5
def test_unicode_colname(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5204
"Handle unicode FITS BinTable column names on Python 2"
"""
col = fits.Column(name="spam", format="E", array=[42.0])
# This used to raise a TypeError, now it works
fits.BinTableHDU.from_columns([col])
def test_bin_table_with_logical_array(self):
c1 = fits.Column(name="flag", format="2L", array=[[True, False], [False, True]])
coldefs = fits.ColDefs([c1])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
assert (
tbhdu1.data.field("flag")[0] == np.array([True, False], dtype=bool)
).all()
assert (
tbhdu1.data.field("flag")[1] == np.array([False, True], dtype=bool)
).all()
tbhdu = fits.BinTableHDU.from_columns(tbhdu1.data)
assert (
tbhdu.data.field("flag")[0] == np.array([True, False], dtype=bool)
).all()
assert (
tbhdu.data.field("flag")[1] == np.array([False, True], dtype=bool)
).all()
def test_fits_rec_column_access(self):
tbdata = fits.getdata(self.data("table.fits"))
assert (tbdata.V_mag == tbdata.field("V_mag")).all()
assert (tbdata.V_mag == tbdata["V_mag"]).all()
# Table with scaling (c3) and tnull (c1)
tbdata = fits.getdata(self.data("tb.fits"))
for col in ("c1", "c2", "c3", "c4"):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# ascii table
tbdata = fits.getdata(self.data("ascii.fits"))
for col in ("a", "b"):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# with VLA column
col1 = fits.Column(
name="x",
format="PI()",
array=np.array([[45, 56], [11, 12, 13]], dtype=np.object_),
)
hdu = fits.BinTableHDU.from_columns([col1])
assert type(hdu.data["x"]) == type(hdu.data.x)
assert (hdu.data["x"][0] == hdu.data.x[0]).all()
assert (hdu.data["x"][1] == hdu.data.x[1]).all()
def test_table_with_zero_width_column(self):
hdul = fits.open(self.data("zerowidth.fits"))
tbhdu = hdul[2] # This HDU contains a zero-width column 'ORBPARM'
assert "ORBPARM" in tbhdu.columns.names
# The ORBPARM column should not be in the data, though the data should
# be readable
assert "ORBPARM" in tbhdu.data.names
assert "ORBPARM" in tbhdu.data.dtype.names
# Verify that some of the data columns are still correctly accessible
# by name
assert tbhdu.data[0]["ANNAME"] == "VLA:_W16"
assert comparefloats(
tbhdu.data[0]["STABXYZ"],
np.array([499.85566663, -1317.99231554, -735.18866164], dtype=np.float64),
)
assert tbhdu.data[0]["NOSTA"] == 1
assert tbhdu.data[0]["MNTSTA"] == 0
assert tbhdu.data[-1]["ANNAME"] == "VPT:_OUT"
assert comparefloats(
tbhdu.data[-1]["STABXYZ"], np.array([0.0, 0.0, 0.0], dtype=np.float64)
)
assert tbhdu.data[-1]["NOSTA"] == 29
assert tbhdu.data[-1]["MNTSTA"] == 0
hdul.writeto(self.temp("newtable.fits"))
hdul.close()
hdul = fits.open(self.temp("newtable.fits"))
tbhdu = hdul[2]
# Verify that the previous tests still hold after writing
assert "ORBPARM" in tbhdu.columns.names
assert "ORBPARM" in tbhdu.data.names
assert "ORBPARM" in tbhdu.data.dtype.names
assert tbhdu.data[0]["ANNAME"] == "VLA:_W16"
assert comparefloats(
tbhdu.data[0]["STABXYZ"],
np.array([499.85566663, -1317.99231554, -735.18866164], dtype=np.float64),
)
assert tbhdu.data[0]["NOSTA"] == 1
assert tbhdu.data[0]["MNTSTA"] == 0
assert tbhdu.data[-1]["ANNAME"] == "VPT:_OUT"
assert comparefloats(
tbhdu.data[-1]["STABXYZ"], np.array([0.0, 0.0, 0.0], dtype=np.float64)
)
assert tbhdu.data[-1]["NOSTA"] == 29
assert tbhdu.data[-1]["MNTSTA"] == 0
hdul.close()
def test_string_column_padding(self):
a = ["img1", "img2", "img3a", "p"]
s = (
"img1\x00\x00\x00\x00\x00\x00"
"img2\x00\x00\x00\x00\x00\x00"
"img3a\x00\x00\x00\x00\x00"
"p\x00\x00\x00\x00\x00\x00\x00\x00\x00"
)
acol = fits.Column(name="MEMNAME", format="A10", array=chararray.array(a))
ahdu = fits.BinTableHDU.from_columns([acol])
assert ahdu.data.tobytes().decode("raw-unicode-escape") == s
ahdu.writeto(self.temp("newtable.fits"))
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s
assert (hdul[1].data["MEMNAME"] == a).all()
del hdul
ahdu = fits.TableHDU.from_columns([acol])
ahdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s.replace(
"\x00", " "
)
assert (hdul[1].data["MEMNAME"] == a).all()
ahdu = fits.BinTableHDU.from_columns(hdul[1].data.copy())
del hdul
# Now serialize once more as a binary table; padding bytes should
# revert to zeroes
ahdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s
assert (hdul[1].data["MEMNAME"] == a).all()
def test_multi_dimensional_columns(self):
"""
Tests the multidimensional column implementation with both numeric
arrays and string arrays.
"""
data = np.rec.array(
[
([0, 1, 2, 3, 4, 5], "row1" * 2),
([6, 7, 8, 9, 0, 1], "row2" * 2),
([2, 3, 4, 5, 6, 7], "row3" * 2),
],
formats="6i4,a8",
)
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp("newtable.fits"))
with fits.open(self.temp("newtable.fits"), mode="update") as hdul:
# Modify the TDIM fields to my own specification
hdul[1].header["TDIM1"] = "(2,3)"
hdul[1].header["TDIM2"] = "(4,2)"
with fits.open(self.temp("newtable.fits")) as hdul:
thdu = hdul[1]
c1 = thdu.data.field(0)
c2 = thdu.data.field(1)
assert c1.shape == (3, 3, 2)
assert c2.shape == (3, 2)
assert (
c1
== np.array(
[
[[0, 1], [2, 3], [4, 5]],
[[6, 7], [8, 9], [0, 1]],
[[2, 3], [4, 5], [6, 7]],
]
)
).all()
assert (
c2 == np.array([["row1", "row1"], ["row2", "row2"], ["row3", "row3"]])
).all()
del c1
del c2
del thdu
del hdul
# Test setting the TDIMn header based on the column data
data = np.zeros(3, dtype=[("x", "f4"), ("s", "S5", 4)])
data["x"] = 1, 2, 3
data["s"] = "ok"
fits.writeto(self.temp("newtable.fits"), data, overwrite=True)
t = fits.getdata(self.temp("newtable.fits"))
assert t.field(1).dtype.str[-1] == "5"
assert t.field(1).shape == (3, 4)
# Like the previous test, but with an extra dimension (a bit more
# complicated)
data = np.zeros(3, dtype=[("x", "f4"), ("s", "S5", (4, 3))])
data["x"] = 1, 2, 3
data["s"] = "ok"
del t
fits.writeto(self.temp("newtable.fits"), data, overwrite=True)
t = fits.getdata(self.temp("newtable.fits"))
assert t.field(1).dtype.str[-1] == "5"
assert t.field(1).shape == (3, 4, 3)
def test_oned_array_single_element(self):
# a table with rows that are 1d arrays of a single value
data = np.array([(1,), (2,)], dtype=([("x", "i4", (1,))]))
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp("onedtable.fits"))
with fits.open(self.temp("onedtable.fits")) as hdul:
thdu = hdul[1]
c = thdu.data.field(0)
assert c.shape == (2, 1)
assert thdu.header["TDIM1"] == "(1)"
def test_bin_table_init_from_string_array_column(self):
"""
Tests two ways of creating a new `BinTableHDU` from a column of
string arrays.
        This tests for a couple of different regressions, and ensures that
both BinTableHDU(data=arr) and BinTableHDU.from_columns(arr) work
equivalently.
Some of this is redundant with the following test, but checks some
subtly different cases.
"""
data = [[b"abcd", b"efgh"], [b"ijkl", b"mnop"], [b"qrst", b"uvwx"]]
arr = np.array(
[(data,), (data,), (data,), (data,), (data,)], dtype=[("S", "(3, 2)S4")]
)
tbhdu1 = fits.BinTableHDU(data=arr)
def test_dims_and_roundtrip(tbhdu):
assert tbhdu.data["S"].shape == (5, 3, 2)
assert tbhdu.data["S"].dtype.str.endswith("U4")
tbhdu.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header["TDIM1"] == "(4,2,3)"
assert tbhdu2.data["S"].shape == (5, 3, 2)
assert tbhdu.data["S"].dtype.str.endswith("U4")
assert np.all(tbhdu2.data["S"] == tbhdu.data["S"])
test_dims_and_roundtrip(tbhdu1)
tbhdu2 = fits.BinTableHDU.from_columns(arr)
test_dims_and_roundtrip(tbhdu2)
def test_columns_with_truncating_tdim(self):
"""
According to the FITS standard (section 7.3.2):
If the number of elements in the array implied by the TDIMn is less
        than the allocated size of the array in the FITS file, then the
unused trailing elements should be interpreted as containing
undefined fill values.
*deep sigh* What this means is if a column has a repeat count larger
than the number of elements indicated by its TDIM (ex: TDIM1 = '(2,2)',
but TFORM1 = 6I), then instead of this being an outright error we are
to take the first 4 elements as implied by the TDIM and ignore the
additional two trailing elements.
"""
# It's hard to even successfully create a table like this. I think
# it *should* be difficult, but once created it should at least be
# possible to read.
arr1 = [[b"ab", b"cd"], [b"ef", b"gh"], [b"ij", b"kl"]]
arr2 = [1, 2, 3, 4, 5]
arr = np.array(
[(arr1, arr2), (arr1, arr2)], dtype=[("a", "(3, 2)S2"), ("b", "5i8")]
)
tbhdu = fits.BinTableHDU(data=arr)
tbhdu.writeto(self.temp("test.fits"))
with open(self.temp("test.fits"), "rb") as f:
raw_bytes = f.read()
# Artificially truncate TDIM in the header; this seems to be the
# easiest way to do this while getting around Astropy's insistence on the
# data and header matching perfectly; again, we have no interest in
# making it possible to write files in this format, only read them
with open(self.temp("test.fits"), "wb") as f:
f.write(raw_bytes.replace(b"(2,2,3)", b"(2,2,2)"))
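        # Arithmetic behind the patch: TFORM1 = '12A' allocates 12 chars
        # per row, while the truncated TDIM1 = '(2,2,2)' accounts for only
        # 2*2*2 = 8 of them; the remaining 4 are undefined fill per the
        # FITS standard, and reading should still succeed.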
with fits.open(self.temp("test.fits")) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header["TDIM1"] == "(2,2,2)"
assert tbhdu2.header["TFORM1"] == "12A"
for row in tbhdu2.data:
assert np.all(row["a"] == [["ab", "cd"], ["ef", "gh"]])
assert np.all(row["b"] == [1, 2, 3, 4, 5])
def test_string_array_round_trip(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201"""
data = [["abc", "def", "ghi"], ["jkl", "mno", "pqr"], ["stu", "vwx", "yz "]]
recarr = np.rec.array([(data,), (data,)], formats=["(3,3)S3"])
t = fits.BinTableHDU(data=recarr)
t.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert "TDIM1" in h[1].header
assert h[1].header["TDIM1"] == "(3,3,3)"
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (
h[1].data.field(0)[0] == np.char.decode(recarr.field(0)[0], "ascii")
).all()
with fits.open(self.temp("test.fits")) as h:
# Access the data; I think this is necessary to exhibit the bug
# reported in https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201
h[1].data[:]
h.writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits")) as h:
assert "TDIM1" in h[1].header
assert h[1].header["TDIM1"] == "(3,3,3)"
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (
h[1].data.field(0)[0] == np.char.decode(recarr.field(0)[0], "ascii")
).all()
def test_new_table_with_nd_column(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/3
"""
arra = np.array(["a", "b"], dtype="|S1")
arrb = np.array([["a", "bc"], ["cd", "e"]], dtype="|S2")
arrc = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
cols = [
fits.Column(name="str", format="1A", array=arra),
fits.Column(name="strarray", format="4A", dim="(2,2)", array=arrb),
fits.Column(name="intarray", format="4I", dim="(2, 2)", array=arrc),
]
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
# Need to force string arrays to byte arrays in order to compare
# correctly on Python 3
assert (h[1].data["str"].encode("ascii") == arra).all()
assert (h[1].data["strarray"].encode("ascii") == arrb).all()
assert (h[1].data["intarray"] == arrc).all()
def test_mismatched_tform_and_tdim(self):
"""Normally the product of the dimensions listed in a TDIMn keyword
must be less than or equal to the repeat count in the TFORMn keyword.
This tests that this works if less than (treating the trailing bytes
as unspecified fill values per the FITS standard) and fails if the
dimensions specified by TDIMn are greater than the repeat count.
"""
arra = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
arrb = np.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]])
cols = [
fits.Column(name="a", format="20I", dim="(2,2)", array=arra),
fits.Column(name="b", format="4I", dim="(2,2)", array=arrb),
]
# The first column has the mismatched repeat count
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert h[1].header["TFORM1"] == "20I"
assert h[1].header["TFORM2"] == "4I"
assert h[1].header["TDIM1"] == h[1].header["TDIM2"] == "(2,2)"
assert (h[1].data["a"] == arra).all()
assert (h[1].data["b"] == arrb).all()
            assert h[1].data.itemsize == 48  # (20 + 4) int16 values = 48 bytes
# If dims is more than the repeat count in the format specifier raise
# an error
pytest.raises(
VerifyError, fits.Column, name="a", format="2I", dim="(2,2)", array=arra
)
def test_tdim_of_size_one(self):
"""Regression test for https://github.com/astropy/astropy/pull/3580"""
with fits.open(self.data("tdim.fits")) as hdulist:
assert hdulist[1].data["V_mag"].shape == (3, 1, 1)
def test_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/52"""
with fits.open(self.data("table.fits")) as f:
data = f[1].data
targets = data.field("target")
s = data[:]
assert (s.field("target") == targets).all()
for n in range(len(targets) + 2):
s = data[:n]
assert (s.field("target") == targets[:n]).all()
s = data[n:]
assert (s.field("target") == targets[n:]).all()
s = data[::2]
assert (s.field("target") == targets[::2]).all()
s = data[::-1]
assert (s.field("target") == targets[::-1]).all()
def test_array_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/55"""
with fits.open(self.data("table.fits")) as f:
data = f[1].data
s1 = data[data["target"] == "NGC1001"]
s2 = data[np.where(data["target"] == "NGC1001")]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
def test_array_broadcasting(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/pull/48
"""
with fits.open(self.data("table.fits")) as hdu:
data = hdu[1].data
data["V_mag"] = 0
assert np.all(data["V_mag"] == 0)
data["V_mag"] = 1
assert np.all(data["V_mag"] == 1)
for container in (list, tuple, np.array):
data["V_mag"] = container([1, 2, 3])
assert np.array_equal(data["V_mag"], np.array([1, 2, 3]))
def test_array_slicing_readonly(self):
"""
Like test_array_slicing but with the file opened in 'readonly' mode.
Regression test for a crash when slicing readonly memmap'd tables.
"""
with fits.open(self.data("table.fits"), mode="readonly") as f:
data = f[1].data
s1 = data[data["target"] == "NGC1001"]
s2 = data[np.where(data["target"] == "NGC1001")]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
@pytest.mark.parametrize("tablename", ["table.fits", "tb.fits"])
def test_dump_load_round_trip(self, tablename):
"""
A simple test of the dump/load methods; dump the data, column, and
header files and try to reload the table from them.
"""
with fits.open(self.data(tablename)) as hdul:
tbhdu = hdul[1]
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
_assert_attr_col(new_tbhdu, hdul[1])
    def test_dump_load_array_columns(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/22
Ensures that a table containing a multi-value array column can be
dumped and loaded successfully.
"""
data = np.rec.array(
[("a", [1, 2, 3, 4], 0.1), ("b", [5, 6, 7, 8], 0.2)], formats="a1,4i4,f8"
)
tbhdu = fits.BinTableHDU.from_columns(data)
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
assert str(tbhdu.header) == str(new_tbhdu.header)
def test_load_guess_format(self):
"""
Tests loading a table dump with no supplied coldefs or header, so that
the table format has to be guessed at. There is of course no exact
science to this; the table that's produced simply uses sensible guesses
for that format. Ideally this should never have to be used.
"""
# Create a table containing a variety of data types.
a0 = np.array([False, True, False], dtype=bool)
c0 = fits.Column(name="c0", format="L", array=a0)
# Format X currently not supported by the format
# a1 = np.array([[0], [1], [0]], dtype=np.uint8)
# c1 = fits.Column(name='c1', format='X', array=a1)
a2 = np.array([1, 128, 255], dtype=np.uint8)
c2 = fits.Column(name="c2", format="B", array=a2)
a3 = np.array([-30000, 1, 256], dtype=np.int16)
c3 = fits.Column(name="c3", format="I", array=a3)
a4 = np.array([-123123123, 1234, 123123123], dtype=np.int32)
c4 = fits.Column(name="c4", format="J", array=a4)
a5 = np.array(["a", "abc", "ab"])
c5 = fits.Column(name="c5", format="A3", array=a5)
a6 = np.array([1.1, 2.2, 3.3], dtype=np.float64)
c6 = fits.Column(name="c6", format="D", array=a6)
a7 = np.array([1.1 + 2.2j, 3.3 + 4.4j, 5.5 + 6.6j], dtype=np.complex128)
c7 = fits.Column(name="c7", format="M", array=a7)
a8 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
c8 = fits.Column(name="c8", format="PJ()", array=a8)
tbhdu = fits.BinTableHDU.from_columns([c0, c2, c3, c4, c5, c6, c7, c8])
datafile = self.temp("data.txt")
tbhdu.dump(datafile)
new_tbhdu = fits.BinTableHDU.load(datafile)
# In this particular case the record data at least should be equivalent
assert comparerecords(tbhdu.data, new_tbhdu.data)
def test_attribute_field_shadowing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/86
Numpy recarray objects have a poorly-considered feature of allowing
field access by attribute lookup. However, if a field name coincides
with an existing attribute/method of the array, the existing name takes
        precedence (making the attribute-based field lookup completely unreliable
in general cases).
This ensures that any FITS_rec attributes still work correctly even
when there is a field with the same name as that attribute.
"""
c1 = fits.Column(name="names", format="I", array=[1])
c2 = fits.Column(name="formats", format="I", array=[2])
c3 = fits.Column(name="other", format="I", array=[3])
t = fits.BinTableHDU.from_columns([c1, c2, c3])
assert t.data.names == ["names", "formats", "other"]
assert t.data.formats == ["I"] * 3
assert (t.data["names"] == [1]).all()
assert (t.data["formats"] == [2]).all()
assert (t.data.other == [3]).all()
def test_table_from_bool_fields(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/113
Tests creating a table from a recarray containing numpy.bool columns.
"""
array = np.rec.array([(True, False), (False, True)], formats="|b1,|b1")
thdu = fits.BinTableHDU.from_columns(array)
assert thdu.columns.formats == ["L", "L"]
assert comparerecords(thdu.data, array)
# Test round trip
thdu.writeto(self.temp("table.fits"))
data = fits.getdata(self.temp("table.fits"), ext=1)
assert thdu.columns.formats == ["L", "L"]
assert comparerecords(data, array)
def test_table_from_bool_fields2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/215
Tests the case where a multi-field ndarray (not a recarray) containing
a bool field is used to initialize a `BinTableHDU`.
"""
arr = np.array([(False,), (True,), (False,)], dtype=[("a", "?")])
hdu = fits.BinTableHDU(data=arr)
assert (hdu.data["a"] == arr["a"]).all()
def test_bool_column_update(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/139"""
c1 = fits.Column("F1", "L", array=[True, False])
c2 = fits.Column("F2", "L", array=[False, True])
thdu = fits.BinTableHDU.from_columns(fits.ColDefs([c1, c2]))
thdu.writeto(self.temp("table.fits"))
with fits.open(self.temp("table.fits"), mode="update") as hdul:
hdul[1].data["F1"][1] = True
hdul[1].data["F2"][0] = True
with fits.open(self.temp("table.fits")) as hdul:
assert (hdul[1].data["F1"] == [True, True]).all()
assert (hdul[1].data["F2"] == [True, True]).all()
def test_missing_tnull(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/197"""
c = fits.Column(
"F1",
"A3",
null="---",
array=np.array(["1.0", "2.0", "---", "3.0"]),
ascii=True,
)
table = fits.TableHDU.from_columns([c])
table.writeto(self.temp("test.fits"))
# Now let's delete the TNULL1 keyword, making this essentially
# unreadable
with fits.open(self.temp("test.fits"), mode="update") as h:
h[1].header["TFORM1"] = "E3"
del h[1].header["TNULL1"]
with fits.open(self.temp("test.fits")) as h:
pytest.raises(ValueError, lambda: h[1].data["F1"])
try:
with fits.open(self.temp("test.fits")) as h:
h[1].data["F1"]
except ValueError as e:
assert str(e).endswith(
"the header may be missing the necessary TNULL1 "
"keyword or the table contains invalid data"
)
def test_blank_field_zero(self):
"""Regression test for https://github.com/astropy/astropy/issues/5134
Blank values in numerical columns of ASCII tables should be replaced
with zeros, so they can be loaded into numpy arrays.
When a TNULL value is set and there are blank fields not equal to that
value, they should be replaced with zeros.
"""
# Test an integer column with blank string as null
nullval1 = " "
c1 = fits.Column(
"F1",
format="I8",
null=nullval1,
array=np.array([0, 1, 2, 3, 4]),
ascii=True,
)
table = fits.TableHDU.from_columns([c1])
table.writeto(self.temp("ascii_null.fits"))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp("ascii_null.fits"), mode="r+") as h:
nulled = h.read().replace("2 ", " ")
h.seek(0)
h.write(nulled)
with fits.open(self.temp("ascii_null.fits"), memmap=True) as f:
assert f[1].data[2][0] == 0
# Test a float column with a null value set and blank fields.
nullval2 = "NaN"
c2 = fits.Column(
"F1",
format="F12.8",
null=nullval2,
array=np.array([1.0, 2.0, 3.0, 4.0]),
ascii=True,
)
table = fits.TableHDU.from_columns([c2])
table.writeto(self.temp("ascii_null2.fits"))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp("ascii_null2.fits"), mode="r+") as h:
nulled = h.read().replace("3.00000000", " ")
h.seek(0)
h.write(nulled)
with fits.open(self.temp("ascii_null2.fits"), memmap=True) as f:
# (Currently it should evaluate to 0.0, but if a TODO in fitsrec is
# completed, then it should evaluate to NaN.)
assert f[1].data[2][0] == 0.0 or np.isnan(f[1].data[2][0])
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column("mag", format="E", array=arr)
assert (arr == col.array).all()
def test_table_none(self):
"""Regression test
for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data("tb.fits")) as h:
h[1].data
h[1].data = None
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
h[1].writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert h[1].header["NAXIS"] == 2
assert h[1].header["NAXIS1"] == 12
assert h[1].header["NAXIS2"] == 0
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
    def test_unnecessary_table_load(self):
"""Test unnecessary parsing and processing of FITS tables when writing
directly from one FITS file to a new file without first reading the
data for user manipulation.
In other words, it should be possible to do a direct copy of the raw
data without unnecessary processing of the data.
"""
with fits.open(self.data("table.fits")) as h:
h[1].writeto(self.temp("test.fits"))
# Since this was a direct copy the h[1].data attribute should not have
# even been accessed (since this means the data was read and parsed)
assert "data" not in h[1].__dict__
with fits.open(self.data("table.fits")) as h1:
with fits.open(self.temp("test.fits")) as h2:
assert str(h1[1].header) == str(h2[1].header)
assert comparerecords(h1[1].data, h2[1].data)
def test_table_from_columns_of_other_table(self):
"""Tests a rare corner case where the columns of an existing table
are used to create a new table with the new_table function. In this
specific case, however, the existing table's data has not been read
yet, so new_table has to get at it through the Delayed proxy.
Note: Although this previously tested new_table it now uses
BinTableHDU.from_columns directly, around which new_table is a mere
wrapper.
"""
hdul = fits.open(self.data("table.fits"))
# Make sure the column array is in fact delayed...
assert isinstance(hdul[1].columns._arrays[0], Delayed)
# Create a new table...
t = fits.BinTableHDU.from_columns(hdul[1].columns)
# The original columns should no longer be delayed...
assert not isinstance(hdul[1].columns._arrays[0], Delayed)
t.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul2:
assert comparerecords(hdul[1].data, hdul2[1].data)
hdul.close()
def test_bintable_to_asciitable(self):
"""Tests initializing a TableHDU with the data from a BinTableHDU."""
with fits.open(self.data("tb.fits")) as hdul:
tbdata = hdul[1].data
tbhdu = fits.TableHDU(data=tbdata)
tbhdu.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul2:
tbdata2 = hdul2[1].data
assert np.all(tbdata["c1"] == tbdata2["c1"])
assert np.all(tbdata["c2"] == tbdata2["c2"])
# c3 gets converted from float32 to float64 when writing
# test.fits, so cast to float32 before testing that the correct
# value is retrieved
assert np.all(
tbdata["c3"].astype(np.float32) == tbdata2["c3"].astype(np.float32)
)
# c4 is a boolean column in the original table; we want ASCII
# columns to convert these to columns of 'T'/'F' strings
assert np.all(np.where(tbdata["c4"], "T", "F") == tbdata2["c4"])
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data("tb.fits")) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data("ascii.fits")) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data("random_groups.fits")) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data("zerowidth.fits")) as zwc:
            # Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(UserWarning, match="Field 2 has a repeat count of 0"):
assert comparerecords(zwc_pl, zwc[2].data)
def test_zero_length_table(self):
array = np.array([], dtype=[("a", "i8"), ("b", "S64"), ("c", ("i4", (3, 2)))])
hdu = fits.BinTableHDU(array)
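        # Row width check: 8 bytes (i8) + 64 bytes (S64) + 3*2*4 = 24 bytes
        # for the i4 array gives the NAXIS1 == 96 asserted below.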
assert hdu.header["NAXIS1"] == 96
assert hdu.header["NAXIS2"] == 0
assert hdu.header["TDIM3"] == "(2,3)"
field = hdu.data.field(1)
assert field.shape == (0,)
def test_dim_column_byte_order_mismatch(self):
"""
When creating a table column with non-trivial TDIMn, and
big-endian array data read from an existing FITS file, the data
should not be unnecessarily byteswapped.
Regression test for https://github.com/astropy/astropy/issues/3561
"""
data = fits.getdata(self.data("random_groups.fits"))["DATA"]
col = fits.Column(name="TEST", array=data, dim="(3,1,128,1,1)", format="1152E")
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert np.all(hdul[1].data["TEST"] == data)
def test_fits_rec_from_existing(self):
"""
Tests creating a `FITS_rec` object with `FITS_rec.from_columns`
from an existing `FITS_rec` object read from a FITS file.
This ensures that the per-column arrays are updated properly.
Regression test for https://github.com/spacetelescope/PyFITS/issues/99
"""
# The use case that revealed this problem was trying to create a new
# table from an existing table, but with additional rows so that we can
# append data from a second table (with the same column structure)
data1 = fits.getdata(self.data("tb.fits"))
data2 = fits.getdata(self.data("tb.fits"))
nrows = len(data1) + len(data2)
merged = fits.FITS_rec.from_columns(data1, nrows=nrows)
merged[len(data1) :] = data2
mask = merged["c1"] > 1
masked = merged[mask]
# The test table only has two rows, only the second of which is > 1 for
# the 'c1' column
assert comparerecords(data1[1:], masked[:1])
assert comparerecords(data1[1:], masked[1:])
# Double check that the original data1 table hasn't been affected by
# its use in creating the "merged" table
assert comparerecords(data1, fits.getdata(self.data("tb.fits")))
def test_update_string_column_inplace(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4452
Ensure that changes to values in a string column are saved when
a file is opened in ``mode='update'``.
"""
data = np.array([("abc",)], dtype=[("a", "S3")])
fits.writeto(self.temp("test.fits"), data)
with fits.open(self.temp("test.fits"), mode="update") as hdul:
hdul[1].data["a"][0] = "XYZ"
assert hdul[1].data["a"][0] == "XYZ"
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[1].data["a"][0] == "XYZ"
# Test update but with a non-trivial TDIMn
data = np.array(
[([["abc", "def", "geh"], ["ijk", "lmn", "opq"]],)],
dtype=[("a", ("S3", (2, 3)))],
)
fits.writeto(self.temp("test2.fits"), data)
expected = [["abc", "def", "geh"], ["ijk", "XYZ", "opq"]]
with fits.open(self.temp("test2.fits"), mode="update") as hdul:
assert hdul[1].header["TDIM1"] == "(3,3,2)"
# Note: Previously I wrote data['a'][0][1, 1] to address
# the single row. However, this is broken for chararray because
# data['a'][0] does *not* return a view of the original array--this
# is a bug in chararray though and not a bug in any FITS-specific
# code so we'll roll with it for now...
# (by the way the bug in question is fixed in newer Numpy versions)
hdul[1].data["a"][0, 1, 1] = "XYZ"
assert np.all(hdul[1].data["a"][0] == expected)
with fits.open(self.temp("test2.fits")) as hdul:
assert hdul[1].header["TDIM1"] == "(3,3,2)"
assert np.all(hdul[1].data["a"][0] == expected)
@pytest.mark.skipif(not HAVE_OBJGRAPH, reason="requires objgraph")
def test_reference_leak(self):
"""Regression test for https://github.com/astropy/astropy/pull/520"""
def readfile(filename):
with fits.open(filename) as hdul:
data = hdul[1].data.copy()
for colname in data.dtype.names:
data[colname]
with _refcounting("FITS_rec"):
readfile(self.data("memtest.fits"))
@pytest.mark.skipif(not HAVE_OBJGRAPH, reason="requires objgraph")
@pytest.mark.slow
def test_reference_leak2(self, tmp_path):
"""
Regression test for https://github.com/astropy/astropy/pull/4539
This actually re-runs a small set of tests that I found, during
careful testing, exhibited the reference leaks fixed by #4539, but
now with reference counting around each test to ensure that the
leaks are fixed.
"""
from .test_connect import TestMultipleHDU
from .test_core import TestCore
t1 = TestCore()
t1.setup_method()
try:
with _refcounting("FITS_rec"):
t1.test_add_del_columns2()
finally:
t1.teardown_method()
del t1
t2 = self.__class__()
for test_name in [
"test_recarray_to_bintablehdu",
"test_numpy_ndarray_to_bintablehdu",
"test_new_table_from_recarray",
"test_new_fitsrec",
]:
t2.setup_method()
try:
with _refcounting("FITS_rec"):
getattr(t2, test_name)()
finally:
t2.teardown_method()
del t2
t3 = TestMultipleHDU()
t3.setup_class()
try:
with _refcounting("FITS_rec"):
t3.test_read(tmp_path)
finally:
t3.teardown_class()
del t3
def test_dump_overwrite(self):
with fits.open(self.data("table.fits")) as hdul:
tbhdu = hdul[1]
datafile = self.temp("data.txt")
cdfile = self.temp("coldefs.txt")
hfile = self.temp("header.txt")
tbhdu.dump(datafile, cdfile, hfile)
msg = (
r"File .* already exists\. File .* already exists\. File "
r".* already exists\. If you mean to replace the "
r"file\(s\) then use the argument 'overwrite=True'\."
)
with pytest.raises(OSError, match=msg):
tbhdu.dump(datafile, cdfile, hfile)
tbhdu.dump(datafile, cdfile, hfile, overwrite=True)
def test_pseudo_unsigned_ints(self):
"""
Tests updating a table column containing pseudo-unsigned ints.
"""
data = np.array([1, 2, 3], dtype=np.uint32)
col = fits.Column(name="A", format="1J", bzero=2**31, array=data)
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp("test.fits"))
# Test that the file wrote out correctly
with fits.open(self.temp("test.fits"), uint=True) as hdul:
hdu = hdul[1]
assert "TZERO1" in hdu.header
assert hdu.header["TZERO1"] == 2**31
assert hdu.data["A"].dtype == np.dtype("uint32")
assert np.all(hdu.data["A"] == data)
# Test updating the unsigned int data
hdu.data["A"][0] = 99
hdu.writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits"), uint=True) as hdul:
hdu = hdul[1]
assert "TZERO1" in hdu.header
assert hdu.header["TZERO1"] == 2**31
assert hdu.data["A"].dtype == np.dtype("uint32")
assert np.all(hdu.data["A"] == [99, 2, 3])
def test_column_with_scaling(self):
"""Check that a scaled column if correctly saved once it is modified.
Regression test for https://github.com/astropy/astropy/issues/6887
"""
c1 = fits.Column(
name="c1",
array=np.array([1], dtype=">i2"),
format="1I",
bscale=1,
bzero=32768,
)
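        # With bscale=1 and bzero=32768 this is the unsigned-int16
        # convention: a physical value v is stored on disk as v - 32768,
        # so in-memory (physical) updates below must be rescaled
        # transparently when written back out.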
S = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU.from_columns([c1])])
# Change value in memory
S[1].data["c1"][0] = 2
S.writeto(self.temp("a.fits"))
assert S[1].data["c1"] == 2
# Read and change value in memory
with fits.open(self.temp("a.fits")) as X:
X[1].data["c1"][0] = 10
assert X[1].data["c1"][0] == 10
# Write back to file
X.writeto(self.temp("b.fits"))
# Now check the file
with fits.open(self.temp("b.fits")) as hdul:
assert hdul[1].data["c1"][0] == 10
def test_ascii_inttypes(self):
"""
Test correct integer dtypes according to ASCII table field widths.
Regression for https://github.com/astropy/astropy/issues/9899
"""
i08 = np.array([2**3, 2**23, -(2**22), 10, 2**23], dtype="i4")
i10 = np.array([2**8, 2**31 - 1, -(2**29), 30, 2**31 - 1], dtype="i8")
i20 = np.array([2**16, 2**63 - 1, -(2**63), 40, 2**63 - 1], dtype="i8")
i02 = np.array([2**8, 2**13, -(2**9), 50, 2**13], dtype="i2")
t0 = Table([i08, i08 * 2, i10, i20, i02])
t1 = Table.read(self.data("ascii_i4-i20.fits"))
assert t1.dtype == t0.dtype
assert comparerecords(t1, t0)
def test_ascii_floattypes(self):
"""Test different float formats."""
col1 = fits.Column(
name="a", format="D", array=np.array([11.1, 12.2]), ascii=True
)
col2 = fits.Column(
name="b", format="D16", array=np.array([15.5, 16.6]), ascii=True
)
col3 = fits.Column(
name="c", format="D16.7", array=np.array([1.1, 2.2]), ascii=True
)
hdu = fits.TableHDU.from_columns([col1, col2, col3])
hdu.writeto(self.temp("foo.fits"))
with fits.open(self.temp("foo.fits"), memmap=False) as hdul:
assert comparerecords(hdul[1].data, hdu.data)
@contextlib.contextmanager
def _refcounting(type_):
"""
Perform the body of a with statement with reference counting for the
given type (given by class name)--raises an assertion error if there
are more unfreed objects of the given type than when we entered the
with statement.
"""
gc.collect()
refcount = len(objgraph.by_type(type_))
yield refcount
gc.collect()
assert (
len(objgraph.by_type(type_)) <= refcount
), f"More {type_!r} objects still in memory than before."
class TestVLATables(FitsTestCase):
"""Tests specific to tables containing variable-length arrays."""
def test_variable_length_columns(self):
def test(format_code):
col = fits.Column(
name="QUAL_SPE", format=format_code, array=[[0] * 1571] * 225
)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp("toto.fits"), overwrite=True)
with fits.open(self.temp("toto.fits")) as toto:
q = toto[1].data.field("QUAL_SPE")
assert (q[0][4:8] == np.array([0, 0, 0, 0], dtype=np.uint8)).all()
assert toto[1].columns[0].format.endswith("J(1571)")
for code in ("PJ()", "QJ()"):
test(code)
def test_extend_variable_length_array(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/54"""
def test(format_code):
arr = [[1] * 10] * 10
col1 = fits.Column(name="TESTVLF", format=format_code, array=arr)
col2 = fits.Column(name="TESTSCA", format="J", array=[1] * 10)
tb_hdu = fits.BinTableHDU.from_columns([col1, col2], nrows=15)
# This asserts that the normal 'scalar' column's length was extended
assert len(tb_hdu.data["TESTSCA"]) == 15
# And this asserts that the VLF column was extended in the same manner
assert len(tb_hdu.data["TESTVLF"]) == 15
# We can't compare the whole array since the _VLF is an array of
# objects, but comparing just the edge case rows should suffice
assert (tb_hdu.data["TESTVLF"][0] == arr[0]).all()
assert (tb_hdu.data["TESTVLF"][9] == arr[9]).all()
assert (tb_hdu.data["TESTVLF"][10] == ([0] * 10)).all()
assert (tb_hdu.data["TESTVLF"][-1] == ([0] * 10)).all()
for code in ("PJ()", "QJ()"):
test(code)
def test_variable_length_table_format_pd_from_object_array(self):
def test(format_code):
a = np.array(
[np.array([7.2e-20, 7.3e-20]), np.array([0.0]), np.array([0.0])], "O"
)
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith("D(2)")
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ("PD()", "QD()"):
test(code)
def test_variable_length_table_format_pd_from_list(self):
def test(format_code):
a = [np.array([7.2e-20, 7.3e-20]), np.array([0.0]), np.array([0.0])]
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith("D(2)")
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ("PD()", "QD()"):
test(code)
def test_variable_length_table_format_pa_from_object_array(self):
def test(format_code):
a = np.array(
[np.array(["a", "b", "c"]), np.array(["d", "e"]), np.array(["f"])], "O"
)
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].columns[0].format.endswith("A(3)")
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ("PA()", "QA()"):
test(code)
def test_variable_length_table_format_pa_from_list(self):
def test(format_code):
a = ["a", "ab", "abc"]
acol = fits.Column(name="testa", format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp("newtable.fits"), overwrite=True)
with fits.open(self.temp("newtable.fits")) as hdul:
assert hdul[1].columns[0].format.endswith("A(3)")
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ("PA()", "QA()"):
test(code)
def test_getdata_vla(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/200"""
def test(format_code):
col = fits.Column(
name="QUAL_SPE", format=format_code, array=[np.arange(1572)] * 225
)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp("toto.fits"), overwrite=True)
data = fits.getdata(self.temp("toto.fits"))
# Need to compare to the original data row by row since the FITS_rec
# returns an array of _VLA objects
for row_a, row_b in zip(data["QUAL_SPE"], col.array):
assert (row_a == row_b).all()
for code in ("PJ()", "QJ()"):
test(code)
@pytest.mark.skipif(
NUMPY_LT_1_22_1 and sys.platform == "win32",
reason="https://github.com/numpy/numpy/issues/20699",
)
def test_copy_vla(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/47
"""
# Make a file containing a couple of VLA tables
arr1 = [np.arange(n + 1) for n in range(255)]
arr2 = [np.arange(255, 256 + n) for n in range(255)]
# A dummy non-VLA column needed to reproduce issue #47
c = fits.Column("test", format="J", array=np.arange(255))
c1 = fits.Column("A", format="PJ", array=arr1)
c2 = fits.Column("B", format="PJ", array=arr2)
t1 = fits.BinTableHDU.from_columns([c, c1])
t2 = fits.BinTableHDU.from_columns([c, c2])
hdul = fits.HDUList([fits.PrimaryHDU(), t1, t2])
hdul.writeto(self.temp("test.fits"), overwrite=True)
# Just test that the test file wrote out correctly
with fits.open(self.temp("test.fits")) as h:
assert h[1].header["TFORM2"] == "PJ(255)"
assert h[2].header["TFORM2"] == "PJ(255)"
assert comparerecords(h[1].data, t1.data)
assert comparerecords(h[2].data, t2.data)
# Try copying the second VLA and writing to a new file
with fits.open(self.temp("test.fits")) as h:
new_hdu = fits.BinTableHDU(data=h[2].data, header=h[2].header)
new_hdu.writeto(self.temp("test3.fits"))
with fits.open(self.temp("test3.fits")) as h2:
assert comparerecords(h2[1].data, t2.data)
new_hdul = fits.HDUList([fits.PrimaryHDU()])
new_hdul.writeto(self.temp("test2.fits"))
# Open several copies of the test file and append copies of the second
# VLA table
with fits.open(self.temp("test2.fits"), mode="append") as new_hdul:
for _ in range(2):
with fits.open(self.temp("test.fits")) as h:
new_hdul.append(h[2])
new_hdul.flush()
# Test that all the VLA copies wrote correctly
with fits.open(self.temp("test2.fits")) as new_hdul:
for idx in range(1, 3):
assert comparerecords(new_hdul[idx].data, t2.data)
def test_vla_with_gap(self):
hdul = fits.open(self.data("theap-gap.fits"))
data = hdul[1].data
assert data.shape == (500,)
assert data["i"][497] == 497
assert np.array_equal(data["arr"][497], [0, 1, 2, 3, 4])
hdul.close()
def test_tolist(self):
col = fits.Column(
name="var",
format="PI()",
array=np.array([[1, 2, 3], [11, 12]], dtype=np.object_),
)
hdu = fits.BinTableHDU.from_columns([col])
assert hdu.data.tolist() == [[[1, 2, 3]], [[11, 12]]]
assert hdu.data["var"].tolist() == [[1, 2, 3], [11, 12]]
def test_tolist_from_file(self):
filename = self.data("variable_length_table.fits")
with fits.open(filename) as hdul:
hdu = hdul[1]
assert hdu.data.tolist() == [[[45, 56], [11, 3]], [[11, 12, 13], [12, 4]]]
assert hdu.data["var"].tolist() == [[45, 56], [11, 12, 13]]
@pytest.mark.skipif(sys.maxsize < 2**32, reason="requires 64-bit system")
@pytest.mark.skipif(sys.platform == "win32", reason="Cannot test on Windows")
@pytest.mark.hugemem
def test_heapsize_P_limit(self):
"""
Regression test for https://github.com/astropy/astropy/issues/10812
Check if the error is raised when the heap size is bigger than what can be
indexed with a 32 bit signed int.
"""
# a matrix with variable length array elements is created
nelem = 2**28
matrix = np.zeros(1, dtype=np.object_)
matrix[0] = np.arange(0.0, float(nelem + 1))
col = fits.Column(name="MATRIX", format=f"PD({nelem})", unit="", array=matrix)
t = fits.BinTableHDU.from_columns([col])
t.name = "MATRIX"
with pytest.raises(
ValueError, match="Please consider using the 'Q' format for your file."
):
t.writeto(self.temp("matrix.fits"))
@pytest.mark.skipif(sys.maxsize < 2**32, reason="requires 64-bit system")
@pytest.mark.skipif(sys.platform == "win32", reason="Cannot test on Windows")
@pytest.mark.hugemem
def test_heapsize_Q_limit(self):
"""
Regression test for https://github.com/astropy/astropy/issues/14808
Check that the error is no longer raised when the heap size is bigger
than what can be indexed with a 32 bit signed int.
"""
# a matrix with variable length array elements is created
nelem = 2**28
matrix = np.zeros(1, dtype=np.object_)
matrix[0] = np.arange(0.0, float(nelem + 1))
col = fits.Column(name="MATRIX", format=f"QD({nelem})", unit="", array=matrix)
t = fits.BinTableHDU.from_columns([col])
t.name = "MATRIX"
t.writeto(self.temp("matrix.fits"))
def test_empty_vla_raw_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/12881
Check that empty VLAs are correctly read.
"""
columns = [
fits.Column(name="integer", format="B", array=(1, 2)),
fits.Column(name="empty", format="PJ", array=([], [])),
]
fits.BinTableHDU.from_columns(columns).writeto(self.temp("bug.fits"))
with fits.open(self.temp("bug.fits")) as hdu:
# We can't compare the whole array since the _VLF is an array of
# objects, hence we compare elementwise
for i in range(len(hdu[1].data["empty"])):
assert np.array_equal(
hdu[1].data["empty"][i], np.array([], dtype=np.int32)
)
def test_multidim_VLA_tables(self):
"""
Check that multidimensional VLFs are correctly written and read.
See https://github.com/astropy/astropy/issues/12860
and https://github.com/astropy/astropy/issues/7810
"""
a = np.arange(5)
b = np.arange(7)
array = np.array([a, b], dtype=object)
col = fits.Column(name="test", format="PD(7)", dim="(7,1)", array=array)
fits.BinTableHDU.from_columns([col]).writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdus:
assert hdus[1].columns.formats == ["PD(7)"]
assert np.array_equal(
hdus[1].data["test"][0], np.array([[0.0, 1.0, 2.0, 3.0, 4.0]])
)
assert np.array_equal(
hdus[1].data["test"][1], np.array([[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]])
)
a = np.arange(10).reshape((5, 2))
b = np.arange(14).reshape((7, 2))
array = np.array([a, b], dtype=object)
col = fits.Column(name="test", format="PD(14)", dim="(2,7)", array=array)
fits.BinTableHDU.from_columns([col]).writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits")) as hdus:
assert hdus[1].columns.formats == ["PD(14)"]
assert np.array_equal(
hdus[1].data["test"][0],
np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]]),
)
assert np.array_equal(
hdus[1].data["test"][1],
np.array(
[
[0.0, 1.0],
[2.0, 3.0],
[4.0, 5.0],
[6.0, 7.0],
[8.0, 9.0],
[10.0, 11.0],
[12.0, 13.0],
]
),
)
a = np.arange(3).reshape((1, 3))
b = np.arange(6).reshape((2, 3))
array = np.array([a, b], dtype=object)
col = fits.Column(name="test", format="PD(6)", dim="(3,2)", array=array)
fits.BinTableHDU.from_columns([col]).writeto(self.temp("test3.fits"))
with fits.open(self.temp("test3.fits")) as hdus:
assert hdus[1].columns.formats == ["PD(6)"]
assert np.array_equal(hdus[1].data["test"][0], np.array([[0.0, 1.0, 2.0]]))
assert np.array_equal(
hdus[1].data["test"][1], np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]])
)
def test_heterogeneous_VLA_tables(self):
"""
Check the behaviour of heterogeneous VLA objects.
"""
# The column format fixes the dtype of the arrays in the VLF object.
a = np.array([45, 30])
b = np.array([11.0, 12.0, 13])
var = np.array([a, b], dtype=object)
c1 = fits.Column(name="var", format="PJ()", array=var)
hdu = fits.BinTableHDU.from_columns([c1])
assert hdu.data[0].array.dtype[0].subdtype[0] == "int32"
# Strings in the VLF object can't be added to the table
a = np.array([45, "thirty"])
b = np.array([11.0, 12.0, 13])
var = np.array([a, b], dtype=object)
c1 = fits.Column(name="var", format="PJ()", array=var)
with pytest.raises(
ValueError, match=r"invalid literal for int\(\) with base 10"
):
fits.BinTableHDU.from_columns([c1])
# These are tests that solely test the Column and ColDefs interfaces and
# related functionality without directly involving full tables; currently there
# are few of these but I expect there to be more as I improve the test coverage
class TestColumnFunctions(FitsTestCase):
def test_column_format_interpretation(self):
"""
Test that when Numpy-style record formats are passed to the Column
constructor's format argument, they are recognized so long as they are
unambiguous ("unambiguous" is a loose criterion here, since Numpy is
case-insensitive when parsing format codes; their canonical case is
lower-case, so we accept that). In practice, any key in the NUMPY2FITS
dict should be accepted.
"""
for recformat, fitsformat in NUMPY2FITS.items():
c = fits.Column("TEST", np.dtype(recformat))
c.format == fitsformat
c = fits.Column("TEST", recformat)
c.format == fitsformat
c = fits.Column("TEST", fitsformat)
c.format == fitsformat
# Test a few cases that are ambiguous in that they *are* valid binary
# table formats though not ones that are likely to be used, but are
# also valid common ASCII table formats
c = fits.Column("TEST", "I4")
assert c.format == "I4"
assert c.format.format == "I"
assert c.format.width == 4
c = fits.Column("TEST", "F15.8")
assert c.format == "F15.8"
assert c.format.format == "F"
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column("TEST", "E15.8")
assert c.format.format == "E"
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column("TEST", "D15.8")
assert c.format.format == "D"
assert c.format.width == 15
assert c.format.precision == 8
# zero-precision should be allowed as well, for float types
# https://github.com/astropy/astropy/issues/3422
c = fits.Column("TEST", "F10.0")
assert c.format.format == "F"
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column("TEST", "E10.0")
assert c.format.format == "E"
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column("TEST", "D10.0")
assert c.format.format == "D"
assert c.format.width == 10
assert c.format.precision == 0
# These are a couple cases where the format code is a valid binary
# table format, and is not strictly a valid ASCII table format but
# could be *interpreted* as one by appending a default width. This
# will only happen either when creating an ASCII table or when
# explicitly specifying ascii=True when the column is created
c = fits.Column("TEST", "I")
assert c.format == "I"
assert c.format.recformat == "i2"
c = fits.Column("TEST", "I", ascii=True)
assert c.format == "I10"
assert c.format.recformat == "i4"
# With specified widths, integer precision should be set appropriately
c = fits.Column("TEST", "I4", ascii=True)
assert c.format == "I4"
assert c.format.recformat == "i2"
c = fits.Column("TEST", "I9", ascii=True)
assert c.format == "I9"
assert c.format.recformat == "i4"
c = fits.Column("TEST", "I12", ascii=True)
assert c.format == "I12"
assert c.format.recformat == "i8"
c = fits.Column("TEST", "E")
assert c.format == "E"
assert c.format.recformat == "f4"
c = fits.Column("TEST", "E", ascii=True)
assert c.format == "E15.7"
# F is not a valid binary table format so it should be unambiguously
# treated as an ASCII column
c = fits.Column("TEST", "F")
assert c.format == "F16.7"
c = fits.Column("TEST", "D")
assert c.format == "D"
assert c.format.recformat == "f8"
c = fits.Column("TEST", "D", ascii=True)
assert c.format == "D25.17"
def test_zero_precision_float_column(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3422
"""
c = fits.Column("TEST", "F5.0", array=[1.1, 2.2, 3.3])
# The decimal places will be clipped
t = fits.TableHDU.from_columns([c])
t.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[1].header["TFORM1"] == "F5.0"
assert hdul[1].data["TEST"].dtype == np.dtype("float64")
assert np.all(hdul[1].data["TEST"] == [1.0, 2.0, 3.0])
# Check how the raw data looks
raw = np.rec.recarray.field(hdul[1].data, "TEST")
assert raw.tobytes() == b"   1.   2.   3."
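# Each F5.0 field is exactly 5 characters wide and right-justified with no
# fractional digits, hence the fixed-width raw record above.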
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column("mag", format="E", array=arr)
assert (arr == col.array).all()
def test_new_coldefs_with_invalid_sequence(self):
"""Test that a TypeError is raised when a ColDefs is instantiated with
a sequence of non-Column objects.
"""
pytest.raises(TypeError, fits.ColDefs, [1, 2, 3])
def test_coldefs_init_from_array(self):
"""Test that ColDefs._init_from_array works with single element data-
types as well as multi-element data-types
"""
nd_array = np.ndarray((1,), dtype=[("A", "<u4", (2,)), ("B", ">u2")])
col_defs = fits.column.ColDefs(nd_array)
assert 2**31 == col_defs["A"].bzero
assert 2**15 == col_defs["B"].bzero
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data("tb.fits")) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data("ascii.fits")) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data("random_groups.fits")) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data("zerowidth.fits")) as zwc:
# Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(
UserWarning,
match=r"Field 2 has a repeat count of 0 in its format code",
):
assert comparerecords(zwc_pl, zwc[2].data)
def test_column_lookup_by_name(self):
"""Tests that a `ColDefs` can be indexed by column name."""
a = fits.Column(name="a", format="D")
b = fits.Column(name="b", format="D")
cols = fits.ColDefs([a, b])
assert cols["a"] == cols[0]
assert cols["b"] == cols[1]
def test_column_attribute_change_after_removal(self):
"""
This is a test of the column attribute change notification system.
After a column has been removed from a table (but other references
are kept to that same column) changes to that column's attributes
should not trigger a notification on the table it was removed from.
"""
# One way we can check this is to ensure there are no further changes
# to the header
table = fits.BinTableHDU.from_columns(
[fits.Column("a", format="D"), fits.Column("b", format="D")]
)
b = table.columns["b"]
table.columns.del_col("b")
assert table.data.dtype.names == ("a",)
b.name = "HELLO"
assert b.name == "HELLO"
assert "TTYPE2" not in table.header
assert table.header["TTYPE1"] == "a"
assert table.columns.names == ["a"]
with pytest.raises(KeyError):
table.columns["b"]
# Make sure updates to the remaining column still work
table.columns.change_name("a", "GOODBYE")
with pytest.raises(KeyError):
table.columns["a"]
assert table.columns["GOODBYE"].name == "GOODBYE"
assert table.data.dtype.names == ("GOODBYE",)
assert table.columns.names == ["GOODBYE"]
assert table.data.columns.names == ["GOODBYE"]
table.columns["GOODBYE"].name = "foo"
with pytest.raises(KeyError):
table.columns["GOODBYE"]
assert table.columns["foo"].name == "foo"
assert table.data.dtype.names == ("foo",)
assert table.columns.names == ["foo"]
assert table.data.columns.names == ["foo"]
def test_x_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the X (bit array) format can be deep-copied.
"""
c = fits.Column("xcol", format="5X", array=[1, 0, 0, 1, 0])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array == c.array)
def test_p_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the P/Q formats (variable length arrays) can be
deep-copied.
"""
c = fits.Column("pcol", format="PJ", array=[[1, 2], [3, 4, 5]])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array[0] == c.array[0])
assert np.all(c2.array[1] == c.array[1])
c3 = fits.Column("qcol", format="QJ", array=[[1, 2], [3, 4, 5]])
c4 = copy.deepcopy(c3)
assert c4.name == c3.name
assert c4.format == c3.format
assert np.all(c4.array[0] == c3.array[0])
assert np.all(c4.array[1] == c3.array[1])
def test_column_verify_keywords(self):
"""
Test that the keyword arguments used to initialize a Column, specifically
those that are typically read from a FITS header (so excluding array),
are verified to have a valid value.
"""
with pytest.raises(AssertionError) as err:
_ = fits.Column(1, format="I", array=[1, 2, 3, 4, 5])
assert "Column name must be a string able to fit" in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column(
"col",
format=0,
null="Nan",
disp=1,
coord_type=1,
coord_unit=2,
coord_inc="1",
time_ref_pos=1,
coord_ref_point="1",
coord_ref_value="1",
)
err_msgs = [
"keyword arguments to Column were invalid",
"TFORM",
"TNULL",
"TDISP",
"TCTYP",
"TCUNI",
"TCRPX",
"TCRVL",
"TCDLT",
"TRPOS",
]
for msg in err_msgs:
assert msg in str(err.value)
def test_column_verify_start(self):
"""
Regression test for https://github.com/astropy/astropy/pull/6359
Test the validation of the column start position option (ASCII table only),
corresponding to ``TBCOL`` keyword.
Test whether the VerifyError message generated is the one with highest priority,
i.e. the order of error messages to be displayed is maintained.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="B", start="a", array=[1, 2, 3])
assert "start option (TBCOLn) is not allowed for binary table columns" in str(
err.value
)
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="I", start="a", array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got 'a')." in str(
err.value
)
with pytest.raises(VerifyError) as err:
_ = fits.Column("a", format="I", start="-56", array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got -56)." in str(
err.value
)
@pytest.mark.parametrize(
"keys",
[
{"TFORM": "Z", "TDISP": "E"},
{"TFORM": "2", "TDISP": "2E"},
{"TFORM": 3, "TDISP": 6.3},
{"TFORM": float, "TDISP": np.float64},
{"TFORM": "", "TDISP": "E.5"},
],
)
def test_column_verify_formats(self, keys):
"""
Additional tests for verification of 'TFORM' and 'TDISP' keyword
arguments used to initialize a Column.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column("col", format=keys["TFORM"], disp=keys["TDISP"])
for key in keys.keys():
assert key in str(err.value)
assert str(keys[key]) in str(err.value)
def test_regression_5383():
# Regression test for an undefined variable
x = np.array([1, 2, 3])
col = fits.Column(name="a", array=x, format="E")
hdu = fits.BinTableHDU.from_columns([col])
del hdu._header["TTYPE1"]
hdu.columns[0].name = "b"
def test_table_to_hdu():
from astropy.table import Table
table = Table(
[[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]],
names=["a", "b", "c"],
dtype=["i", "U1", "f"],
)
table["a"].unit = "m/s"
table["b"].unit = "not-a-unit"
table.meta["foo"] = "bar"
with pytest.warns(
UnitsWarning, match="'not-a-unit' did not parse as fits unit"
) as w:
hdu = fits.BinTableHDU(table, header=fits.Header({"TEST": 1}))
assert len(w) == 1
for name in "abc":
assert np.array_equal(table[name], hdu.data[name])
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index("TUNIT1") < hdu.header.index("TTYPE2")
assert hdu.header["FOO"] == "bar"
assert hdu.header["TEST"] == 1
def test_regression_scalar_indexing():
# Indexing a FITS_rec with a tuple that returns a scalar record
# should work
x = np.array([(1.0, 2), (3.0, 4)], dtype=[("x", float), ("y", int)]).view(
fits.FITS_rec
)
x1a = x[1]
# This should also succeed.
x1b = x[(1,)]
# FITS_record does not define __eq__, so compare element-wise.
assert all(a == b for a, b in zip(x1a, x1b))
def test_new_column_attributes_preserved(tmp_path):
# Regression test for https://github.com/astropy/astropy/issues/7145
# This makes sure that for now we don't clear away keywords that have
# newly been recognized (in Astropy 3.0) as special column attributes but
# instead just warn that we might do so in future. The new keywords are:
# TCTYP, TCUNI, TCRPX, TCRVL, TCDLT, TRPOS
col = []
col.append(fits.Column(name="TIME", format="1E", unit="s"))
col.append(fits.Column(name="RAWX", format="1I", unit="pixel"))
col.append(fits.Column(name="RAWY", format="1I"))
cd = fits.ColDefs(col)
hdr = fits.Header()
# Keywords that will get ignored in favor of these in the data
hdr["TUNIT1"] = "pixel"
hdr["TUNIT2"] = "m"
hdr["TUNIT3"] = "m"
# Keywords that were added in Astropy 3.0 that should eventually be
# ignored and set on the data instead
hdr["TCTYP2"] = "RA---TAN"
hdr["TCTYP3"] = "ANGLE"
hdr["TCRVL2"] = -999.0
hdr["TCRVL3"] = -999.0
hdr["TCRPX2"] = 1.0
hdr["TCRPX3"] = 1.0
hdr["TALEN2"] = 16384
hdr["TALEN3"] = 1024
hdr["TCUNI2"] = "angstrom"
hdr["TCUNI3"] = "deg"
# Other non-relevant keywords
hdr["RA"] = 1.5
hdr["DEC"] = 3.0
with pytest.warns(AstropyDeprecationWarning) as warning_list:
hdu = fits.BinTableHDU.from_columns(cd, hdr)
assert str(warning_list[0].message).startswith(
"The following keywords are now recognized as special"
)
# First, check that special keywords such as TUNIT are ignored in the header
# We may want to change that behavior in future, but this is the way it's
# been for a while now.
assert hdu.columns[0].unit == "s"
assert hdu.columns[1].unit == "pixel"
assert hdu.columns[2].unit is None
assert hdu.header["TUNIT1"] == "s"
assert hdu.header["TUNIT2"] == "pixel"
assert "TUNIT3" not in hdu.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu.columns[0].coord_type is None
assert hdu.columns[1].coord_type is None
assert hdu.columns[2].coord_type is None
assert "TCTYP1" not in hdu.header
assert hdu.header["TCTYP2"] == "RA---TAN"
assert hdu.header["TCTYP3"] == "ANGLE"
# Make sure that other keywords are still there
assert hdu.header["RA"] == 1.5
assert hdu.header["DEC"] == 3.0
# Now we can write this HDU to a file and re-load. Re-loading *should*
# cause the special column attributes to be picked up (it's just that when a
# header is manually specified, these values are ignored)
filename = tmp_path / "test.fits"
hdu.writeto(filename)
# Make sure we don't emit a warning in this case
with warnings.catch_warnings(record=True) as warning_list:
with fits.open(filename) as hdul:
hdu2 = hdul[1]
assert len(warning_list) == 0
# Check that column attributes are now correctly set
assert hdu2.columns[0].unit == "s"
assert hdu2.columns[1].unit == "pixel"
assert hdu2.columns[2].unit is None
assert hdu2.header["TUNIT1"] == "s"
assert hdu2.header["TUNIT2"] == "pixel"
assert "TUNIT3" not in hdu2.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu2.columns[0].coord_type is None
assert hdu2.columns[1].coord_type == "RA---TAN"
assert hdu2.columns[2].coord_type == "ANGLE"
assert "TCTYP1" not in hdu2.header
assert hdu2.header["TCTYP2"] == "RA---TAN"
assert hdu2.header["TCTYP3"] == "ANGLE"
# Make sure that other keywords are still there
assert hdu2.header["RA"] == 1.5
assert hdu2.header["DEC"] == 3.0
def test_empty_table(tmp_path):
ofile = tmp_path / "emptytable.fits"
hdu = fits.BinTableHDU(header=None, data=None, name="TEST")
hdu.writeto(ofile)
with fits.open(ofile) as hdul:
assert hdul["TEST"].data.size == 0
ofile = tmp_path / "emptytable.fits.gz"
hdu = fits.BinTableHDU(header=None, data=None, name="TEST")
hdu.writeto(ofile, overwrite=True)
with fits.open(ofile) as hdul:
assert hdul["TEST"].data.size == 0
def test_a3dtable(tmp_path):
testfile = tmp_path / "test.fits"
hdu = fits.BinTableHDU.from_columns(
[fits.Column(name="FOO", format="J", array=np.arange(10))]
)
hdu.header["XTENSION"] = "A3DTABLE"
hdu.writeto(testfile, output_verify="ignore")
with fits.open(testfile) as hdul:
assert hdul[1].header["XTENSION"] == "A3DTABLE"
with pytest.warns(AstropyUserWarning) as w:
hdul.verify("fix")
assert str(w[0].message) == "Verification reported errors:"
assert str(w[2].message).endswith("Converted the XTENSION keyword to BINTABLE.")
assert hdul[1].header["XTENSION"] == "BINTABLE"
def test_invalid_file(tmp_path):
hdu = fits.BinTableHDU()
# little trick to write an invalid card ...
hdu.header["FOO"] = None
hdu.header.cards["FOO"]._value = np.nan
testfile = tmp_path / "test.fits"
hdu.writeto(testfile, output_verify="ignore")
with fits.open(testfile) as hdul:
assert hdul[1].data is not None
def test_unit_parse_strict(tmp_path):
path = tmp_path / "invalid_unit.fits"
# this is a unit parseable by the generic format but invalid for FITS
invalid_unit = "1 / (MeV sr s)"
unit = Unit(invalid_unit)
t = Table({"a": [1, 2, 3]})
t.write(path)
with fits.open(path, mode="update") as hdul:
hdul[1].header["TUNIT1"] = invalid_unit
# default is "warn"
with pytest.warns(UnitsWarning):
t = Table.read(path)
assert isinstance(t["a"].unit, UnrecognizedUnit)
t = Table.read(path, unit_parse_strict="silent")
assert isinstance(t["a"].unit, UnrecognizedUnit)
with pytest.raises(ValueError):
Table.read(path, unit_parse_strict="raise")
with pytest.warns(UnitsWarning):
Table.read(path, unit_parse_strict="warn")
|
60c430af250d419b2593c3f7513b0967f64fbece
|
1180c0bfe29959d95f3c131e6e839950e528d4ee
|
/28/mjhea0/project/tests/test__config.py
|
07dfaa41463bfd7f517cc6bd338c8f245b8f7355
|
[
"MIT"
] |
permissive
|
pybites/challenges
|
e3e461accd8e7f890aee8007ba5070086ef983fc
|
02b77652d0901e6e06cb9b1e7cb3e59c675445c2
|
refs/heads/community
| 2023-08-20T18:19:02.982214
| 2022-11-17T09:23:31
| 2022-11-17T09:23:31
| 78,264,928
| 764
| 3,115
| null | 2023-07-21T05:58:19
| 2017-01-07T07:17:50
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,291
|
py
|
test__config.py
|
# project/server/tests/test_config.py
import unittest
from flask import current_app
from flask_testing import TestCase
from project.server import app
class TestDevelopmentConfig(TestCase):
def create_app(self):
app.config.from_object('project.server.config.DevelopmentConfig')
return app
def test_app_is_development(self):
self.assertFalse(current_app.config['TESTING'])
self.assertTrue(app.config['DEBUG'] is True)
self.assertTrue(app.config['DEBUG_TB_ENABLED'] is True)
self.assertFalse(current_app is None)
class TestTestingConfig(TestCase):
def create_app(self):
app.config.from_object('project.server.config.TestingConfig')
return app
def test_app_is_testing(self):
self.assertTrue(current_app.config['TESTING'])
self.assertTrue(app.config['DEBUG'] is True)
class TestProductionConfig(TestCase):
def create_app(self):
app.config.from_object('project.server.config.ProductionConfig')
return app
def test_app_is_production(self):
self.assertFalse(current_app.config['TESTING'])
self.assertTrue(app.config['DEBUG'] is False)
self.assertTrue(app.config['DEBUG_TB_ENABLED'] is False)
if __name__ == '__main__':
unittest.main()
|
c61142d5ad71b8f61f75309864e26c7f77882ccf
|
a5a36aa7200b0be6ea11ad669ba0534ee1b896a6
|
/packages/vaex-core/vaex/multiprocessing.py
|
0ea9249a020630739f23a4d7ca135138072ee8dc
|
[
"MIT",
"MPL-2.0"
] |
permissive
|
vaexio/vaex
|
ec42919f272a723f884fece3c83975112e7a6f30
|
15245cf4332d4423ac58bd737aee27d911a1b252
|
refs/heads/master
| 2023-08-11T08:03:33.248943
| 2023-07-21T10:40:58
| 2023-07-21T10:40:58
| 24,528,468
| 7,892
| 686
|
MIT
| 2023-09-04T05:07:11
| 2014-09-27T09:44:42
|
Python
|
UTF-8
|
Python
| false
| false
| 919
|
py
|
multiprocessing.py
|
import threading
import pyarrow as pa
import vaex.arrow.convert
import vaex.multithreading
import vaex.utils
_pool = None
_pool_lock = threading.Lock()
def _get_pool():
global _pool
# Fast path avoiding lock
if _pool is not None:
return _pool
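# Double-checked locking: re-test under the lock so that two threads
# racing past the fast path above cannot both create a pool.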
with _pool_lock:
if _pool is None:
from multiprocessing import Pool
_pool = Pool(vaex.settings.main.process_count)
return _pool
def _trim(ar):
if isinstance(ar, vaex.array_types.supported_arrow_array_types):
ar = vaex.arrow.convert.trim_buffers_for_pickle(ar)
return ar
def apply(f, args, kwargs, multiprocessing):
if multiprocessing:
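# Pool.apply pickles its arguments to ship them to the worker process;
# trimming the Arrow buffers first (as trim_buffers_for_pickle's name
# suggests) avoids serialising data outside the slices actually referenced.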
args = [_trim(k) for k in args]
kwargs = {k:_trim(v) for k, v in kwargs.items()}
result = _get_pool().apply(f, args, kwargs)
return result
else:
return f(*args, **kwargs)
|
18638b34cce37e9ef67079ebf02ea4540cf80f44
|
725ac5a0bf72829be627bf8dc82fdc51ba0f94ae
|
/Text_Generation/GPT2_TitleGen/train.py
|
7128f36c194bf6e473898542d1fc4b165d02dba4
|
[] |
no_license
|
shawroad/NLP_pytorch_project
|
fa14b6e4a156229765e1d552901d0492d8e1def3
|
1272fed2dc8fef78a9ded0f1ae1644d613a3b57b
|
refs/heads/master
| 2023-06-25T02:37:35.503251
| 2023-06-12T10:57:11
| 2023-06-12T10:57:11
| 229,694,655
| 530
| 104
| null | 2020-12-08T09:21:47
| 2019-12-23T06:54:29
|
Python
|
UTF-8
|
Python
| false
| false
| 7,487
|
py
|
train.py
|
"""
@file : train.py
@author : xiaolu
@email : luxiaonlp@163.com
@time : 2021-04-30
"""
import torch
import random
import os
from tqdm import tqdm
import numpy as np
from config import set_args
from model import GPT2LMHeadModel
from transformers import BertTokenizer, GPT2Config
from transformers import AdamW, get_linear_schedule_with_warmup
from torch.utils.tensorboard import SummaryWriter
from data_process import GPT2NewsTitleDataSet, collate_func
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
def train(model, train_data, test_data, args):
tb_write = SummaryWriter()
if args.gradient_accumulation_steps < 1:
raise ValueError("gradient_accumulation_steps参数无效,必须大于等于1")
# 计算真实的训练batch_size大小
train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
train_sampler = RandomSampler(train_data)
train_data_loader = DataLoader(train_data, sampler=train_sampler, batch_size=train_batch_size, collate_fn=collate_func)
total_steps = int(len(train_data_loader) * args.num_train_epochs / args.gradient_accumulation_steps)
print("总训练步数为:{}".format(total_steps))
if torch.cuda.is_available():
model.cuda()
# Collect all named model parameters
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(
nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
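# Weight decay is applied to every parameter except biases and LayerNorm
# weights, the usual convention when fine-tuning transformers.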
# Set up the optimizer
optimizer = AdamW(optimizer_grouped_parameters,
lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(args.warmup_proportion * total_steps),
num_training_steps=total_steps)
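# Linear schedule: the learning rate warms up over the first
# warmup_proportion * total_steps steps, then decays linearly to zero.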
# Put the model into training mode
model.train()
title_id = train_data.title_id
tr_loss, logging_loss, min_loss = 0.0, 0.0, 0.0
global_step = 0
# Start training
torch.cuda.empty_cache()
for epoch in range(args.num_train_epochs):
for step, batch in enumerate(train_data_loader):
input_ids = batch["input_ids"]
token_type_ids = batch["token_type_ids"]
if torch.cuda.is_available():
input_ids = input_ids.cuda()
token_type_ids = token_type_ids.cuda()
# Forward pass to obtain the loss
outputs = model(input_ids=input_ids, token_type_ids=token_type_ids, labels=input_ids, title_id=title_id)
loss = outputs[0]
tr_loss += loss.item()
# Print the loss every iteration for easy monitoring
print('Epoch:{}, Step:{}, Loss:{}'.format(epoch, step, loss))
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
# Backpropagate the loss
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
# Step the optimizer once every gradient_accumulation_steps batches
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step()
optimizer.zero_grad()
global_step += 1
# Every logging_steps, record the learning rate and training-set loss
if args.logging_steps > 0 and global_step % args.logging_steps == 0:
tb_write.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_write.add_scalar("train_loss", (tr_loss-logging_loss) /
(args.logging_steps * args.gradient_accumulation_steps), global_step)
logging_loss = tr_loss
# Every eval_steps, evaluate the model and record the test-set loss
if args.eval_steps > 0 and global_step % args.eval_steps == 0:
eval_loss = evaluate(model, test_data, args)
tb_write.add_scalar("test_loss", eval_loss, global_step)
model.train()
# Save a checkpoint at the end of every epoch
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(output_dir)
# Free cached CUDA memory
torch.cuda.empty_cache()
def evaluate(model, test_data, args):
'''
Evaluate the model on the test set.
:param model:
:param test_data:
:param args:
:return: mean test loss
'''
# Build the DataLoader for the test set
test_sampler = SequentialSampler(test_data)
test_data_loader = DataLoader(test_data, sampler=test_sampler, batch_size=args.test_batch_size, collate_fn=collate_func)
title_id = test_data.title_id
total_loss, total = 0.0, 0.0
# Run evaluation
for step, batch in tqdm(enumerate(test_data_loader)):
# Put the model into eval mode
model.eval()
with torch.no_grad():
input_ids = batch["input_ids"]
token_type_ids = batch["token_type_ids"]
if torch.cuda.is_available():
input_ids = input_ids.cuda()
token_type_ids = token_type_ids.cuda()
# Forward pass to obtain the loss
outputs = model(input_ids=input_ids, token_type_ids=token_type_ids, labels=input_ids, title_id=title_id)
loss = outputs[0]
loss = loss.item()
# Accumulate the loss, weighted by batch size
total_loss += loss * len(batch["input_ids"])
total += len(batch["input_ids"])
# Compute the final mean loss over the test set
test_loss = total_loss / total
return test_loss
def main():
# Parse the training arguments
args = set_args()
# Set random seeds for reproducibility
if args.seed:
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
# Load the model config
model_config = GPT2Config.from_json_file(args.config_path)
# Instantiate GPT2LMHeadModel; without a pretrained checkpoint we train from scratch.
if args.pretrained_model_path:
model = GPT2LMHeadModel.from_pretrained(args.pretrained_model_path)
else:
# No pretrained model specified, so initialize the model from the config
model = GPT2LMHeadModel(config=model_config)
tokenizer = BertTokenizer.from_pretrained(args.vocab_path, do_lower_case=True)
# Treat [Space] as a single token. For example, for "我爱[Space]中国。" the
# original tokenizer produces "['我', '爱', '[', 'Space', ']', '中', '国', '。']";
# with the special token added it produces "['我', '爱', '[Space]', '中', '国', '。']"
tokenizer.add_tokens("[Space]", special_tokens=True)
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
# Load the training and test data
train_data = GPT2NewsTitleDataSet(tokenizer, args.max_len, args.title_max_len, args.data_dir, "train", args.train_file_path)
test_data = GPT2NewsTitleDataSet(tokenizer, args.max_len, args.title_max_len, args.data_dir, "test", args.test_file_path)
# Start training
train(model, train_data, test_data, args)
if __name__ == '__main__':
main()
|
ecc8ca8bb5de122bf46d6a107ba9e5c165b1e2b7
|
a64358585c6833b254abc909e41f91126ce156b5
|
/scripts/kilt/convert_kilt_100w_passage_tsv_to_jsonl.py
|
8c85bd324b0d1b64d650291ffe74e1d3b61e4a43
|
[
"Apache-2.0"
] |
permissive
|
castorini/pyserini
|
97219f9fea4be31c6276b51b19ba271c9ecd6fd4
|
42b354914b230880c91b2e4e70605b472441a9a1
|
refs/heads/master
| 2023-09-01T18:38:44.259026
| 2023-09-01T01:35:47
| 2023-09-01T01:35:47
| 219,078,084
| 1,070
| 298
|
Apache-2.0
| 2023-09-14T21:52:50
| 2019-11-01T23:36:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,332
|
py
|
convert_kilt_100w_passage_tsv_to_jsonl.py
|
#
# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import argparse
import pickle
import csv
from tqdm import tqdm
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert KILT 100 words passage tsv into a 100-words Passage-level JSONL that can be processed by Pyserini')
parser.add_argument('--input', required=True, help='Path to the kilt_w100_title.tsv file')
parser.add_argument('--mapping', required=True, help='Path to the mapping_KILT_title.p file')
parser.add_argument('--output-dir', required=True, help='Path to the output directory')
parser.add_argument('--concat-title', action="store_true", default=False, help='Concatenate the title into each paragraph')
args = parser.parse_args()
# Map of title -> wikipedia id
KILT_mapping = pickle.load(open(args.mapping, "rb"))
not_found = set()
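# Passages whose title has no entry in the KILT mapping are skipped and
# reported at the end rather than aborting the conversion.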
with open(args.input, 'r') as f, open(os.path.join(args.output_dir, '100w_passage_kilt_knowledgesource.jsonl'), 'w') as outp:
tsv = csv.reader(f, delimiter="\t")
next(tsv) # Get rid of headers
for row in tqdm(tsv, mininterval=10.0, maxinterval=20.0):
i = row[0]
text = row[1]
title = row[2]
if title not in KILT_mapping:
not_found.add(f"{title}#{i}")
continue
wikipedia_id = str(KILT_mapping[title])
doc = {}
doc["id"] = f"{wikipedia_id}#{i}"
doc["wikipedia_title"] = title
doc["wikipedia_id"] = wikipedia_id
doc["contents"] = f"{title}\n{text}" if args.concat_title else text
_ = outp.write(json.dumps(doc))
_ = outp.write('\n')
print(f"Not found: {not_found}")
|
2475d926ee4456689651f3f7b20e2bc8aed434b6
|
a2dbff3e4f7cb1c84fed0835dc76106621e75cb9
|
/UnityPy/math/Color.py
|
fced5e1c55b57f0e680147cc8e3574d3304b87f7
|
[
"MIT"
] |
permissive
|
K0lb3/UnityPy
|
e3325c9b993ad910bd68fdfcf6c55889ebb894ab
|
2fe1be2abfb8e3d53ba062f70390a517f41cfae7
|
refs/heads/master
| 2023-08-19T04:51:40.863686
| 2023-07-18T12:38:31
| 2023-07-18T12:38:31
| 198,518,141
| 612
| 126
|
MIT
| 2023-08-31T11:02:15
| 2019-07-23T22:36:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,539
|
py
|
Color.py
|
from .Vector4 import Vector4
class Color:
R: float
G: float
B: float
A: float
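# RGBA components stored as floats; all arithmetic below is component-wise.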
def __init__(self, r: float = 0.0, g: float = 0.0, b: float = 0.0, a: float = 0.0):
self.R = r
self.G = g
self.B = b
self.A = a
def __eq__(self, other):
if isinstance(other, Color):
return self.__dict__ == other.__dict__
else:
return False
def __add__(self, other):
return Color(
self.R + other.R, self.G + other.G, self.B + other.B, self.A + other.A
)
def __sub__(self, other):
return Color(
self.R - other.R, self.G - other.G, self.B - other.B, self.A - other.A
)
def __mul__(self, other):
if isinstance(other, Color):
return Color(
self.R * other.R, self.G * other.G, self.B * other.B, self.A * other.A
)
else:
return Color(self.R * other, self.G * other, self.B * other, self.A * other)
def __truediv__(self, other):  # Python 3 uses __truediv__; __div__ was Python 2 only
if isinstance(other, Color):
return Color(
self.R / other.R, self.G / other.G, self.B / other.B, self.A / other.A
)
else:
return Color(self.R / other, self.G / other, self.B / other, self.A / other)
def __ne__(self, other):
return self.__dict__ != other.__dict__
def Vector4(self):
return Vector4(self.R, self.G, self.B, self.A)
|
649e36987442b8b2fe903381a2a193251e38cb97
|
028ddc5e85d89c26f8320b70d8ffe80f3d5aec52
|
/docs/code/stochastic_processes/spectral/spectral_1d_1v.py
|
25b9d7e1d96f2c4e6dfae00d4a849db6720b9198
|
[
"MIT"
] |
permissive
|
SURGroup/UQpy
|
3b516706e9072c6fac80da0bdfbd23e2193f5844
|
9e98a6279aa5a2ec2d6d4c61226c34712547bcc6
|
refs/heads/master
| 2023-09-04T03:38:35.294389
| 2023-08-04T12:55:02
| 2023-08-04T12:55:02
| 112,795,497
| 215
| 70
|
MIT
| 2023-09-14T14:18:22
| 2017-12-01T23:05:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,921
|
py
|
spectral_1d_1v.py
|
"""
One-dimensional & one variable
=================================================================
In this example, the Spectral Representation Method is used to generate stochastic processes from a prescribed Power
Spectrum. This example illustrates how to use the SRM class for a one dimensional and one variable case and compare the
statistics of the generated stochastic processes with the expected values.
"""
#%% md
#
# Import the necessary libraries. Here we import standard libraries such as numpy and matplotlib, but also need to
# import the SRM class from the StochasticProcesses module of UQpy.
#%%
from UQpy.stochastic_process import SpectralRepresentation
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn')
#%% md
#
# The input parameters necessary for the generation of the stochastic processes are given below:
#%%
n_sim = 10000  # Number of samples
n = 1  # Number of dimensions
m = 1  # Number of variables
T = 100  # Total time (1 / T = dw)
nt = 256  # Number of discretized time points
F = 1 / T * nt / 2  # Cutoff frequency (Hz)
nw = 128  # Number of discretized frequencies
# Generation of input data (stationary)
dt = T / nt
t = np.linspace(0, T - dt, nt)
dw = F / nw
w = np.linspace(0, F - dw, nw)
#%% md
#
# Make sure that the input parameters are in order to prevent aliasing.
#%%
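# Alias check: t_u = 2*pi/(2*F) is the largest alias-free sampling interval,
# which treats F as an angular cutoff frequency (i.e. dt <= pi/F).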
t_u = 2*np.pi/2/F
if dt > t_u:
print('Error: dt exceeds the maximum alias-free time step')
#%% md
#
# Defining the Power Spectral Density Function.
#%%
S = 125 / 4 * w ** 2 * np.exp(-5 * w)
SRM_object = SpectralRepresentation(n_sim, S, dt, dw, nt, nw)
samples = SRM_object.samples
fig, ax = plt.subplots()
plt.title('Realisation of the Spectral Representation Method')
plt.plot(t, samples[0, 0])
ax.yaxis.grid(True)
ax.xaxis.grid(True)
plt.show()
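# For a one-sided power spectrum, the target variance of the process is
# 2 * sum(S) * dw, which is what the expected value printed below reports.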
print('The mean of the samples is ', np.mean(samples), 'whereas the expected mean is 0.000')
print('The variance of the samples is ', np.var(samples), 'whereas the expected variance is ', np.sum(S)*dw*2)
|
927df458c06693e36a6dbc813092e97271523879
|
150a7b11cb531f8bc2a045aefcf2ebe1d151efa3
|
/ocs_ci/ocs/exceptions.py
|
e43f68d7e34f65198eee84b3c64f0ff4e4df5935
|
[
"MIT"
] |
permissive
|
red-hat-storage/ocs-ci
|
c7ac414e1b86552da0439223dfa9bca39977f31a
|
5e9e504957403148e413326f65c3769bf9d8eb39
|
refs/heads/master
| 2023-08-17T16:19:51.154403
| 2023-08-17T13:27:12
| 2023-08-17T13:27:12
| 179,558,938
| 146
| 210
|
MIT
| 2023-09-14T16:38:44
| 2019-04-04T19:00:02
|
Python
|
UTF-8
|
Python
| false
| false
| 9,358
|
py
|
exceptions.py
|
class CommandFailed(Exception):
pass
class UnexpectedDeploymentConfiguration(Exception):
pass
class UnsupportedOSType(Exception):
pass
class CephHealthException(Exception):
pass
class NoobaaHealthException(Exception):
pass
class NoobaaCliChecksumFailedException(Exception):
pass
class UnexpectedBehaviour(Exception):
pass
class UnexpectedInput(Exception):
pass
class ClassCreationException(Exception):
pass
class ResourceLeftoversException(Exception):
pass
class ObjectsStillBeingDeletedException(Exception):
pass
class TimeoutExpiredError(Exception):
message = "Timed Out"
def __init__(self, value, custom_message=None):
self.value = value
self.custom_message = custom_message
def __str__(self):
if self.custom_message is None:
self.message = f"{self.__class__.message}: {self.value}"
else:
self.message = self.custom_message
return self.message
class TimeoutException(Exception):
pass
class MonCountException(Exception):
pass
class MDSCountException(Exception):
pass
class DeploymentPlatformNotSupported(Exception):
pass
class UnavailableBuildException(Exception):
pass
class PerformanceException(Exception):
pass
class ResourceWrongStatusException(Exception):
def __init__(
self, resource_or_name, describe_out=None, column=None, expected=None, got=None
):
if isinstance(resource_or_name, str):
self.resource = None
self.resource_name = resource_or_name
else:
self.resource = resource_or_name
self.resource_name = self.resource.name
self.describe_out = describe_out
self.column = column
self.expected = expected
self.got = got
def __str__(self):
if self.resource:
msg = f"{self.resource.kind} resource {self.resource_name}"
else:
msg = f"Resource {self.resource_name}"
if self.column:
msg += f" in column {self.column}"
if self.got:
msg += f" was in state {self.got}"
if self.expected:
msg += f" but expected {self.expected}"
if self.describe_out:
msg += f" describe output: {self.describe_out}"
return msg
class UnavailableResourceException(Exception):
pass
class TagNotFoundException(Exception):
pass
class ResourceNameNotSpecifiedException(Exception):
pass
class VMMaxDisksReachedException(Exception):
pass
class SameNamePrefixClusterAlreadyExistsException(Exception):
pass
class MissingRequiredConfigKeyError(Exception):
pass
class NotSupportedFunctionError(Exception):
pass
class NonUpgradedImagesFoundError(Exception):
pass
class NotAllPodsHaveSameImagesError(Exception):
pass
class UnexpectedImage(Exception):
pass
class UnexpectedVolumeType(Exception):
pass
class FailedToAddNodeException(Exception):
pass
class FailedToRemoveNodeException(Exception):
pass
class FailedToDeleteInstance(Exception):
pass
class NoInstallPlanForApproveFoundException(Exception):
pass
class NoobaaConditionException(Exception):
pass
class NodeNotFoundError(Exception):
pass
class ResourceNotFoundError(Exception):
pass
class ChannelNotFound(Exception):
pass
class CSVNotFound(Exception):
pass
class UnsupportedPlatformError(Exception):
pass
class UnsupportedPlatformVersionError(Exception):
pass
class UnsupportedFeatureError(Exception):
pass
class UnsupportedBrowser(Exception):
pass
class OpenshiftConsoleSuiteNotDefined(Exception):
pass
class ServiceUnavailable(Exception):
pass
class InvalidStatusCode(Exception):
pass
class NoBucketPolicyResponse(Exception):
pass
class PSIVolumeCreationFailed(Exception):
pass
class PSIVolumeNotInExpectedState(Exception):
pass
class PSIVolumeDeletionFailed(Exception):
pass
class FlexyDataNotFound(Exception):
pass
class PendingCSRException(Exception):
pass
class RDMDiskNotFound(Exception):
pass
class PassThroughEnabledDeviceNotFound(Exception):
pass
class ExternalClusterDetailsException(Exception):
pass
class ExternalClusterRGWAdminOpsUserException(Exception):
pass
class ExternalClusterExporterRunFailed(Exception):
pass
class ExternalClusterObjectStoreUserCreationFailed(Exception):
pass
class ExternalClusterRGWEndPointMissing(Exception):
pass
class ExternalClusterRGWEndPointPortMissing(Exception):
pass
class ExternalClusterCephfsMissing(Exception):
pass
class ExternalClusterCephSSHAuthDetailsMissing(Exception):
pass
class CredReqSecretNotFound(Exception):
pass
class RhcosImageNotFound(Exception):
pass
class FipsNotInstalledException(Exception):
pass
class StorageNotSufficientException(Exception):
pass
class PoolNotFound(Exception):
pass
class PoolDidNotReachReadyState(Exception):
pass
class PoolStateIsUnknow(Exception):
pass
class PoolNotDeleted(Exception):
pass
class PoolDataNotErased(Exception):
pass
class PoolSizeWrong(Exception):
pass
class PoolCompressionWrong(Exception):
pass
class PoolNotDeletedFromUI(Exception):
pass
class PoolCephValueNotMatch(Exception):
pass
class StorageClassNotDeletedFromUI(Exception):
pass
class PvcNotDeleted(Exception):
pass
class StorageclassNotCreated(Exception):
pass
class StorageclassIsNotDeleted(Exception):
pass
class ResourceNotDeleted(Exception):
pass
class PageNotLoaded(Exception):
pass
class MemoryNotSufficientException(Exception):
pass
class CPUNotSufficientException(Exception):
pass
class PoolNotCompressedAsExpected(Exception):
pass
class PoolNotReplicatedAsNeeded(Exception):
pass
class ImageIsNotDeletedOrNotFound(Exception):
pass
class VaultDeploymentError(Exception):
pass
class VaultOperationError(Exception):
pass
class HPCSDeploymentError(Exception):
pass
class KMIPDeploymentError(Exception):
pass
class KMIPOperationError(Exception):
pass
class KMSNotSupported(Exception):
pass
class KMSConnectionDetailsError(Exception):
pass
class KMSTokenError(Exception):
pass
class KMSResourceCleaneupError(Exception):
pass
class UnhealthyBucket(Exception):
pass
class NotFoundError(Exception):
pass
class ResourcePoolNotFound(Exception):
pass
class ClientDownloadError(Exception):
pass
class NotAllNodesCreated(Exception):
pass
class TemplateNotFound(Exception):
pass
class PVNotSufficientException(Exception):
pass
class IPAMReleaseUpdateFailed(Exception):
pass
class IPAMAssignUpdateFailed(Exception):
pass
class NodeHasNoAttachedVolume(Exception):
pass
class NotSupportedProxyConfiguration(Exception):
pass
class OCSWorkerScaleFailed(Exception):
pass
class OSDScaleFailed(Exception):
pass
class PVCNotCreated(Exception):
pass
class PodNotCreated(Exception):
pass
class RBDSideCarContainerException(Exception):
pass
class ElasticSearchNotDeployed(Exception):
pass
class ManagedServiceAddonDeploymentError(Exception):
pass
class ManagedServiceSecurityGroupNotFound(Exception):
pass
class ConfigurationError(Exception):
pass
class DRPrimaryNotFoundException(Exception):
pass
class InteractivePromptException(Exception):
pass
class BenchmarkTestFailed(Exception):
pass
class ACMClusterDeployException(Exception):
pass
class ACMClusterImportException(Exception):
pass
class RDRDeploymentException(Exception):
pass
class MDRDeploymentException(Exception):
pass
class ACMClusterDestroyException(Exception):
pass
class WrongVersionExpression(ValueError):
pass
class ClusterNotFoundException(Exception):
pass
class AlertingError(Exception):
pass
class AuthError(Exception):
pass
class UnknownCloneTypeException(Exception):
pass
class CephToolBoxNotFoundException(Exception):
pass
class UnsupportedWorkloadError(Exception):
pass
class RebootEventNotFoundException(Exception):
pass
class ConnectivityFail(Exception):
pass
class ROSAProdAdminLoginFailedException(Exception):
pass
class Md5CheckFailed(Exception):
pass
class ZombieProcessFoundException(Exception):
pass
class LvSizeWrong(Exception):
pass
class LvDataPercentSizeWrong(Exception):
pass
class LvThinUtilNotChanged(Exception):
pass
class ThinPoolUtilityWrong(Exception):
pass
class LVMOHealthException(Exception):
pass
class VolumesExistError(Exception):
pass
class ExternalClusterNodeRoleNotFound(Exception):
pass
class UnexpectedODFAccessException(Exception):
pass
class UnknownOperationForTerraformVariableUpdate(Exception):
pass
class TerrafromFileNotFoundException(Exception):
pass
class IncorrectUiOptionRequested(Exception):
def __init__(self, text, func=None):
super().__init__(text)
if func is not None:
func()
class ReturnedEmptyResponseException(Exception):
pass
class ArchitectureNotSupported(Exception):
pass
class PDBNotCreatedException(Exception):
pass
|
f523f5f1acee7719b07a933eb131e4fb973625bc
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/esphome/test_media_player.py
|
ffbe8f50e4855efba1f0477c08adf06ad0a2fa53
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 7,402
|
py
|
test_media_player.py
|
"""Test ESPHome media_players."""
from unittest.mock import AsyncMock, Mock, call
from aioesphomeapi import (
APIClient,
MediaPlayerCommand,
MediaPlayerEntityState,
MediaPlayerInfo,
MediaPlayerState,
)
import pytest
from homeassistant.components import media_source
from homeassistant.components.media_player import (
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
DOMAIN as MEDIA_PLAYER_DOMAIN,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_STOP,
SERVICE_PLAY_MEDIA,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
BrowseMedia,
MediaClass,
MediaType,
)
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.common import mock_platform
from tests.typing import WebSocketGenerator
async def test_media_player_entity(
hass: HomeAssistant, mock_client: APIClient, mock_generic_device_entry
) -> None:
"""Test a generic media_player entity."""
entity_info = [
MediaPlayerInfo(
object_id="mymedia_player",
key=1,
name="my media_player",
unique_id="my_media_player",
supports_pause=True,
)
]
states = [
MediaPlayerEntityState(
key=1, volume=50, muted=True, state=MediaPlayerState.PAUSED
)
]
user_service = []
await mock_generic_device_entry(
mock_client=mock_client,
entity_info=entity_info,
user_service=user_service,
states=states,
)
state = hass.states.get("media_player.test_mymedia_player")
assert state is not None
assert state.state == "paused"
await hass.services.async_call(
MEDIA_PLAYER_DOMAIN,
SERVICE_VOLUME_MUTE,
{
ATTR_ENTITY_ID: "media_player.test_mymedia_player",
ATTR_MEDIA_VOLUME_MUTED: True,
},
blocking=True,
)
mock_client.media_player_command.assert_has_calls(
[call(1, command=MediaPlayerCommand.MUTE)]
)
mock_client.media_player_command.reset_mock()
await hass.services.async_call(
MEDIA_PLAYER_DOMAIN,
SERVICE_VOLUME_MUTE,
{
ATTR_ENTITY_ID: "media_player.test_mymedia_player",
ATTR_MEDIA_VOLUME_MUTED: True,
},
blocking=True,
)
mock_client.media_player_command.assert_has_calls(
[call(1, command=MediaPlayerCommand.MUTE)]
)
mock_client.media_player_command.reset_mock()
await hass.services.async_call(
MEDIA_PLAYER_DOMAIN,
SERVICE_VOLUME_SET,
{
ATTR_ENTITY_ID: "media_player.test_mymedia_player",
ATTR_MEDIA_VOLUME_LEVEL: 0.5,
},
blocking=True,
)
mock_client.media_player_command.assert_has_calls([call(1, volume=0.5)])
mock_client.media_player_command.reset_mock()
await hass.services.async_call(
MEDIA_PLAYER_DOMAIN,
SERVICE_MEDIA_PAUSE,
{
ATTR_ENTITY_ID: "media_player.test_mymedia_player",
},
blocking=True,
)
mock_client.media_player_command.assert_has_calls(
[call(1, command=MediaPlayerCommand.PAUSE)]
)
mock_client.media_player_command.reset_mock()
await hass.services.async_call(
MEDIA_PLAYER_DOMAIN,
SERVICE_MEDIA_PLAY,
{
ATTR_ENTITY_ID: "media_player.test_mymedia_player",
},
blocking=True,
)
mock_client.media_player_command.assert_has_calls(
[call(1, command=MediaPlayerCommand.PLAY)]
)
mock_client.media_player_command.reset_mock()
await hass.services.async_call(
MEDIA_PLAYER_DOMAIN,
SERVICE_MEDIA_STOP,
{
ATTR_ENTITY_ID: "media_player.test_mymedia_player",
},
blocking=True,
)
mock_client.media_player_command.assert_has_calls(
[call(1, command=MediaPlayerCommand.STOP)]
)
mock_client.media_player_command.reset_mock()
async def test_media_player_entity_with_source(
hass: HomeAssistant,
mock_client: APIClient,
hass_ws_client: WebSocketGenerator,
mock_generic_device_entry,
) -> None:
"""Test a generic media_player entity media source."""
esphome_platform_mock = Mock(
async_get_media_browser_root_object=AsyncMock(
return_value=[
BrowseMedia(
title="Spotify",
media_class=MediaClass.APP,
media_content_id="",
media_content_type="spotify",
thumbnail="https://brands.home-assistant.io/_/spotify/logo.png",
can_play=False,
can_expand=True,
)
]
),
async_browse_media=AsyncMock(
return_value=BrowseMedia(
title="Spotify Favourites",
media_class=MediaClass.PLAYLIST,
media_content_id="",
media_content_type="spotify",
can_play=True,
can_expand=False,
)
),
async_play_media=AsyncMock(return_value=False),
)
mock_platform(hass, "test.esphome", esphome_platform_mock)
await async_setup_component(hass, "test", {"test": {}})
await async_setup_component(hass, "media_source", {"media_source": {}})
await hass.async_block_till_done()
entity_info = [
MediaPlayerInfo(
object_id="mymedia_player",
key=1,
name="my media_player",
unique_id="my_media_player",
supports_pause=True,
)
]
states = [
MediaPlayerEntityState(
key=1, volume=50, muted=True, state=MediaPlayerState.PLAYING
)
]
user_service = []
await mock_generic_device_entry(
mock_client=mock_client,
entity_info=entity_info,
user_service=user_service,
states=states,
)
state = hass.states.get("media_player.test_mymedia_player")
assert state is not None
assert state.state == "playing"
with pytest.raises(media_source.error.Unresolvable):
await hass.services.async_call(
MEDIA_PLAYER_DOMAIN,
SERVICE_PLAY_MEDIA,
{
ATTR_ENTITY_ID: "media_player.test_mymedia_player",
ATTR_MEDIA_CONTENT_TYPE: MediaType.MUSIC,
ATTR_MEDIA_CONTENT_ID: "media-source://local/xz",
},
blocking=True,
)
mock_client.media_player_command.reset_mock()
client = await hass_ws_client()
await client.send_json(
{
"id": 1,
"type": "media_player/browse_media",
"entity_id": "media_player.test_mymedia_player",
}
)
response = await client.receive_json()
assert response["success"]
await hass.services.async_call(
MEDIA_PLAYER_DOMAIN,
SERVICE_PLAY_MEDIA,
{
ATTR_ENTITY_ID: "media_player.test_mymedia_player",
ATTR_MEDIA_CONTENT_TYPE: MediaType.URL,
ATTR_MEDIA_CONTENT_ID: "media-source://tts?message=hello",
},
blocking=True,
)
mock_client.media_player_command.assert_has_calls(
[call(1, media_url="media-source://tts?message=hello")]
)
|
ba6a3e1806f58ab64c1abf3ac6351950930e4912
|
3c41443364da8b44c74dce08ef94a1acd1b66b3e
|
/api/wb/views.py
|
c67eee23c158a4124e296855a3c4ae13abe69a88
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-warranty-disclaimer",
"AGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-proprietary-license",
"MPL-1.1",
"CPAL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Apache-2.0"
] |
permissive
|
CenterForOpenScience/osf.io
|
71d9540be7989f7118a33e15bc4a6ce2d2492ac1
|
a3e0a0b9ddda5dd75fc8248d58f3bcdeece0323e
|
refs/heads/develop
| 2023-09-04T03:21:14.970917
| 2023-08-31T14:49:20
| 2023-08-31T14:49:20
| 10,199,599
| 683
| 390
|
Apache-2.0
| 2023-09-14T17:07:52
| 2013-05-21T15:53:37
|
Python
|
UTF-8
|
Python
| false
| false
| 3,365
|
py
|
views.py
|
from rest_framework import status
from rest_framework.exceptions import ValidationError, NotFound
from rest_framework.response import Response
from osf.models import Guid
from rest_framework.views import APIView
from addons.osfstorage.models import OsfStorageFileNode, OsfStorageFolder
from api.base.parsers import HMACSignedParser
from api.wb.serializers import (
WaterbutlerMetadataSerializer,
)
class FileMetadataView(APIView):
"""
Mixin with common code for WB move/copy hooks
"""
parser_classes = (HMACSignedParser,)
serializer_class = WaterbutlerMetadataSerializer
view_category = 'wb'
target_lookup_url_kwarg = 'target_id'
def get_object(self):
return self.get_target(self.kwargs[self.target_lookup_url_kwarg])
def get_target(self, target_id):
guid = Guid.load(target_id)
if not guid:
raise NotFound
target = guid.referent
if getattr(target, 'is_registration', False) and not getattr(target, 'archiving', False):
raise ValidationError('Registrations cannot be changed.')
return target
def get_serializer_context(self):
"""
Extra context provided to the serializer class.
"""
return {
'view': self,
}
def post(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data, context=self.get_serializer_context())
if serializer.is_valid():
source = serializer.validated_data.pop('source')
destination = serializer.validated_data.pop('destination')
name = destination.get('name')
dest_target = self.get_target(target_id=destination.get('target'))
try:
source = OsfStorageFileNode.get(source, self.get_object())
except OsfStorageFileNode.DoesNotExist:
raise NotFound
try:
dest_parent = OsfStorageFolder.get(destination.get('parent'), dest_target)
except OsfStorageFolder.DoesNotExist:
raise NotFound
serializer.save(source=source, destination=dest_parent, name=name)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class MoveFileMetadataView(FileMetadataView):
"""
View for moving file metadata in OsfStorage.
Only WaterButler should talk to this endpoint by sending a signed request.
"""
view_name = 'metadata-move'
# overrides FileMetadataView
def post(self, request, *args, **kwargs):
response = super(MoveFileMetadataView, self).post(request, *args, **kwargs)
if response.status_code == status.HTTP_400_BAD_REQUEST:
return response
response.status_code = status.HTTP_200_OK
return response
def perform_file_action(self, source, destination, name):
ret = source.move_under(destination, name)
return ret
class CopyFileMetadataView(FileMetadataView):
"""
View for copying file metadata in OsfStorage.
Only WaterButler should talk to this endpoint by sending a signed request.
"""
view_name = 'metadata-copy'
def perform_file_action(self, source, destination, name):
ret = source.copy_under(destination, name)
return ret
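# Illustrative note (an assumption, not code from this module): the serializer
# saved in FileMetadataView.post is presumed to dispatch back to the view,
# roughly as `self.context['view'].perform_file_action(source, destination,
# name)`, which is why the Move/Copy subclasses only override perform_file_action.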
|
2bc4dd1e461f90f0a3193479ec15ddaaaee3d845
|
08ea46c0a9fb71ef222cf6afa2e9094f5663dcfb
|
/tests/test_filter.py
|
dae128a441028ff016b2a4722a9243df0efd95d2
|
[
"BSD-3-Clause-Clear"
] |
permissive
|
griffithlab/pVACtools
|
e358919eee76100f79dbe8d40d02b3fce8b227ac
|
3317d2c18e82edb5ea183ae09820beb68c39d256
|
refs/heads/master
| 2023-08-09T15:42:06.725426
| 2023-08-09T14:28:44
| 2023-08-09T14:28:44
| 102,625,109
| 124
| 64
|
BSD-3-Clause-Clear
| 2023-09-08T14:17:22
| 2017-09-06T15:23:04
|
Python
|
UTF-8
|
Python
| false
| false
| 5,407
|
py
|
test_filter.py
|
import unittest
import os
import tempfile
from filecmp import cmp
import py_compile
from pvactools.lib.filter import Filter, FilterCriterion
from tests.utils import *
class FilterTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.filter_path = os.path.join(pvactools_directory(), "pvactools", "lib", "filter.py")
        cls.test_data_path = os.path.join(pvactools_directory(), "tests", "test_data", "filter")
def module_compiles(self):
self.assertTrue(py_compile.compile(self.filter_path))
def test_less_than(self):
output_file = tempfile.NamedTemporaryFile()
self.assertFalse(Filter(
os.path.join(
self.test_data_path,
'Test.combined.parsed.tsv'
),
output_file.name,
[FilterCriterion(
"Median MT IC50 Score",
"<",
"500",
exclude_nas=False
)],
).execute())
self.assertTrue(cmp(
output_file.name,
os.path.join(self.test_data_path, "Test.filtered.lt.tsv"),
False
))
def test_less_or_equal(self):
output_file = tempfile.NamedTemporaryFile()
self.assertFalse(Filter(
os.path.join(
self.test_data_path,
'Test.combined.parsed.tsv'
),
output_file.name,
[FilterCriterion(
"Median MT IC50 Score",
"<=",
"500",
exclude_nas=False
)],
).execute())
self.assertTrue(cmp(
output_file.name,
os.path.join(self.test_data_path, "Test.filtered.le.tsv"),
False
))
def test_equal(self):
output_file = tempfile.NamedTemporaryFile()
self.assertFalse(Filter(
os.path.join(
self.test_data_path,
'Test.combined.parsed.tsv'
),
output_file.name,
[FilterCriterion(
"Median MT IC50 Score",
"==",
"500",
exclude_nas=False
)],
).execute())
self.assertTrue(cmp(
output_file.name,
os.path.join(self.test_data_path, "Test.filtered.eq.tsv"),
False
))
def test_greater_or_equal(self):
output_file = tempfile.NamedTemporaryFile()
self.assertFalse(Filter(
os.path.join(
self.test_data_path,
'Test.combined.parsed.tsv'
),
output_file.name,
[FilterCriterion(
"Median MT IC50 Score",
">=",
"500",
exclude_nas=False
)],
).execute())
self.assertTrue(cmp(
output_file.name,
os.path.join(self.test_data_path, "Test.filtered.ge.tsv"),
False
))
def test_greater_than(self):
output_file = tempfile.NamedTemporaryFile()
self.assertFalse(Filter(
os.path.join(
self.test_data_path,
'Test.combined.parsed.tsv'
),
output_file.name,
[FilterCriterion(
"Median MT IC50 Score",
">",
"500",
exclude_nas=False
)],
).execute())
self.assertTrue(cmp(
output_file.name,
os.path.join(self.test_data_path, "Test.filtered.gt.tsv"),
False
))
def test_NA(self):
output_file = tempfile.NamedTemporaryFile()
self.assertFalse(Filter(
os.path.join(
self.test_data_path,
'Test.combined.parsed.tsv'
),
output_file.name,
[FilterCriterion(
"Tumor RNA Depth",
">",
"100",
exclude_nas=False
)],
).execute())
self.assertTrue(cmp(
output_file.name,
os.path.join(self.test_data_path, "Test.filtered.NA.tsv"),
False
))
def test_exclude_NA(self):
output_file = tempfile.NamedTemporaryFile()
self.assertFalse(Filter(
os.path.join(
self.test_data_path,
'Test.combined.parsed.tsv'
),
output_file.name,
[FilterCriterion(
"Tumor RNA Depth",
">",
"100",
exclude_nas=True
)],
).execute())
self.assertTrue(cmp(
output_file.name,
os.path.join(self.test_data_path, "Test.filtered.exclude_NA.tsv"),
False
))
def test_inf(self):
output_file = tempfile.NamedTemporaryFile()
self.assertFalse(Filter(
os.path.join(
self.test_data_path,
'input.inf.tsv'
),
output_file.name,
[FilterCriterion(
"Corresponding Fold Change",
">",
"100",
exclude_nas=True
)],
).execute())
self.assertTrue(cmp(
output_file.name,
os.path.join(self.test_data_path, "output.inf.tsv"),
False
))
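# Illustrative sketch (not part of the original suite): Filter takes a list of
# criteria, so several FilterCriterion objects can be combined in a single
# pass. The file names below are hypothetical.
# Filter(
#     "input.tsv",
#     "output.tsv",
#     [
#         FilterCriterion("Median MT IC50 Score", "<", "500", exclude_nas=False),
#         FilterCriterion("Tumor RNA Depth", ">", "100", exclude_nas=True),
#     ],
# ).execute()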
|
d1284a710fe6c315d06565133c81991ba0a12bb3
|
018b3938de032c476442936fcae108b7e2101c2f
|
/jira_agile_metrics/cli_test.py
|
5098bfc95d49e9b63d00e8666ac78aa6f0ad5fba
|
[
"MIT"
] |
permissive
|
DeloitteDigitalUK/jira-agile-metrics
|
f18c62eb760c278286a95d077ed2dabfebd83ad6
|
1fb00bfa9e80db21e1aba2a4ec61eb9fe9d4c111
|
refs/heads/master
| 2023-07-09T16:45:21.799194
| 2023-06-29T12:32:47
| 2023-06-29T12:32:47
| 131,981,157
| 240
| 96
|
MIT
| 2023-06-29T12:32:49
| 2018-05-03T10:44:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,098
|
py
|
cli_test.py
|
import json
import tempfile
from .cli import (
override_options,
run_command_line,
configure_argument_parser,
get_trello_client,
)
def test_override_options():
class FauxArgs:
def __init__(self, opts):
self.__dict__.update(opts)
for k, v in opts.items():
setattr(self, k, v)
options = {"one": 1, "two": 2}
override_options(options, FauxArgs({}))
assert json.dumps(options) == json.dumps({"one": 1, "two": 2})
options = {"one": 1, "two": 2}
override_options(options, FauxArgs({"one": 11}))
assert json.dumps(options) == json.dumps({"one": 11, "two": 2})
options = {"one": 1, "two": 2}
override_options(options, FauxArgs({"three": 3}))
assert json.dumps(options) == json.dumps({"one": 1, "two": 2})
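# Illustrative note (not part of the original tests): the assertions above pin
# down override_options semantics: only keys already present in `options` are
# overridden by a matching argparse attribute; unknown attributes are ignored.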
def test_run_command_line_with_trello_client(mocker):
config = """
Connection:
Type: trello
Query: project = "JLF"
Workflow:
Backlog: Open
In Progress:
- In Progress
- Reopened
Done:
- Resolved
- Closed
Output:
# CSV files with raw data for input to other
# tools or further analysis in a spreadsheet
# If you use .json or .xlsx as the extension,
# you can get JSON data files or Excel
# spreadsheets instead
Cycle time data:
- cycletime.csv
- cycletime.json
CFD data: cfd.csv
"""
mock_get_trello_client = mocker.patch(
"jira_agile_metrics.cli.get_trello_client"
)
mocker.patch("jira_agile_metrics.cli.QueryManager")
with tempfile.NamedTemporaryFile(mode="w", delete=False) as config_file:
config_file.write(config)
config_file.flush()
parser = configure_argument_parser()
args = parser.parse_args([config_file.name])
run_command_line(parser, args)
mock_get_trello_client.assert_called_once()
def test_get_trello_client(mocker):
mock_trello = mocker.patch("jira_agile_metrics.cli.TrelloClient")
get_trello_client(
{"username": "me", "key": "my_key", "token": "my_token"}, {}
)
mock_trello.assert_called_once()
|
7c8a2154e2e36a69c8afb5dba42f860e43cd3c9c
|
afbeee6a3a83946449e5fccf7c74457461ed921f
|
/docs/source/reference/plots/factory/vectors_labels_plot.py
|
179c8f5475fc22866097de8381c7585599bb7e6f
|
[
"MIT"
] |
permissive
|
K3D-tools/K3D-jupyter
|
d69e541de90835415be5516d3e6758b1fcd530d2
|
5973d30947f6bc80b2a50ba260f198bec57ddfc1
|
refs/heads/main
| 2023-09-01T20:41:01.159202
| 2023-08-26T20:45:56
| 2023-08-26T20:45:56
| 44,377,817
| 859
| 134
|
MIT
| 2023-08-26T20:33:59
| 2015-10-16T10:14:20
|
Python
|
UTF-8
|
Python
| false
| false
| 686
|
py
|
vectors_labels_plot.py
|
import k3d
import numpy as np
def generate():
o = np.array([[1, 2, 3],
[2, -3, 0]]).astype(np.float32)
v = np.array([[1, 1, 1],
[-4, 2, 3]]).astype(np.float32)
labels = ['(1, 1, 1)', '(2, -3, 0)']
plt_vectors = k3d.vectors(origins=o,
vectors=v,
origin_color=0x000000,
head_color=0x488889,
line_width=0.2,
use_head=False,
labels=labels)
plot = k3d.plot()
plot += plt_vectors
plot.snapshot_type = 'inline'
return plot.get_snapshot()
|
e616c870159da21fc0fbf9962c4433c49e3fb73e
|
5f1881006aaf4f3c2515f375ad29c15fd6612de2
|
/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/unit.py
|
4dd3390e650795788d6462af5c8f89e9691ebd97
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
microsoft/ContextualSP
|
4edb598d40f683f9a1143b92a9d24e1066d51ec4
|
4198ebce942f4afe7ddca6a96ab6f4464ade4518
|
refs/heads/master
| 2023-08-02T22:08:40.503853
| 2023-07-14T07:22:50
| 2023-07-14T07:22:50
| 255,534,819
| 332
| 70
|
MIT
| 2023-07-25T19:23:48
| 2020-04-14T07:01:54
|
Python
|
UTF-8
|
Python
| false
| false
| 272
|
py
|
unit.py
|
import abc
import typing
class Unit(metaclass=abc.ABCMeta):
"""Process unit do not persive state (i.e. do not need fit)."""
@abc.abstractmethod
def transform(self, input_: typing.Any):
"""Abstract base method, need to be implemented in subclass."""
|
60ccb9cd76271cfb37b101336876ab9b85649085
|
79cd7118917561ab5b8d25f04143e0975578b74f
|
/pytorch_widedeep/utils/deeptabular_utils.py
|
234c65b96aab2bcd40c799b3c1e592b075eabb69
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
jrzaurin/pytorch-widedeep
|
aac80263ba8e94d36b41fb1f47181a66471d7594
|
74f1ab6feb2e231fdb8c10478638d9e8d5cf3a47
|
refs/heads/master
| 2023-09-06T06:41:41.800801
| 2023-09-04T15:32:38
| 2023-09-04T15:32:38
| 107,763,164
| 1,036
| 124
|
Apache-2.0
| 2023-09-04T15:32:39
| 2017-10-21T08:11:44
|
Python
|
UTF-8
|
Python
| false
| false
| 9,761
|
py
|
deeptabular_utils.py
|
import warnings
import numpy as np
import torch
import pandas as pd
from scipy.ndimage import gaussian_filter1d
from sklearn.exceptions import NotFittedError
from scipy.signal.windows import triang
from pytorch_widedeep.wdtypes import List, Union, Tensor, Literal, Optional
from pytorch_widedeep.utils.general_utils import Alias
warnings.filterwarnings("ignore")
pd.options.mode.chained_assignment = None
__all__ = ["LabelEncoder", "find_bin", "get_kernel_window"]
class LabelEncoder:
r"""Label Encode categorical values for multiple columns at once
:information_source: **NOTE**:
LabelEncoder reserves 0 for `unseen` new categories. This is convenient
when defining the embedding layers, since we can just set padding idx to 0.
Parameters
----------
columns_to_encode: list, Optional, default = None
List of strings containing the names of the columns to encode. If
`None` all columns of type `object` in the dataframe will be label
encoded.
with_attention: bool, default = False
Boolean indicating whether the preprocessed data will be passed to an
attention-based model. Aliased as `for_transformer`.
shared_embed: bool, default = False
Boolean indicating if the embeddings will be "_shared_" when using
attention-based models. The idea behind `shared_embed` is described
in the Appendix A in the [TabTransformer paper](https://arxiv.org/abs/2012.06678):
'_The goal of having column embedding is to enable the model to
distinguish the classes in one column from those in the
other columns_'. In other words, the idea is to let the model learn
which column is embedded at the time. See: `pytorch_widedeep.models.transformers._layers.SharedEmbeddings`.
Attributes
----------
encoding_dict : Dict
Dictionary containing the encoding mappings in the format, e.g. : <br/>
`{'colname1': {'cat1': 1, 'cat2': 2, ...}, 'colname2': {'cat1': 1, 'cat2': 2, ...}, ...}`
inverse_encoding_dict : Dict
Dictionary containing the inverse encoding mappings in the format, e.g. : <br/>
`{'colname1': {1: 'cat1', 2: 'cat2', ...}, 'colname2': {1: 'cat1', 2: 'cat2', ...}, ...}`
"""
@Alias("with_attention", "for_transformer")
def __init__(
self,
columns_to_encode: Optional[List[str]] = None,
with_attention: bool = False,
shared_embed: bool = False,
):
self.columns_to_encode = columns_to_encode
self.shared_embed = shared_embed
self.with_attention = with_attention
self.reset_embed_idx = not self.with_attention or self.shared_embed
def fit(self, df: pd.DataFrame) -> "LabelEncoder":
"""Creates encoding attributes
Returns
-------
LabelEncoder
`LabelEncoder` fitted object
"""
df_inp = df.copy()
if self.columns_to_encode is None:
self.columns_to_encode = list(
df_inp.select_dtypes(include=["object"]).columns
)
else:
# sanity check to make sure all categorical columns are in an adequate
# format
for col in self.columns_to_encode:
df_inp[col] = df_inp[col].astype("O")
unique_column_vals = dict()
for c in self.columns_to_encode:
unique_column_vals[c] = df_inp[c].unique()
self.encoding_dict = dict()
if "cls_token" in unique_column_vals and self.shared_embed:
self.encoding_dict["cls_token"] = {"[CLS]": 0}
del unique_column_vals["cls_token"]
# leave 0 for padding/"unseen" categories
idx = 1
for k, v in unique_column_vals.items():
self.encoding_dict[k] = {
o: i + idx for i, o in enumerate(unique_column_vals[k])
}
idx = 1 if self.reset_embed_idx else idx + len(unique_column_vals[k])
self.inverse_encoding_dict = dict()
for c in self.encoding_dict:
self.inverse_encoding_dict[c] = {
v: k for k, v in self.encoding_dict[c].items()
}
self.inverse_encoding_dict[c][0] = "unseen"
return self
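    # Illustrative note (not part of the original class): when
    # with_attention=False (or shared_embed=True) every column's codes restart
    # at 1; for attention models without shared embeddings the codes keep
    # increasing across columns, so each column occupies a disjoint index
    # range in one shared embedding table.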
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""Label Encoded the categories in `columns_to_encode`
Returns
-------
pd.DataFrame
label-encoded dataframe
"""
try:
self.encoding_dict
except AttributeError:
raise NotFittedError(
"This LabelEncoder instance is not fitted yet. "
"Call 'fit' with appropriate arguments before using this LabelEncoder."
)
df_inp = df.copy()
# sanity check to make sure all categorical columns are in an adequate
# format
for col in self.columns_to_encode: # type: ignore
df_inp[col] = df_inp[col].astype("O")
for k, v in self.encoding_dict.items():
df_inp[k] = df_inp[k].apply(lambda x: v[x] if x in v.keys() else 0)
return df_inp
def fit_transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""Combines `fit` and `transform`
Examples
--------
>>> import pandas as pd
>>> from pytorch_widedeep.utils import LabelEncoder
>>> df = pd.DataFrame({'col1': [1,2,3], 'col2': ['me', 'you', 'him']})
>>> columns_to_encode = ['col2']
>>> encoder = LabelEncoder(columns_to_encode)
>>> encoder.fit_transform(df)
col1 col2
0 1 1
1 2 2
2 3 3
>>> encoder.encoding_dict
{'col2': {'me': 1, 'you': 2, 'him': 3}}
Returns
-------
pd.DataFrame
label-encoded dataframe
"""
return self.fit(df).transform(df)
def inverse_transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""Returns the original categories
Examples
--------
>>> import pandas as pd
>>> from pytorch_widedeep.utils import LabelEncoder
>>> df = pd.DataFrame({'col1': [1,2,3], 'col2': ['me', 'you', 'him']})
>>> columns_to_encode = ['col2']
>>> encoder = LabelEncoder(columns_to_encode)
>>> df_enc = encoder.fit_transform(df)
>>> encoder.inverse_transform(df_enc)
col1 col2
0 1 me
1 2 you
2 3 him
Returns
-------
pd.DataFrame
DataFrame with original categories
"""
for k, v in self.inverse_encoding_dict.items():
df[k] = df[k].apply(lambda x: v[x])
return df
def find_bin(
bin_edges: Union[np.ndarray, Tensor],
values: Union[np.ndarray, Tensor],
ret_value: bool = True,
) -> Union[np.ndarray, Tensor]:
"""Returns histograms left bin edge value or array indices from monotonically
increasing array of bin edges for each value in values.
If ret_value
Parameters
----------
bin_edges: Union[np.ndarray, Tensor]
monotonically increasing array of bin edges
values: Union[np.ndarray, Tensor]
values for which we want corresponding bins
ret_value: bool
        if True, return left bin edge values, else indices
Returns
-------
left_bin_edges: Union[np.ndarray, Tensor]
left bin edges
"""
if type(bin_edges) == np.ndarray and type(values) == np.ndarray:
indices: Union[np.ndarray, Tensor] = np.searchsorted(
bin_edges, values, side="left"
)
indices = np.where(
(indices == 0) | (indices == len(bin_edges)), indices, indices - 1
)
indices = np.where(indices != len(bin_edges), indices, indices - 2)
elif type(bin_edges) == Tensor and type(values) == Tensor:
bin_edges = bin_edges.to(values.device)
indices = torch.searchsorted(bin_edges, values, right=False)
indices = torch.where(
(indices == 0) | (indices == len(bin_edges)), indices, indices - 1
)
indices = torch.where(indices != len(bin_edges), indices, indices - 2)
else:
raise TypeError(
"Both input arrays must be of teh same type, either np.ndarray of Tensor"
)
return indices if not ret_value else bin_edges[indices] # type: ignore[index]
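# Illustrative example (not part of the original module):
# find_bin(np.array([0., 1., 2., 3.]), np.array([0.5, 2.7]))
# returns array([0., 2.]), the left edges of the bins containing each value;
# with ret_value=False it returns the indices array([0, 2]) instead.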
def _laplace(x, sigma: Union[int, float] = 2):
return np.exp(-abs(x) / sigma) / (2.0 * sigma)
def get_kernel_window(
kernel: Literal["gaussian", "triang", "laplace"] = "gaussian",
ks: int = 5,
sigma: Union[int, float] = 2,
) -> List[float]:
"""Procedure to prepare window of values from symetrical kernel function for smoothing of the distribution in
Label and Feature Distribution Smoothing (LDS & FDS).
Parameters
----------
kernel: Literal['gaussian', 'triang', 'laplace'] = 'gaussian'
choice of kernel for label distribution smoothing
ks: int = 5
kernel size, i.e. count of samples in symmetric window
sigma: Union[int,float] = 2
standard deviation of ['gaussian','laplace'] kernel
Returns
-------
kernel_window: list
list with values from the chosen kernel function
"""
half_ks = (ks - 1) // 2
if kernel == "gaussian":
base_kernel = [0.0] * half_ks + [1.0] + [0.0] * half_ks
kernel_window = gaussian_filter1d(base_kernel, sigma=sigma)
elif kernel == "triang":
kernel_window = triang(ks) / sum(triang(ks))
elif kernel == "laplace":
kernel_window = list(map(_laplace, np.arange(-half_ks, half_ks + 1)))
else:
raise ValueError("Kernel can be only ['gaussian', 'triang', 'laplace'].")
return kernel_window
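# Illustrative example (not part of the original module):
# get_kernel_window(kernel='triang', ks=5)
# returns a symmetric 5-element window [1/9, 2/9, 1/3, 2/9, 1/9] that sums to 1.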
|
2ec8da6ffc29a6b2ff17c5b08c7659051213328c
|
e3e2dc80c5827a4597949e458c723225f7dafb66
|
/petastorm/spark/spark_dataset_converter.py
|
9b6d0d51332422e13c2fc5d8064a1418fdb683ed
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"Python-2.0"
] |
permissive
|
uber/petastorm
|
5c2403a353bde1367b86f64664ed7c12d9cfe662
|
0b0775af42539189d913702d9695566431dabd8a
|
refs/heads/master
| 2023-09-01T22:23:46.457386
| 2023-02-03T00:10:35
| 2023-02-03T00:10:35
| 137,539,238
| 1,677
| 284
|
Apache-2.0
| 2023-05-12T14:47:41
| 2018-06-15T23:15:29
|
Python
|
UTF-8
|
Python
| false
| false
| 31,333
|
py
|
spark_dataset_converter.py
|
# Copyright (c) 2020 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import datetime
import logging
import os
import shutil
import threading
import time
import uuid
from distutils.version import LooseVersion
from multiprocessing.pool import ThreadPool
from typing import List, Any
import pyspark
from pyarrow import LocalFileSystem
from pyspark.sql.session import SparkSession
from pyspark.sql.types import ArrayType, DoubleType, FloatType
from six.moves.urllib.parse import urlparse
from petastorm import make_batch_reader
from petastorm.fs_utils import (FilesystemResolver,
get_filesystem_and_path_or_paths, normalize_dir_url)
from fsspec.core import strip_protocol
if LooseVersion(pyspark.__version__) < LooseVersion('3.0'):
def vector_to_array(_1, _2='float32'):
raise RuntimeError("Vector columns are only supported in pyspark>=3.0")
else:
from pyspark.ml.functions import vector_to_array # type: ignore # pylint: disable=import-error
DEFAULT_ROW_GROUP_SIZE_BYTES = 32 * 1024 * 1024
logger = logging.getLogger(__name__)
def _get_spark_session():
"""Get or create spark session. Note: This function can only be invoked from driver side."""
if pyspark.TaskContext.get() is not None:
# This is a safety check.
raise RuntimeError('_get_spark_session should not be invoked from executor side.')
return SparkSession.builder.getOrCreate()
_parent_cache_dir_url = None
def _get_parent_cache_dir_url():
"""Get parent cache dir url from `petastorm.spark.converter.parentCacheDirUrl`
We can only set the url config once.
"""
global _parent_cache_dir_url # pylint: disable=global-statement
conf_url = _get_spark_session().conf \
.get(SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF, None)
if conf_url is None:
raise ValueError(
"Please set the spark config {}.".format(SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF))
conf_url = normalize_dir_url(conf_url)
_check_parent_cache_dir_url(conf_url)
_parent_cache_dir_url = conf_url
logger.info(
'Read %s %s', SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF, _parent_cache_dir_url)
return _parent_cache_dir_url
def _default_delete_dir_handler(dataset_url):
resolver = FilesystemResolver(dataset_url)
fs = resolver.filesystem()
_dataset_url = strip_protocol(dataset_url)
if isinstance(fs, LocalFileSystem):
# pyarrow has a bug: LocalFileSystem.delete() is not implemented.
# https://issues.apache.org/jira/browse/ARROW-7953
# We can remove this branch once ARROW-7953 is fixed.
local_path = _dataset_url
if os.path.exists(local_path):
shutil.rmtree(local_path, ignore_errors=False)
else:
if fs.exists(_dataset_url):
fs.delete(_dataset_url, recursive=True)
_delete_dir_handler = _default_delete_dir_handler
def register_delete_dir_handler(handler):
"""Register a handler for delete a directory url.
    :param handler: A deletion function which takes a directory url as its argument.
        If ``None``, the default handler is used; note that the default handler
        uses the libhdfs3 driver.
"""
global _delete_dir_handler # pylint: disable=global-statement
if handler is None:
_delete_dir_handler = _default_delete_dir_handler
else:
_delete_dir_handler = handler
def _delete_cache_data_atexit(dataset_url):
try:
_delete_dir_handler(dataset_url)
except Exception as e: # pylint: disable=broad-except
logger.warning('Delete cache data %s failed due to %s', dataset_url, repr(e))
def _get_horovod_rank_and_size():
"""Get rank and size from environment, return (rank, size), if failed, return (``None``, ``None``)"""
rank_env = ['HOROVOD_RANK', 'OMPI_COMM_WORLD_RANK', 'PMI_RANK']
size_env = ['HOROVOD_SIZE', 'OMPI_COMM_WORLD_SIZE', 'PMI_SIZE']
for rank_var, size_var in zip(rank_env, size_env):
rank = os.environ.get(rank_var)
size = os.environ.get(size_var)
if rank is not None and size is not None:
return int(rank), int(size)
elif rank is not None or size is not None:
return None, None
return None, None
def _check_rank_and_size_consistent_with_horovod(petastorm_reader_kwargs):
"""Check whether the ``cur_shard`` and ``shard_count`` args are consistent with horovod environment variables.
    If they are not consistent, log a warning message and return ``False``.
    If no related horovod environment variables are set, return ``True``.
"""
hvd_rank, hvd_size = _get_horovod_rank_and_size()
cur_shard = petastorm_reader_kwargs.get('cur_shard')
shard_count = petastorm_reader_kwargs.get('shard_count')
if hvd_rank is not None and hvd_size is not None:
if cur_shard != hvd_rank or shard_count != hvd_size:
logger.warning(
'The petastorm reader arguments cur_shard(%d) and '
                'shard_count(%d) are not consistent with the horovod '
                'environment hvd_rank(%d) and hvd_size(%d). If you want '
                'each horovod worker to train on its corresponding data shard, '
'you should set argument `cur_shard` to be `hvd.rank()` '
'and argument `shard_count` to be `hvd.size()`.',
cur_shard, shard_count, hvd_rank, hvd_size)
return False
return True
class SparkDatasetConverter(object):
"""A `SparkDatasetConverter` object holds one materialized spark dataframe and
can be used to make one or more tensorflow datasets or torch dataloaders.
The `SparkDatasetConverter` object is picklable and can be used in remote
processes.
See `make_spark_converter`
"""
PARENT_CACHE_DIR_URL_CONF = 'petastorm.spark.converter.parentCacheDirUrl'
def __init__(self, cache_dir_url, file_urls, dataset_size):
"""
:param cache_dir_url: A string denoting the path to store the cache files.
:param file_urls: a list of parquet file url list of this dataset.
:param dataset_size: An int denoting the number of rows in the dataframe.
"""
self.cache_dir_url = cache_dir_url
self.file_urls = file_urls
self.dataset_size = dataset_size
def __len__(self):
"""
:return: dataset size
"""
return self.dataset_size
@staticmethod
def _check_and_set_overriden_petastorm_args(petastorm_reader_kwargs, num_epochs, workers_count):
# override some arguments default values of petastorm reader
petastorm_reader_kwargs['num_epochs'] = num_epochs
if workers_count is None:
# TODO: generate a best tuned value for default worker count value
workers_count = 4
petastorm_reader_kwargs['workers_count'] = workers_count
_check_rank_and_size_consistent_with_horovod(petastorm_reader_kwargs)
def make_tf_dataset(
self,
batch_size=None,
prefetch=None,
num_epochs=None,
workers_count=None,
shuffling_queue_capacity=None,
**petastorm_reader_kwargs
):
"""Make a tensorflow dataset.
This method will do the following two steps:
1) Open a petastorm reader on the materialized dataset dir.
2) Create a tensorflow dataset based on the reader created in (1)
:param batch_size: The number of items to return per batch. Default ``None``.
            If ``None``, the current implementation sets the batch size to 32; in the future,
            ``None`` will denote an auto-tuned best value for the batch size.
:param prefetch: Prefetch size for tensorflow dataset. If ``None`` will use
tensorflow autotune size. Note only available on tensorflow>=1.14
:param num_epochs: An epoch is a single pass over all rows in the dataset.
Setting ``num_epochs`` to ``None`` will result in an infinite number
of epochs.
:param workers_count: An int for the number of workers to use in the
            reader pool. This is only used for the thread or process pool.
            ``None`` denotes an auto-tuned best value (the current implementation
            always uses 4 workers when auto-tuning, but this may be improved in the future).
Default value ``None``.
:param shuffling_queue_capacity: An int specifying the number of items to fill into a queue
from which items are sampled each step to form batches. The larger the capacity, the
better shuffling of the elements within the dataset. The default value of ``None``
results in no shuffling.
:param petastorm_reader_kwargs: arguments for `petastorm.make_batch_reader()`,
exclude these arguments: ``dataset_url``, ``num_epochs``, ``workers_count``.
:return: a context manager for a `tf.data.Dataset` object.
            When exiting the returned context manager, the reader
            will be closed.
"""
self._check_and_set_overriden_petastorm_args(
petastorm_reader_kwargs, num_epochs=num_epochs, workers_count=workers_count)
return TFDatasetContextManager(
self.file_urls,
batch_size=batch_size,
prefetch=prefetch,
petastorm_reader_kwargs=petastorm_reader_kwargs,
shuffling_queue_capacity=shuffling_queue_capacity)
def make_torch_dataloader(self,
batch_size=32,
num_epochs=None,
workers_count=None,
shuffling_queue_capacity=0,
data_loader_fn=None,
**petastorm_reader_kwargs):
"""Make a PyTorch DataLoader.
This method will do the following two steps:
1) Open a petastorm reader on the materialized dataset dir.
2) Create a PyTorch DataLoader based on the reader created in (1)
        :param batch_size: The number of items to return per batch. Default 32.
:param num_epochs: An epoch is a single pass over all rows in the
dataset. Setting ``num_epochs`` to ``None`` will result in an
infinite number of epochs.
:param workers_count: An int for the number of workers to use in the
            reader pool. This is only used for the thread or process pool.
            Default value ``None``, which means using the default value from
            `petastorm.make_batch_reader()`. This may be auto-tuned in the future.
:param shuffling_queue_capacity: Queue capacity is passed to the underlying
:class:`tf.RandomShuffleQueue` instance. If set to 0, no shuffling will be done.
:param data_loader_fn: A function (or class) that generates a
`torch.utils.data.DataLoader` object. The default value of ``None`` uses
`petastorm.pytorch.DataLoader`.
:param petastorm_reader_kwargs: arguments for `petastorm.make_batch_reader()`,
exclude these arguments: ``dataset_url``, ``num_epochs``, ``workers_count``.
:return: a context manager for a `torch.utils.data.DataLoader` object.
            When exiting the returned context manager, the reader
            will be closed.
"""
self._check_and_set_overriden_petastorm_args(
petastorm_reader_kwargs, num_epochs=num_epochs, workers_count=workers_count)
return TorchDatasetContextManager(
self.file_urls,
batch_size=batch_size,
petastorm_reader_kwargs=petastorm_reader_kwargs,
shuffling_queue_capacity=shuffling_queue_capacity,
data_loader_fn=data_loader_fn)
def delete(self):
"""Delete cache files at self.cache_dir_url."""
_remove_cache_metadata_and_data(self.cache_dir_url)
class TFDatasetContextManager(object):
"""A context manager that manages the creation and termination of a
:class:`petastorm.Reader`.
"""
def __init__(
self,
parquet_file_url_list,
batch_size,
prefetch,
petastorm_reader_kwargs,
shuffling_queue_capacity,
):
"""
:param parquet_file_url_list: A string specifying the parquet file URL list.
:param batch_size: batch size for tensorflow dataset.
        :param prefetch: the prefetch size for the tensorflow dataset
:param petastorm_reader_kwargs: other arguments for petastorm reader
:param shuffling_queue_capacity: the shuffle queue capacity for the tensorflow dataset
"""
self.parquet_file_url_list = parquet_file_url_list
self.batch_size = batch_size
self.prefetch = prefetch
self.petastorm_reader_kwargs = petastorm_reader_kwargs
self.shuffling_queue_capacity = shuffling_queue_capacity
def __enter__(self):
# import locally to avoid importing tensorflow globally.
from petastorm.tf_utils import make_petastorm_dataset
import tensorflow.compat.v1 as tf # pylint: disable=import-error
_wait_file_available(self.parquet_file_url_list)
self.reader = make_batch_reader(self.parquet_file_url_list, **self.petastorm_reader_kwargs)
# unroll dataset
dataset = make_petastorm_dataset(self.reader).flat_map(
tf.data.Dataset.from_tensor_slices)
if self.shuffling_queue_capacity:
dataset = dataset.shuffle(self.shuffling_queue_capacity)
# TODO: auto tune best batch size in default case.
batch_size = self.batch_size or 32
dataset = dataset.batch(batch_size=batch_size)
prefetch = self.prefetch
if prefetch is None:
if LooseVersion(tf.__version__) >= LooseVersion('1.14'):
                # We can let tensorflow auto-tune the prefetch size
prefetch = tf.data.experimental.AUTOTUNE
else:
prefetch = 1
dataset = dataset.prefetch(prefetch)
return dataset
def __exit__(self, exc_type, exc_value, exc_traceback):
self.reader.stop()
self.reader.join()
class TorchDatasetContextManager(object):
"""A context manager that manages the creation and termination of a
:class:`petastorm.Reader`.
"""
def __init__(self,
parquet_file_url_list,
batch_size,
petastorm_reader_kwargs,
shuffling_queue_capacity,
data_loader_fn):
"""
:param parquet_file_url_list: A string specifying the parquet file URL list.
        :param batch_size: The number of items to return per batch.
:param petastorm_reader_kwargs: other arguments for petastorm reader
:param shuffling_queue_capacity: Queue capacity is passed to the underlying
:class:`tf.RandomShuffleQueue` instance. If set to 0, no shuffling will be done.
:param data_loader_fn: function to generate the PyTorch DataLoader.
See `SparkDatasetConverter.make_torch_dataloader()` for the definitions
of the other parameters.
"""
self.parquet_file_url_list = parquet_file_url_list
self.batch_size = batch_size
self.petastorm_reader_kwargs = petastorm_reader_kwargs
self.shuffling_queue_capacity = shuffling_queue_capacity
self.data_loader_fn = data_loader_fn
def __enter__(self):
from petastorm.pytorch import DataLoader
_wait_file_available(self.parquet_file_url_list)
self.reader = make_batch_reader(self.parquet_file_url_list, **self.petastorm_reader_kwargs)
data_loader_fn = self.data_loader_fn or DataLoader
self.loader = data_loader_fn(reader=self.reader,
batch_size=self.batch_size,
shuffling_queue_capacity=self.shuffling_queue_capacity)
return self.loader
def __exit__(self, exc_type, exc_value, exc_traceback):
self.reader.stop()
self.reader.join()
def _get_df_plan(df):
return df._jdf.queryExecution().analyzed()
class CachedDataFrameMeta(object):
def __init__(self, df, parent_cache_dir_url, row_group_size, compression_codec, dtype):
self.row_group_size = row_group_size
self.compression_codec = compression_codec
        # Note: the metadata will hold the dataframe plan, but it won't
        # hold the dataframe object (the dataframe plan does not reference
        # the dataframe object).
# This means the dataframe can be released by spark gc.
self.df_plan = _get_df_plan(df)
self.cache_dir_url = None
self.dtype = dtype
self.parent_cache_dir_url = parent_cache_dir_url
@classmethod
def create_cached_dataframe_meta(cls, df, parent_cache_dir_url, row_group_size,
compression_codec, dtype):
meta = cls(df, parent_cache_dir_url, row_group_size, compression_codec, dtype)
meta.cache_dir_url = _materialize_df(
df,
parent_cache_dir_url=parent_cache_dir_url,
parquet_row_group_size_bytes=row_group_size,
compression_codec=compression_codec,
dtype=dtype)
return meta
_cache_df_meta_list: List[Any] = [] # TODO(Yevgeni): can be more precise with the type (instead of Any)
_cache_df_meta_list_lock = threading.Lock()
def _is_spark_local_mode():
return _get_spark_session().conf.get('spark.master').strip().lower().startswith('local')
def _check_url(dir_url):
"""Check dir url, will check scheme, raise error if empty scheme"""
parsed = urlparse(dir_url)
if not parsed.scheme:
raise ValueError(
'ERROR! A scheme-less directory url ({}) is no longer supported. '
'Please prepend "file://" for local filesystem.'.format(dir_url))
def _normalize_databricks_dbfs_url(url, err_msg):
if not (
url.startswith("file:/dbfs/") or
url.startswith("file:///dbfs/") or
url.startswith("dbfs:///") or
(url.startswith("dbfs:/") and not url.startswith("dbfs://"))
):
raise ValueError(err_msg)
if url.startswith("dbfs:///"):
# convert it to a dbfs fuse path
url = "file:/dbfs/" + url[len("dbfs:///"):]
elif url.startswith("dbfs:/"):
url = "file:/dbfs/" + url[len("dbfs:/"):]
return url
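# Illustrative examples (not part of the original module; `err` stands for the
# err_msg argument):
# _normalize_databricks_dbfs_url('dbfs:/mnt/data', err)   -> 'file:/dbfs/mnt/data'
# _normalize_databricks_dbfs_url('dbfs:///mnt/data', err) -> 'file:/dbfs/mnt/data'
# 'file:/dbfs/...' urls pass through unchanged; anything else raises ValueError(err).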
def _check_parent_cache_dir_url(dir_url):
"""Check dir url whether is suitable to be used as parent cache directory."""
_check_url(dir_url)
fs, dir_path = get_filesystem_and_path_or_paths(dir_url)
if 'DATABRICKS_RUNTIME_VERSION' in os.environ and not _is_spark_local_mode():
if isinstance(fs, LocalFileSystem):
# User need to use dbfs fuse URL.
if not dir_path.startswith('/dbfs/'):
logger.warning(
"Usually, when running on databricks spark cluster, you should specify a dbfs fuse path "
"for %s, like: 'file:/dbfs/path/to/cache_dir', otherwise, you should mount NFS to this "
"directory '%s' on all nodes of the cluster, e.g. using EFS.",
SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF, dir_url)
def _make_sub_dir_url(dir_url, name):
parsed = urlparse(dir_url)
new_path = parsed.path + '/' + name
return parsed._replace(path=new_path).geturl()
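# Illustrative example (not part of the original module):
# _make_sub_dir_url('file:///tmp/cache', 'abc') -> 'file:///tmp/cache/abc'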
def _cache_df_or_retrieve_cache_data_url(df, parent_cache_dir_url,
parquet_row_group_size_bytes,
compression_codec,
dtype):
"""Check whether the df is cached.
If so, return the existing cache file path.
If not, cache the df into the cache_dir in parquet format and return the
cache file path.
Use atexit to delete the cache before the python interpreter exits.
:param df: A :class:`pyspark.sql.DataFrame` object.
:param parquet_row_group_size_bytes: An int denoting the number of bytes
in a parquet row group.
:param compression_codec: Specify compression codec.
:param dtype: ``None``, 'float32' or 'float64', specifying the precision of the floating-point
elements in the output dataset. Integer types will remain unchanged. If ``None``, all types
will remain unchanged. Default 'float32'.
:return: A string denoting the path of the saved parquet file.
"""
# TODO
# Improve the cache list by hash table (Note we need use hash(df_plan +
# row_group_size)
with _cache_df_meta_list_lock:
df_plan = _get_df_plan(df)
for meta in _cache_df_meta_list:
if meta.row_group_size == parquet_row_group_size_bytes and \
meta.compression_codec == compression_codec and \
meta.df_plan.sameResult(df_plan) and \
meta.dtype == dtype and \
meta.parent_cache_dir_url == parent_cache_dir_url:
return meta.cache_dir_url
# do not find cached dataframe, start materializing.
cached_df_meta = CachedDataFrameMeta.create_cached_dataframe_meta(
df, parent_cache_dir_url, parquet_row_group_size_bytes,
compression_codec, dtype)
_cache_df_meta_list.append(cached_df_meta)
return cached_df_meta.cache_dir_url
def _remove_cache_metadata_and_data(cache_dir_url):
with _cache_df_meta_list_lock:
for i in range(len(_cache_df_meta_list)):
if _cache_df_meta_list[i].cache_dir_url == cache_dir_url:
_cache_df_meta_list.pop(i)
break
_delete_dir_handler(cache_dir_url)
def _convert_precision(df, dtype):
if dtype is None:
return df
if dtype != "float32" and dtype != "float64":
raise ValueError("dtype {} is not supported. \
Use 'float32' or float64".format(dtype))
source_type, target_type = (DoubleType, FloatType) \
if dtype == "float32" else (FloatType, DoubleType)
logger.warning("Converting floating-point columns to %s", dtype)
for field in df.schema:
col_name = field.name
if isinstance(field.dataType, source_type):
df = df.withColumn(col_name, df[col_name].cast(target_type()))
elif isinstance(field.dataType, ArrayType) and \
isinstance(field.dataType.elementType, source_type):
df = df.withColumn(col_name, df[col_name].cast(ArrayType(target_type())))
return df
def _convert_vector(df, dtype):
from pyspark.ml.linalg import VectorUDT
from pyspark.mllib.linalg import VectorUDT as OldVectorUDT
for field in df.schema:
col_name = field.name
if isinstance(field.dataType, VectorUDT) or \
isinstance(field.dataType, OldVectorUDT):
df = df.withColumn(col_name,
vector_to_array(df[col_name], dtype))
return df
def _gen_cache_dir_name():
"""Generate a random directory name for storing dataset.
The directory name format is:
{datetime}-{spark_application_id}-{uuid4}
    This helps users find the spark application related to a directory,
    so that if the atexit deletion fails, they can delete it manually.
"""
uuid_str = str(uuid.uuid4())
time_str = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
appid = _get_spark_session().sparkContext.applicationId
return '{time}-appid-{appid}-{uuid}'.format(time=time_str, appid=appid, uuid=uuid_str)
def _materialize_df(df, parent_cache_dir_url, parquet_row_group_size_bytes,
compression_codec, dtype):
dir_name = _gen_cache_dir_name()
save_to_dir_url = _make_sub_dir_url(parent_cache_dir_url, dir_name)
df = _convert_vector(df, dtype)
df = _convert_precision(df, dtype)
df.write \
.option("compression", compression_codec) \
.option("parquet.block.size", parquet_row_group_size_bytes) \
.parquet(save_to_dir_url)
logger.info('Materialize dataframe to url %s successfully.', save_to_dir_url)
atexit.register(_delete_cache_data_atexit, save_to_dir_url)
return save_to_dir_url
_FILE_AVAILABILITY_WAIT_TIMEOUT_SECS = 30
def _wait_file_available(url_list):
"""Waiting about _FILE_AVAILABILITY_WAIT_TIMEOUT_SECS seconds (default 30 seconds) to make sure
all files are available for reading. This is useful in some filesystems, such as S3 which only
providing eventually consistency.
"""
fs, path_list = get_filesystem_and_path_or_paths(url_list)
logger.debug('Waiting some seconds until all parquet-store files appear at urls %s', ','.join(url_list))
def wait_for_file(path):
end_time = time.time() + _FILE_AVAILABILITY_WAIT_TIMEOUT_SECS
while time.time() < end_time:
if fs.exists(path):
return True
time.sleep(0.1)
return False
pool = ThreadPool(64)
try:
results = pool.map(wait_for_file, path_list)
failed_list = [url for url, result in zip(url_list, results) if not result]
if failed_list:
            raise RuntimeError('Timeout while waiting for all parquet-store files to appear at urls {failed_list}. '
                               'Please check whether these files were saved successfully when materializing the dataframe.'
.format(failed_list=','.join(failed_list)))
finally:
pool.close()
pool.join()
def _check_dataset_file_median_size(url_list):
fs, path_list = get_filesystem_and_path_or_paths(url_list)
RECOMMENDED_FILE_SIZE_BYTES = 50 * 1024 * 1024
# TODO: also check file size for other file system.
if isinstance(fs, LocalFileSystem):
pool = ThreadPool(64)
try:
file_size_list = pool.map(os.path.getsize, path_list)
if len(file_size_list) > 1:
mid_index = len(file_size_list) // 2
median_size = sorted(file_size_list)[mid_index] # take the larger one if tie
if median_size < RECOMMENDED_FILE_SIZE_BYTES:
logger.warning('The median size %d B (< 50 MB) of the parquet files is too small. '
'Total size: %d B. Increase the median file size by calling df.repartition(n) or '
'df.coalesce(n), which might help improve the performance. Parquet files: %s, ...',
median_size, sum(file_size_list), url_list[0])
finally:
pool.close()
pool.join()
def make_spark_converter(
df,
parquet_row_group_size_bytes=DEFAULT_ROW_GROUP_SIZE_BYTES,
compression_codec=None,
dtype='float32'
):
"""Convert a spark dataframe into a :class:`SparkDatasetConverter` object.
It will materialize a spark dataframe to the directory specified by
spark conf 'petastorm.spark.converter.parentCacheDirUrl'.
The dataframe will be materialized in parquet format, and we can specify
`parquet_row_group_size_bytes` and `compression_codec` for the parquet
format. See params documentation for details.
The returned `SparkDatasetConverter` object will hold the materialized
dataframe, and can be used to make one or more tensorflow datasets or
torch dataloaders.
We can explicitly delete the materialized dataframe data, see
    `SparkDatasetConverter.delete`; when the spark application exits,
    a best-effort attempt is made to delete the materialized dataframe data.
:param df: The :class:`pyspark.sql.DataFrame` object to be converted,
or a string of path pointing to the directory that stores the dataframe data
as parquet format, on databricks runtime, the path must be a dbfs
fuse path like 'file:/dbfs/xxx' or a dbfs path like 'dbfs:/xxx'.
:param parquet_row_group_size_bytes: An int denoting the number of bytes
in a parquet row group when materializing the dataframe.
:param compression_codec: Specify compression codec.
It can be one of 'uncompressed', 'bzip2', 'gzip', 'lz4', 'snappy', 'deflate'.
Default ``None``. If ``None``, it will leave the data uncompressed.
:param dtype: ``None``, 'float32' or 'float64', specifying the precision of the floating-point
elements in the output dataset. Integer types will remain unchanged. If ``None``, all types
will remain unchanged. Default 'float32'.
:return: a :class:`SparkDatasetConverter` object that holds the
materialized dataframe and can be used to make one or more tensorflow
datasets or torch dataloaders.
"""
parent_cache_dir_url = _get_parent_cache_dir_url()
if isinstance(df, str):
dataset_dir_url = df
if 'DATABRICKS_RUNTIME_VERSION' in os.environ:
dataset_dir_url = _normalize_databricks_dbfs_url(
dataset_dir_url,
"On databricks runtime, if `df` argument is a string, it must be a dbfs "
"fuse path like 'file:/dbfs/xxx' or a dbfs path like 'dbfs:/xxx'."
)
else:
# TODO: Improve default behavior to be automatically choosing the best way.
compression_codec = compression_codec or "uncompressed"
if compression_codec.lower() not in \
['uncompressed', 'bzip2', 'gzip', 'lz4', 'snappy', 'deflate']:
raise RuntimeError(
"compression_codec should be None or one of the following values: "
"'uncompressed', 'bzip2', 'gzip', 'lz4', 'snappy', 'deflate'")
dataset_dir_url = _cache_df_or_retrieve_cache_data_url(
df, parent_cache_dir_url, parquet_row_group_size_bytes, compression_codec, dtype)
# TODO: improve this by read parquet file metadata to get count
# Currently spark can make sure to only read the minimal column
# so count will usually be fast.
spark = _get_spark_session()
spark_df = spark.read.parquet(dataset_dir_url)
dataset_size = spark_df.count()
parquet_file_url_list = list(spark_df._jdf.inputFiles())
_check_dataset_file_median_size(parquet_file_url_list)
return SparkDatasetConverter(dataset_dir_url, parquet_file_url_list, dataset_size)
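# Illustrative end-to-end sketch (not part of the original module; the paths
# and the `spark`/`df` objects are assumptions):
# spark.conf.set(SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF,
#                'file:///tmp/petastorm_cache')
# converter = make_spark_converter(df)
# with converter.make_tf_dataset(batch_size=64) as dataset:
#     ...  # train a tf model on `dataset`
# with converter.make_torch_dataloader(batch_size=64) as dataloader:
#     ...  # iterate over batches with torch
# converter.delete()  # explicitly remove the cached parquet files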
|
36d8db00928e9dc63c1825680753c90f4ca3e0b6
|
69d8d91954f6623f3674d52d734d589f72383628
|
/openstack_dashboard/test/test_plugins/panel_config/_10_admin_add_panel.py
|
ed490549ea45fc52fd917849570547a1590b4e30
|
[
"Apache-2.0"
] |
permissive
|
openstack/horizon
|
d031cebe126c06ad9717bbc52790b3d890e8661e
|
7896fd8c77a6766a1156a520946efaf792b76ca5
|
refs/heads/master
| 2023-09-04T06:57:58.069907
| 2023-09-01T20:17:10
| 2023-09-01T20:17:10
| 2,665,166
| 1,060
| 1,175
|
Apache-2.0
| 2023-08-07T02:33:44
| 2011-10-28T13:12:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,329
|
py
|
_10_admin_add_panel.py
|
# The name of the panel to be added to HORIZON_CONFIG. Required.
PANEL = 'plugin_panel'
# The name of the dashboard the PANEL associated with. Required.
PANEL_DASHBOARD = 'admin'
# The name of the panel group the PANEL is associated with.
PANEL_GROUP = 'admin'
# Python panel class of the PANEL to be added.
ADD_PANEL = \
'openstack_dashboard.test.test_panels.plugin_panel.panel.PluginPanel'
# A list of Django applications to be prepended to ``INSTALLED_APPS``
ADD_INSTALLED_APPS = ['openstack_dashboard.test.test_panels.plugin_panel']
# A list of AngularJS modules to be loaded when Angular bootstraps.
ADD_ANGULAR_MODULES = ['testAngularModule']
# A list of javascript files to be included in the compressed set of files
ADD_JS_FILES = ['plugin_panel/plugin_module.js']
# A list of javascript spec files to be included in the compressed set of files
ADD_JS_SPEC_FILES = ['plugin_panel/plugin.spec.js']
# A list of scss files to be included in the compressed set of files
ADD_SCSS_FILES = ['plugin_panel/plugin.scss']
# A list of tuples of xstatic modules and files to be included
# in the compressed set of files
ADD_XSTATIC_MODULES = [
('xstatic.pkg.foo', None)
]
# A list of extensible header views to be displayed
ADD_HEADER_SECTIONS = \
['openstack_dashboard.test.test_panels.plugin_panel.views.TestBannerView',]
|
199321bce526cab9ae7a0f7bb4ece53a1606e802
|
8ef61b6b4cd0c533524d23c3e77a9caf70077023
|
/tests/network/test_providers__tvmaze.py
|
9e00a22707f6c6381e866fe34c279ef4e3242b62
|
[
"MIT"
] |
permissive
|
jkwill87/mnamer
|
73e68d27d184193218605484a0db1c935f256ca6
|
82da0712ab99e9d80c60adda3db6a3225fdf49b4
|
refs/heads/main
| 2023-08-30T21:48:34.567239
| 2023-08-15T19:31:04
| 2023-08-15T19:31:04
| 107,178,928
| 656
| 69
|
MIT
| 2023-09-11T15:02:41
| 2017-10-16T20:23:27
|
Python
|
UTF-8
|
Python
| false
| false
| 6,450
|
py
|
test_providers__tvmaze.py
|
import pytest
from mnamer.exceptions import MnamerNotFoundException
from mnamer.metadata import MetadataEpisode
from mnamer.providers import TvMaze
from tests import EPISODE_META, JUNK_TEXT, TEST_DATE
pytestmark = [
pytest.mark.network,
pytest.mark.tvmaze,
pytest.mark.flaky(reruns=1),
]
@pytest.fixture(scope="session")
def provider():
return TvMaze()
@pytest.mark.parametrize("meta", EPISODE_META.values(), ids=list(EPISODE_META))
def test_search_id_tvmaze_and_season_and_episode(meta: dict, provider: TvMaze):
query = MetadataEpisode(
id_tvmaze=meta["id_tvmaze"], season=meta["season"], episode=meta["episode"]
)
results = list(provider.search(query))
assert results
for result in results:
assert result.title == meta["title"]
def test_search_id_tvmaze_and_season_and_episode__no_hits(provider: TvMaze):
query = MetadataEpisode(id_tvmaze=JUNK_TEXT, season=1, episode=1)
with pytest.raises(MnamerNotFoundException):
next(provider.search(query))
@pytest.mark.parametrize("meta", EPISODE_META.values(), ids=list(EPISODE_META))
def test_search_id_tvmaze_and_date(meta: dict, provider: TvMaze):
query = MetadataEpisode(id_tvmaze=meta["id_tvmaze"], date=meta["date"])
results = list(provider.search(query))
assert results
for result in results:
assert result.title == meta["title"]
def test_search_id_tvmaze_and_date__no_hits(provider: TvMaze):
query = MetadataEpisode(id_tvmaze=JUNK_TEXT, date=TEST_DATE)
with pytest.raises(MnamerNotFoundException):
next(provider.search(query))
@pytest.mark.parametrize("meta", EPISODE_META.values(), ids=list(EPISODE_META))
def test_search_id_tvdb_and_date(meta: dict, provider: TvMaze):
query = MetadataEpisode(id_tvdb=meta["id_tvdb"], date=meta["date"])
results = list(provider.search(query))
assert results
for result in results:
assert result.title == meta["title"]
def test_search_id_tvdb_and_date__no_hits(provider: TvMaze):
query = MetadataEpisode(id_tvdb=JUNK_TEXT, date=TEST_DATE)
with pytest.raises(MnamerNotFoundException):
next(provider.search(query))
@pytest.mark.parametrize("meta", EPISODE_META.values(), ids=list(EPISODE_META))
def test_search_id_tvmaze_and_season(meta: dict, provider: TvMaze):
query = MetadataEpisode(id_tvmaze=meta["id_tvmaze"], season=meta["season"])
results = list(provider.search(query))
assert results
assert any(result.title == meta["title"] for result in results)
assert all(result.season == meta["season"] for result in results)
@pytest.mark.parametrize("meta", EPISODE_META.values(), ids=list(EPISODE_META))
def test_search_id_tvmaze_and_episode(meta: dict, provider: TvMaze):
query = MetadataEpisode(id_tvmaze=meta["id_tvmaze"], episode=meta["episode"])
results = list(provider.search(query))
assert results
assert any(result.title == meta["title"] for result in results)
assert all(result.episode == meta["episode"] for result in results)
@pytest.mark.parametrize("meta", EPISODE_META.values(), ids=list(EPISODE_META))
def test_search_id_tvmaze(meta: dict, provider: TvMaze):
query = MetadataEpisode(id_tvmaze=meta["id_tvmaze"])
results = list(provider.search(query))
assert results
assert any(result.title == meta["title"] for result in results)
def test_search_id_tvmaze__no_hits(provider: TvMaze):
query = MetadataEpisode(id_tvmaze=JUNK_TEXT)
with pytest.raises(MnamerNotFoundException):
next(provider.search(query))
@pytest.mark.parametrize("meta", EPISODE_META.values(), ids=list(EPISODE_META))
def test_search_id_tvdb(meta: dict, provider: TvMaze):
query = MetadataEpisode(id_tvdb=meta["id_tvdb"])
results = list(provider.search(query))
assert results
assert any(result.title == meta["title"] for result in results)
def test_search_id_tvdb__no_hits(provider: TvMaze):
query = MetadataEpisode(id_tvdb=JUNK_TEXT)
with pytest.raises(MnamerNotFoundException):
next(provider.search(query))
@pytest.mark.parametrize("meta", EPISODE_META.values(), ids=list(EPISODE_META))
def test_search_series_and_season_and_episode(meta: dict, provider: TvMaze):
query = MetadataEpisode(
series=meta["series"], season=meta["season"], episode=meta["episode"]
)
results = list(provider.search(query))
assert results
assert any(result.title == meta["title"] for result in results)
def test_search_series_and_season_and_episode__no_hits(provider: TvMaze):
query = MetadataEpisode(series=JUNK_TEXT, season=1, episode=1)
with pytest.raises(MnamerNotFoundException):
next(provider.search(query))
@pytest.mark.parametrize("meta", EPISODE_META.values(), ids=list(EPISODE_META))
def test_search_series_and_season(meta: dict, provider: TvMaze):
query = MetadataEpisode(series=meta["series"], season=meta["season"])
results = list(provider.search(query))
assert results
assert any(result.title == meta["title"] for result in results)
def test_search_series_and_season__no_hits(provider: TvMaze):
query = MetadataEpisode(series=JUNK_TEXT, season=1)
with pytest.raises(MnamerNotFoundException):
next(provider.search(query))
@pytest.mark.parametrize("meta", EPISODE_META.values(), ids=list(EPISODE_META))
def test_search_series_and_episode(meta: dict, provider: TvMaze):
query = MetadataEpisode(series=meta["series"], episode=meta["episode"])
results = list(provider.search(query))
assert results
assert any(result.title == meta["title"] for result in results)
def test_search_series_and_episode__no_hits(provider: TvMaze):
query = MetadataEpisode(series=JUNK_TEXT, episode=1)
with pytest.raises(MnamerNotFoundException):
next(provider.search(query))
@pytest.mark.parametrize("meta", EPISODE_META.values(), ids=list(EPISODE_META))
def test_search_series(meta: dict, provider: TvMaze):
query = MetadataEpisode(series=meta["series"])
results = list(provider.search(query))
assert results
assert any(result.title == meta["title"] for result in results)
def test_search_series__no_hits(provider: TvMaze):
query = MetadataEpisode(series=JUNK_TEXT)
with pytest.raises(MnamerNotFoundException):
next(provider.search(query))
def test_search__no_hits(provider: TvMaze):
query = MetadataEpisode()
with pytest.raises(MnamerNotFoundException):
next(provider.search(query))
|
4254244a006411c17592bdd840f91ffa4ec9cdef
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/intentions/paramTypeInNewGoogleDocString_after.py
|
3427a1256e77f475bfd6c4f3b8559264e3ec496b
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 71
|
py
|
paramTypeInNewGoogleDocString_after.py
|
def f(x):
"""
Args:
x (object):
"""
return 42
|
5912c5105fc327385d1ea559dbdeb6a05699d24b
|
163bad17c2ba0aeeb05e29d1a7f870e675ee28eb
|
/hikyuu/util/singleton.py
|
4bd348a3f88db227e645efc651001bec990c6dab
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
fasiondog/hikyuu
|
8b7bc4fd99ff915c621586a480c3663ef3fae464
|
86b0fa5b0e847d9a04905bca93660a7a33fc9fc2
|
refs/heads/master
| 2023-09-03T15:04:33.983389
| 2023-09-03T11:17:46
| 2023-09-03T11:17:46
| 5,103,141
| 1,884
| 547
|
MIT
| 2023-09-06T16:53:51
| 2012-07-18T23:21:42
|
C++
|
UTF-8
|
Python
| false
| false
| 764
|
py
|
singleton.py
|
#!/usr/bin/python
# -*- coding: utf8 -*-
# cp936
#==========================================================
# History
# 1. 20200822, Added by fasiondog
#==========================================================
import threading
class SingletonType(type):
"""基于 metalclass 实现单例
示例:
class MyClass(metaclass=SingletonType):
def __init__(self,name):
self.name = name
"""
_instance_lock = threading.Lock()
def __call__(cls, *args, **kwargs):
if not hasattr(cls, "_instance"):
with SingletonType._instance_lock:
if not hasattr(cls, "_instance"):
cls._instance = super(SingletonType, cls).__call__(*args, **kwargs)
return cls._instance
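
# Minimal usage sketch: a class with SingletonType as its metaclass always
# yields the same instance; constructor arguments on later calls are ignored
# because __init__ is never re-run.
if __name__ == "__main__":
    class Config(metaclass=SingletonType):
        def __init__(self, name):
            self.name = name

    a = Config("first")
    b = Config("second")  # the first instance is returned unchanged
    assert a is b and a.name == "first"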
|
6ff695df794174d470f3a93489d82796ffb211b4
|
ef77030fb29c5bb01c6dd553df1fb52d4ef9fb69
|
/migrate.py
|
066711f333e047abb89f1c920ca6157a541574e3
|
[
"MIT"
] |
permissive
|
Run1e/AceBot
|
8c9aaaf27b710fd70b6c85d4c65bb4be71af9117
|
b4640f633947f0a9fd902e8ddc95a4b667b85602
|
refs/heads/master
| 2023-08-08T10:30:14.962290
| 2023-06-26T23:24:59
| 2023-06-26T23:24:59
| 95,386,761
| 107
| 38
|
MIT
| 2023-07-20T15:47:24
| 2017-06-25T21:28:32
|
Python
|
UTF-8
|
Python
| false
| false
| 30,547
|
py
|
migrate.py
|
import asyncio
import asyncpg
from config import DB_BIND
with open("migrate.sql", "r") as fp:
    QUERIES = fp.read()
def log(connection, message):
print(message)
async def main():
db = await asyncpg.connect(DB_BIND)
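    # asyncpg invokes the listener as callback(connection, message) for
    # server log/NOTICE output raised while the migration runs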
db.add_log_listener(log)
async with db.transaction():
await db.execute(QUERIES)
# populate facts if empty
if await db.fetchval("SELECT COUNT(id) FROM facts") == 0:
for fact in facts.split("\n"):
await db.execute("INSERT INTO facts (content) VALUES ($1)", fact)
facts = """
If you somehow found a way to extract all of the gold from the bubbling core of our lovely little planet, you would be able to cover all of the land in a layer of gold up to your knees.
McDonalds calls frequent buyers of their food “heavy users.”
The average person spends 6 months of their lifetime waiting on a red light to turn green.
The largest recorded snowflake was in Keogh, MT during year 1887, and was 15 inches wide.
You burn more calories sleeping than you do watching television.
There are more lifeforms living on your skin than there are people on the planet.
Southern sea otters have flaps of skin under their forelegs that act as pockets. When diving, they use these pouches to store rocks and food.
In 1386 a pig in France was executed by public hanging for the murder of a child.
One in every five adults believe that aliens are hiding in our planet disguised as humans.
If you believe that you’re truly one in a million, there are still approximately 7,184 more people out there just like you.
A single cloud can weight more than 1 million pounds.
James Buchanan, the 15th U.S. president continuously bought slaves with his own money in order to free them.
There are more possible iterations of a game of chess than there are atoms in the observable universe.
The average person walks the equivalent of three times around the world in a lifetime.
Men are 6 times more likely to be struck by lightning than women.
Coca-Cola would be green if coloring wasn’t added to it.
You cannot snore and dream at the same time.
The world’s oldest piece of chewing gum is over 9,000 years old!
A coyote can hear a mouse moving underneath a foot of snow.
Bolts of lightning can shoot out of an erupting volcano.
New York drifts about one inch farther away from London each year.
A U.S. dollar bill can be folded approximately 4,000 times in the same place before it will tear.
A sneeze travels about 100 miles per hour.
Earth has traveled more than 5,000 miles in the past 5 minutes.
It would take a sloth one month to travel one mile.
10% of the World’s population is left handed.
A broken clock is right two times every day.
According to Amazon, the most highlighted books on Kindle are the Bible, the Steve Jobs biography, and The Hunger Games.
Bob Marley’s last words to his son before he died were “Money can’t buy life.”
A mole can dig a tunnel that is 300 feet long in only one night.
A hippo’s wide open mouth is big enough to fit a 4-foot-tall child in.
Chewing gum while you cut an onion will help keep you from crying.
If you were to stretch a Slinky out until it’s flat, it would measure 87 feet long.
Al Capone’s business card said he was a used furniture dealer.
There are more collect calls on Father’s Day than on any other day of the year.
Banging your head against a wall burns 150 calories an hour.
95% of people text things they could never say in person.
A crocodile can’t poke its tongue out.
It is physically impossible for pigs to look up into the sky.
Guinness Book of Records holds the record for being the book most often stolen from Public Libraries.
Drying fruit depletes it of 30-80% of its vitamin and antioxidant content.
A 2010 study found that 48% of soda fountains contained fecal bacteria, and 11% contained E. Coli.
9 out of 10 Americans are deficient in potassium.
Blueberries will not ripen until they are picked.
About 150 people per year are killed by coconuts.
About half of all Americans are on a diet on any given day.
A hardboiled egg will spin, but a soft-boiled egg will not.
Avocados are poisonous to birds.
Chewing gum burns about 11 calories per hour.
The number of animals killed for meat every hour in the U.S. is 500,000.
If you try to suppress a sneeze, you can rupture a blood vessel in your head or neck and die.
Celery has negative calories! It takes more calories to eat a piece of celery than the celery has in it to begin with. It’s the same with apples!
More people are allergic to cow’s milk than any other food.
Only 8% of dieters will follow a restrictive weight loss plan (like the HCG Drops Diet, garcinia cambogia diet).
Coconut water can be used as blood plasma.
The word “gorilla” is derived from a Greek word meaning, “A tribe of hairy women.”
Prisoners in Canadian war camps during World War II were treated so well that a lot of them didn’t want to leave when the war was over.
Gorillas burp when they are happy.
In New York, it is illegal to sell a haunted house without telling the buyer.
In 2006 someone tried to sell New Zealand on eBay. The price got up to $3,000 before eBay shut it down.
It is considered good luck in Japan when a sumo wrestler makes your baby cry.
A man from Britain changed his name to Tim Pppppppppprice to make it harder for telemarketers to pronounce.
A woman from California once tried to sue the makers of Cap’n Crunch, because the Crunch Berries contained “no berries of any kind.”
Apple launched a clothing line in 1986. It was described as a “train wreck” by others.
In Japan, crooked teeth are considered cute and attractive.
A Swedish woman lost her wedding ring, and found it 16 years later – growing on a carrot in her garden.
Donald Duck comics were banned from Finland because he doesn’t wear pants.
The chance of you dying on the way to get lottery tickets is actually greater than your chance of winning.
Cherophobia is the fear of fun.
The toothpaste “Colgate” in Spanish translates to “go hang yourself.”
Pirates wore earrings because they believed it improved their eyesight.
Human thigh bones are stronger than concrete.
Cockroaches can live for several weeks with their heads cut off, because their brains are located inside their body. They would eventually die from being unable to eat.
Scientists have tracked butterflies that travel over 3,000 miles.
To produce a single pound of honey, a single bee would have to visit 2 million flowers.
The population is expected to rise to 10.8 billion by the year 2080.
You breathe on average about 8,409,600 times a year.
More than 60,000 people are flying over the United States in an airplane right now.
Hamsters run up to 8 miles at night on a wheel.
A waterfall in Hawaii goes up sometimes instead of down.
A church in the Czech Republic has a chandelier made entirely of human bones.
Under the Code of Hammurabi, bartenders who watered down beer were punished by execution.
Our eyes are always the same size from birth, but our nose and ears never stop growing.
During your lifetime, you will produce enough saliva to fill two swimming pools.
You are 1% shorter in the evening than in the morning.
The elephant is the only mammal that can’t jump!
Most dust particles in your house are made from dead skin!
If 33 million people held hands, they could make it all the way around the equator.
Earth is the only planet that is not named after a god.
The bloodhound is the only animal whose evidence is admissible in court.
You are born with 300 bones, but by the time you are an adult you only have 206.
A ten-gallon hat will only hold ¾ of a gallon.
Just like fingerprints, everyone has different tongue prints.
ATMs were originally thought to be failures, because the only users were prostitutes and gamblers who didn’t want to deal with tellers face to face.
Of all the words in the English language, the word “set” has the most definitions. The word “run” comes in close second.
A “jiffy” is the scientific name for 1/100th of a second.
One fourth of the bones in your body are located in your feet.
111,111,111 × 111,111,111 = 12,345,678,987,654,321
Blue-eyed people tend to have the highest tolerance of alcohol.
A traffic jam lasted for more than 10 days, with cars only moving 0.6 miles a day.
Every year more than 2500 left-handed people are killed from using right-handed products.
More than 50% of the people in the world have never made or received a telephone call.
The cigarette lighter was invented before the match.
Sea otters hold hands when they sleep so that they do not drift apart.
The Golden Poison Dart Frog’s skin has enough toxins to kill 100 people.
The male ostrich can roar just like a lion.
Mountain lions can whistle.
Cows kill more people than sharks do.
Cats have 32 muscles in each of their ears.
A tarantula can live without food for more than two years.
The tongue of a blue whale weighs more than most elephants!
Ever wonder where the phrase “It’s raining cats and dogs” comes from? In the 17th century many homeless cats and dogs would drown and float down the streets of England, making it look like it literally rained cats and dogs.
It takes about 3,000 cows to supply enough leather for the NFL for only one year.
Male dogs lift their legs when they are urinating for a reason. They are trying to leave their mark higher so that it gives off the message that they are tall and intimidating.
A hummingbird weighs less than a penny.
An ostrich’s eye is bigger than its brain.
Dogs are capable of understanding up to 250 words and gestures and have demonstrated the ability to do simple mathematical calculations.
A sheep, a duck and a rooster were the first passengers in a hot air balloon.
Birds don’t urinate.
A flea can jump up to 200 times its own height. That is the equivalent of a human jumping the Empire State Building.
There are 5 temples in Kyoto, Japan that have blood stained ceilings. The ceilings are made from the floorboards of a castle where warriors killed themselves after a long hold-off against an army. To this day, you can still see the outlines and footprints.
There is a snake, called the boomslang, whose venom causes you to bleed out from every orifice on your body. You may even turn blue from internal bleeding, and it can take up to 5 days to die from the bleeding.
Saturn’s density is low enough that the planet would float in water.
68% of the universe is dark energy, and 27% is dark matter; both are invisible, even with our powerful telescopes. This means we have only seen 5% of the universe from earth.
The founders of Google were willing to sell Google for $1 million to Excite in 1999, but Excite turned them down. Google is now worth $527 Billion.
In the past 20 years, scientists have found over 1,000 planets outside of our solar system.
There are 60,000 miles of blood vessels in the human body.
If a pregnant woman has organ damage, the baby in her womb sends stem cells to help repair the organ.
If you started with $0.01 and doubled your money every day, it would take 27 days to become a millionaire.
Only one person in two billion will live to be 116 or older.
A person can live without food for about a month, but only about a week without water.
On average, 12 newborns will be given to the wrong parents daily.
You can’t kill yourself by holding your breath.
Human birth control pills work on gorillas.
There are no clocks in Las Vegas gambling casinos.
Beetles taste like apples, wasps like pine nuts, and worms like fried bacon.
Months that begin on a Sunday will always have a “Friday the 13th.”
The placement of a donkey’s eyes in its head enables it to see all four feet at all times!
Some worms will eat themselves if they can’t find any food!
Dolphins sleep with one eye open!
It is impossible to sneeze with your eyes open.
In France, it is legal to marry a dead person.
Russia has a larger surface area than Pluto.
There’s an opera house on the U.S.–Canada border where the stage is in one country and half the audience is in another.
The harder you concentrate on falling asleep, the less likely you are to fall asleep.
You can’t hum while holding your nose closed.
Women have twice as many pain receptors on their body than men. But a much higher pain tolerance.
There are more stars in space than there are grains of sand on every beach in the world.
For every human on Earth there are 1.6 million ants. The total weight of all those ants, however, is about the same as all the humans.
On Jupiter and Saturn it rains diamonds.
It is impossible to lick your elbow.
A shrimp’s heart is in its head.
People say "Bless you" when you sneeze because when you sneeze, your heart stops for a millisecond.
In a study of 200,000 ostriches over a period of 80 years, no one reported a single case where an ostrich buried its head in the sand.
Rats and horses can’t vomit.
If you sneeze too hard, you can fracture a rib.
If you keep your eyes open by force when you sneeze, you might pop an eyeball out.
Rats multiply so quickly that in 18 months, two rats could have over a million descendants.
Wearing headphones for just an hour will increase the bacteria in your ear by 700 times.
In every episode of Seinfeld there is a Superman somewhere.
35% of the people who use personal ads for dating are already married.
23% of all photocopier faults worldwide are caused by people sitting on them and photocopying their butts.
Most lipstick contains fish scales.
Over 75% of people who read this will try to lick their elbow.
A crocodile can’t move its tongue and cannot chew. Its digestive juices are so strong that it can digest a steel nail.
Money notes are not made from paper, they are made mostly from a special blend of cotton and linen. In 1932, when a shortage of cash occurred in Tenino, Washington, USA, notes were made out of wood for a brief period.
The Grammy Awards were introduced to counter the threat of rock music. In the late 1950s, a group of record executives were alarmed by the explosive success of rock ‘n’ roll, considering it a threat to “quality” music.
Tea is said to have been discovered in 2737 BC by a Chinese emperor when some tea leaves accidentally blew into a pot of boiling water. The tea bag was introduced in 1908 by Thomas Sullivan.
Over the last 150 years the average height of people in industrialized nations has increased about 4 inches. In the 19th century, American men were the tallest in the world, averaging 5′6″. Today, the average height for American men is 5′7″, compared to 5′8″ for Swedes, and 5′8.5″ for the Dutch. The tallest nation in the world is the Watusis of Burundi.
In 1955 the richest woman in the world was Mrs. Hetty Green Wilks, who left an estate of $95 million in a will that was found in a tin box with four pieces of soap. Queen Elizabeth of Britain and Queen Beatrix of the Netherlands count under the 10 wealthiest women in the world.
Joseph Niepce developed the world’s first photographic image in 1827. Thomas Edison and William Kennedy-Laurie Dickson introduced the film camera in 1894. But the first projection of an image on a screen was made by a German priest. In 1646, Athanasius Kircher used a candle or oil lamp to project hand-painted images onto a white screen.
In 1935 a writer named Dudley Nichols refused to accept the Oscar for his movie The Informer because the Writers Guild was on strike against the movie studios. In 1970 George C. Scott refused the Best Actor Oscar for Patton. In 1972 Marlon Brando refused the Oscar for his role in The Godfather.
The system of democracy was introduced 2,500 years ago in Athens, Greece. The oldest existing governing body operates in Althing, Iceland. It was established in 930 AD.
If the amount of water in your body is reduced by just 1%, you’ll feel thirsty. If it is reduced by 10%, you’ll die.
According to a study by the Economic Research Service, 27% of all food production in Western nations ends up in garbage cans. Yet, 1.2 billion people are underfed – the same number of people who are overweight.
Camels are called “ships of the desert” because of the way they move, not because of their transport capabilities. A dromedary has one hump and a Bactrian camel two humps. The humps are used as fat storage. Thus, an undernourished camel will not have a hump.
In the Durango desert in Mexico, there’s a creepy spot called the “Zone of Silence.” You can’t pick up clear TV or radio signals. And locals say fireballs sometimes appear in the sky.
Ethernet is a registered trademark of Xerox, Unix is a registered trademark of AT&T.
Bill Gates’ first business was Traf-O-Data, a company that created machines which recorded the number of vehicles passing a given point on a road.
Uranus’ orbital axis is tilted at over 90 degrees.
The famed U.S. Geological Survey astronomer Mr. Eugene Shoemaker trained the Apollo astronauts about craters, but never made it into space. Mr. Shoemaker had wanted to be an astronaut but was rejected because of a medical problem. His ashes were placed on board the Lunar Prospector spacecraft before it was launched on January 6, 1998. NASA crashed the probe into a crater on the moon in an attempt to learn if there is water on the moon.
Outside the U.S., Ireland is the largest software producing country in the world.
The first fossilized specimen of Australopithecus afarenisis was named Lucy after the paleontologists’ favorite song “Lucy in the Sky with Diamonds,” by the Beatles.
FIGlet, an ASCII font converter program, stands for Frank, Ian and Glenn’s LETters.
Every human spent about half an hour as a single cell.
Every year about 98% of atoms in your body are replaced.
Hot water is heavier than cold water.
Plutonium – first weighed on August 20th, 1942, by University of Chicago scientists Glenn Seaborg and his colleagues – was the first man-made element.
If you went out into space, you would explode before you suffocated because there’s no air pressure.
The radioactive substance Americium-241 is used in many smoke detectors.
The original IBM-PCs, that had hard drives, referred to the hard drives as Winchester drives. This is due to the fact that the original Winchester drive had a model number of 3030. This is, of course, a Winchester firearm.
Sound travels 15 times faster through steel than through the air.
On average, half of all false teeth have some form of radioactivity.
Only one satellite has been ever been destroyed by a meteor: the European Space Agency’s Olympus in 1993.
Starch is used as a binder in the production of paper. It is the use of a starch coating that controls ink penetration when printing. Cheaper papers do not use as much starch, and this is why your elbows get black when you are leaning over your morning paper.
Sterling silver is not pure silver. Because pure silver is too soft to be used in most tableware it is mixed with copper in the proportion of 92.5% silver to 7.5% copper.
A ball of glass will bounce higher than a ball of rubber. A ball of solid steel will bounce even higher.
A chip of silicon a quarter-inch square has the capacity of the original 1949 ENIAC computer, which occupied a city block.
An ordinary TNT bomb involves atomic reaction and, thus, could be called an atomic bomb. What we call an A-bomb involves nuclear reactions and should be called a nuclear bomb.
At a glance, the Celsius scale makes more sense than the Fahrenheit scale for temperature measuring. But its creator, Anders Celsius, was an oddball scientist. When he first developed his scale, he made the freezing of water 100 degrees and the boiling 0 degrees. No one dared point this out to him, so fellow scientists waited until Celsius died to change the scale.
At a jet plane’s speed of 620 mph, the length of the plane becomes one atom shorter than its original length.
The first full moon to occur on the winter solstice, December 22, commonly called the first day of winter, happened in 1999. Since a full moon on the winter solstice occurred in conjunction with a lunar perigee (point in the moon’s orbit that is closest to Earth), the moon appeared about 14% larger than it does at apogee (the point in its elliptical orbit that is farthest from Earth). Since the Earth is also several million miles closer to the sun at that time of the year than in the summer, sunlight striking the moon was about 7% stronger making it brighter. Also, this was the closest perigee of the Moon of the year since the moon’s orbit is constantly deforming. In places where the weather was clear and there was a snow cover, even car headlights were superfluous.
According to security equipment specialists, security systems that utilize motion detectors won’t function properly if walls and floors are too hot. When an infrared beam is used in a motion detector, it will pick up a person’s body temperature of 98.6 °F compared to the cooler walls and floor. If the room is too hot, the motion detector won’t register a change in the radiated heat of that person’s body when it enters the room and breaks the infrared beam. Your home’s safety might be compromised if you turn your air conditioning off or set the thermostat too high while on summer vacation.
Western Electric successfully brought sound to motion pictures and introduced systems of mobile communications which culminated in the cellular telephone.
On December 23, 1947, Bell Telephone Laboratories in Murray Hill, N.J., held a secret demonstration of the transistor which marked the foundation of modern electronics.
The wick of a trick candle has small amounts of magnesium in them. When you light the candle, you are also lighting the magnesium. When someone tries to blow out the flame, the magnesium inside the wick continues to burn and, in just a split second (or two or three), relights the wick.
Ostriches are often not taken seriously. They can run faster than horses, and the males can roar like lions.
Seals used for their fur get extremely sick when taken aboard ships.
Sloths take two weeks to digest their food.
Guinea pigs and rabbits can’t sweat.
The pet food company Ralston Purina recently introduced, from its subsidiary Purina Philippines, power chicken feed designed to help roosters build muscles for cockfighting, which is popular in many areas of the world. According to the Wall Street Journal, the cockfighting market is huge: The Philippines has five million roosters used for exactly that.
The porpoise is second to man as the most intelligent animal on the planet.
Young beavers stay with their parents for the first two years of their lives before going out on their own.
Skunks can accurately spray their smelly fluid as far as ten feet.
Deer can’t eat hay.
Gopher snakes in Arizona are not poisonous, but when frightened they may hiss and shake their tails like rattlesnakes.
On average, dogs have better eyesight than humans, although not as colorful.
The duckbill platypus can store as many as six hundred worms in the pouches of its cheeks.
The lifespan of a squirrel is about nine years.
North American oysters do not make pearls of any value.
Many sharks lay eggs, but hammerheads give birth to live babies that look like very small duplicates of their parents. Young hammerheads are usually born headfirst, with the tip of their hammer-shaped head folded backward to make them more streamlined for birth.
Gorillas sleep as much as fourteen hours per day.
A biological reserve has been made for golden toads because they are so rare.
There are more than fifty different kinds of kangaroos.
Jellyfish like salt water. A rainy season often reduces the jellyfish population by putting more fresh water into normally salty waters where they live.
The female lion does ninety percent of the hunting.
The odds of seeing three albino deer at once are one in seventy-nine billion, yet one man in Boulder Junction, Wisconsin, took a picture of three albino deer in the woods.
Cats often rub up against people and furniture to lay their scent and mark their territory. They do it this way, as opposed to the way dogs do it, because they have scent glands in their faces.
Cats sleep up to eighteen hours a day, but never quite as deep as humans. Instead, they fall asleep quickly and wake up intermittently to check to see if their environment is still safe.
Catnip, or Nepeta cataria, is an herb with nepetalactone in it. Many think that when cats inhale nepetalactone, it affects hormones that arouse sexual feelings, or at least alter their brain functioning to make them feel “high.” Catnip was originally made, using nepetalactone as a natural bug repellant, but roaming cats would rip up the plants before they could be put to their intended task.
The nematode Caenorhabditis elegans ages the equivalent of five human years for every day they live, so they usually die after about fourteen days. When stressed, though, the worm goes into a comatose state that can last for two or more months. The human equivalent would be to sleep for about two hundred years.
You can tell the sex of a horse by its teeth. Most males have 40, females have 36.
The 57 on Heinz ketchup bottle represents the varieties of pickle the company once had.
Your stomach produces a new layer of mucus every two weeks – otherwise it will digest itself.
The Declaration of Independence was written on hemp paper.
A raisin dropped in a glass of fresh champagne will bounce up and down continuously from the bottom of the glass to the top.
Susan Lucci is the daughter of Phyllis Diller.
315 entries in Webster’s 1996 Dictionary were misspelled.
During the chariot scene in “Ben-Hur” a small red car can be seen in the distance.
Warren Beatty and Shirley MacLaine are brother and sister.
Orcas (killer whales) kill sharks by torpedoing up into the shark’s stomach from underneath, causing the shark to explode.
Donald Duck comics were banned from Finland because he doesn’t wear any pants.
Ketchup was sold in the 1830s as medicine.
Upper and lower case letters are named “upper” and “lower” because in the time when all original print had to be set in individual letters, the “upper case” letters were stored in the case on top of the case that stored the smaller, “lower case” letters.
Leonardo da Vinci could write with one hand and draw with the other at the same time.
Because metal was scarce, the Oscars given out during World War II were made of wood.
The name Wendy was made up for the book Peter Pan, there was never a recorded Wendy before!
There are no words in the dictionary that rhyme with: orange, purple, and silver!
Leonardo Da Vinci invented scissors.
A tiny amount of liquor on a scorpion will make it instantly go mad and sting itself to death.
The mask used by Michael Myers in the original “Halloween” was a Captain Kirk mask painted white.
If you have three quarters, four dimes, and four pennies, you have $1.19. You also have the largest amount of money in coins without being able to make change for a dollar.
The glue on Israeli postage stamps is certified kosher.
Guinness Book of Records holds the record for being the book most often stolen from Public Libraries.
Astronauts are not allowed to eat beans before they go into space because passing wind in a space suit damages them.
The word “queue” is the only word in the English language that is still pronounced the same way when the last four letters are removed.
“Almost” is the longest word in the English language with all the letters in alphabetical order.
“Rhythm” is the longest English word without a vowel.
There is a city called Rome on every continent.
It’s against the law to have a pet dog in Iceland.
Your heart beats over 100,000 times a day.
Horatio Nelson, one of England’s most illustrious admirals was throughout his life, never able to find a cure for his sea-sickness.
The skeleton of Jeremy Bentham is present at all important meetings of the University of London.
Right-handed people live, on average, nine years longer than left-handed people.
Your ribs move about 5 million times a year, every time you breathe!
One quarter of the bones in your body, are in your feet!
The first known transfusion of blood was performed as early as 1667, when Jean-Baptiste, transfused two pints of blood from a sheep to a young man.
Fingernails grow nearly 4 times faster than toenails!
Women blink nearly twice as much as men.
Adolf Hitler was a vegetarian, and had only one testicle.
Honey is one of the only foods that do not spoil. Honey found in the tombs of Egyptian pharaohs has been tasted by archaeologists and found edible.
On average a hedgehog’s heart beats 300 times a minute.
More people are killed each year from bees than from snakes.
The average lead pencil will draw a line 35 miles long or write approximately 50,000 English words.
Camels have three eyelids to protect themselves from blowing sand.
The six official languages of the United Nations are: English, French, Arabic, Chinese, Russian and Spanish.
It’s against the law to burp, or sneeze in a church in Nebraska, USA.
The longest recorded flight of a chicken is 13 seconds.
Queen Elizabeth I. regarded herself as a paragon of cleanliness. She declared that she bathed once every three months, whether she needed it or not.
Slugs have 4 noses.
Owls are the only birds who can see the color blue.
A man named Charles Osborne had the hiccups for 69 years!
A giraffe can clean its ears with its 21-inch tongue!
The average person laughs 10 times a day!
If you yelled for 8 years, 7 months and 6 days you would have produced enough sound energy to heat one cup of coffee.
If you farted consistently for 6 years and 9 months, enough gas is produced to create the energy of an atomic bomb.
The human heart creates enough pressure when it pumps out to the body to squirt blood 30 feet.
A pig’s orgasm lasts 30 minutes.
The male praying mantis cannot copulate while its head is attached to its body. The female initiates sex by ripping the male’s head off.
The flea can jump 350 times its body length. It’s like a human jumping the length of a football field.
The catfish has over 27,000 taste buds.
Some lions mate over 50 times a day.
Butterflies taste with their feet.
The strongest muscle in the body is the tongue.
A cat’s urine glows under a black light.
Starfish have no brains.
Polar bears are left-handed.
Humans and dolphins are the only species that have sex for pleasure.
"""
if __name__ == "__main__":
asyncio.run(main())
|
d18bef91e4b0162e6bcd3f41c3e38bfb3db3b78b
|
6cdb2951be6088b11125f34047cddfb8f231169f
|
/models/DGI.py
|
ed5fdc64c31623a62e37f9d8c3b431e9cdaf7956
|
[] |
no_license
|
pcy1302/DMGI
|
4159d0307b675855782799209b802a0fe13fab36
|
06e17067dd919aa87ad5813e8090312a53223a89
|
refs/heads/master
| 2023-04-14T22:40:16.581565
| 2023-03-31T05:34:01
| 2023-03-31T05:34:01
| 205,034,242
| 133
| 31
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,756
|
py
|
DGI.py
|
# Code based on https://github.com/PetarV-/DGI/blob/master/models/dgi.py
import torch
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import torch.nn as nn
from embedder import embedder
from layers import GCN, Discriminator
import numpy as np
np.random.seed(0)
from evaluate import evaluate
class DGI(embedder):
def __init__(self, args):
embedder.__init__(self, args)
self.args = args
def training(self):
features_lst = [feature.to(self.args.device) for feature in self.features]
adj_lst = [adj_.to(self.args.device) for adj_ in self.adj]
final_embeds = []
for m_idx, (features, adj) in enumerate(zip(features_lst, adj_lst)):
metapath = self.args.metapaths_list[m_idx]
print("- Training on {}".format(metapath))
model = modeler(self.args).to(self.args.device)
optimiser = torch.optim.Adam(model.parameters(), lr=self.args.lr, weight_decay=self.args.l2_coef)
            cnt_wait = 0
            best = 1e9
b_xent = nn.BCEWithLogitsLoss()
for epoch in range(self.args.nb_epochs):
model.train()
optimiser.zero_grad()
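                # Corruption step for DGI: permute the node dimension of the
                # feature matrix so features no longer match the graph
                # structure; the shuffled features act as negative samples.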
idx = np.random.permutation(self.args.nb_nodes)
shuf_fts = features[:, idx, :].to(self.args.device)
lbl_1 = torch.ones(self.args.batch_size, self.args.nb_nodes)
lbl_2 = torch.zeros(self.args.batch_size, self.args.nb_nodes)
lbl = torch.cat((lbl_1, lbl_2), 1)
lbl = lbl.to(self.args.device)
logits = model(features, shuf_fts, adj, self.args.sparse, None, None, None)
loss = b_xent(logits, lbl)
if loss < best:
best = loss
cnt_wait = 0
torch.save(model.state_dict(), 'saved_model/best_{}_{}_{}.pkl'.format(self.args.dataset, self.args.embedder, metapath))
else:
cnt_wait += 1
if cnt_wait == self.args.patience:
break
loss.backward()
optimiser.step()
model.load_state_dict(torch.load('saved_model/best_{}_{}_{}.pkl'.format(self.args.dataset, self.args.embedder, metapath)))
# Evaluation
embeds, _ = model.embed(features, adj, self.args.sparse)
evaluate(embeds, self.idx_train, self.idx_val, self.idx_test, self.labels, self.args.device)
final_embeds.append(embeds)
embeds = torch.mean(torch.cat(final_embeds), 0).unsqueeze(0)
print("- Integrated")
evaluate(embeds, self.idx_train, self.idx_val, self.idx_test, self.labels, self.args.device)
class modeler(nn.Module):
def __init__(self, args):
super(modeler, self).__init__()
self.args = args
self.gcn = GCN(args.ft_size, args.hid_units, args.activation, args.drop_prob, args.isBias)
# one discriminator
self.disc = Discriminator(args.hid_units)
self.readout_func = self.args.readout_func
def forward(self, seq1, seq2, adj, sparse, msk, samp_bias1, samp_bias2):
h_1 = self.gcn(seq1, adj, sparse)
c = self.readout_func(h_1) # equation 9
c = self.args.readout_act_func(c)
h_2 = self.gcn(seq2, adj, sparse)
ret = self.disc(c, h_1, h_2, samp_bias1, samp_bias2)
return ret
# Detach the return variables
def embed(self, seq, adj, sparse):
h_1 = self.gcn(seq, adj, sparse)
c = self.readout_func(h_1) # positive summary vector
c = self.args.readout_act_func(c) # equation 9
return h_1.detach(), c.detach()
|
2a636cc6a8d5036e04a9bf01f429a377583f06c4
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/11_动态规划/dp分类/树上dp/Ways to Remove Leaves of a Tree.py
|
b67316c497a7d92b9f7267de0eba33a63095de95
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
Ways to Remove Leaves of a Tree.py
|
# Number of distinct orders in which the leaves of a binary tree can be removed
from math import comb
from typing import Tuple
class Tree:
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def solve(self, root: Tree) -> int:
def dfs(root: Tree) -> Tuple[int, int]:
"""返回:子树结点数,排序方案数"""
if not root:
return 0, 1
leftSize, leftRes = dfs(root.left)
rightSize, rightRes = dfs(root.right)
return (
leftSize + rightSize + 1,
leftRes * rightRes * comb(leftSize + rightSize, leftSize),
)
return dfs(root)[1]
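
# Quick sanity-check sketch: for a root with two single-node subtrees, either
# leaf can be removed first, giving comb(2, 1) = 2 distinct removal orders.
if __name__ == "__main__":
    root = Tree(1, Tree(2), Tree(3))
    assert Solution().solve(root) == 2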
|
3a12c849da684eb60b817322b1da8866debf05c3
|
5b78eb7c4fbf2c0ae1846507a60503caaf8de2f9
|
/aspynb/chapter03_problems.py
|
6acd4174ee2d5b1dc1bca66229230dc2e5dfb580
|
[
"MIT"
] |
permissive
|
minireference/noBSLAnotebooks
|
ff95a809cebfe91df9933be6305bcd6cbbbf1a6a
|
423c0041882268daa15d57c2472c82cf2b340e78
|
refs/heads/master
| 2022-04-30T03:55:57.096192
| 2022-03-14T19:43:04
| 2022-03-14T19:43:04
| 56,690,720
| 142
| 33
|
MIT
| 2018-06-17T15:56:37
| 2016-04-20T13:48:35
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,067
|
py
|
chapter03_problems.py
|
def cells():
'''
# 3/ Problem solutions
'''
'''
'''
from sympy import *
init_printing()
'''
'''
'''
'''
'''
## P3.1
'''
'''
'''
AUG = Matrix([
[1, 5, 25],
[2, 1, 32]])
AUG
'''
'''
AUG.rref()
'''
'''
'''
'''
'''
## P3.2
In the above solution we showed how to build the `AUG` matrix directly.
This time, we'll build `AUG` by row-joining (`row_join`) a matrix of coefficients and a vector of constants.
'''
'''
'''
A = Matrix([
[3, 3],
[2, S(3)/2]])
A
'''
'''
b = Matrix([6,5]) # b is a column vector
b
'''
'''
# row-join A and b to obtain the augmented matrix
AUG = A.row_join(b)
AUG
'''
'''
'''
### a) Alice
Let's obtain the matrix `AUGA` which is the result after Alice's row operation.
'''
'''
'''
AUGA = AUG.copy() # make a copy of AUG
AUGA[0,:] = AUGA[0,:]/3
AUGA
'''
'''
AUGA[1,:] = AUGA[1,:] - 2*AUGA[0,:]
AUGA
'''
'''
AUGA[1,:] = -2*AUGA[1,:]
AUGA
'''
'''
AUGA[0,:] = AUGA[0,:] - AUGA[1,:]
AUGA
'''
'''
'''
### b) Bob
'''
'''
'''
AUGB = AUG.copy()
AUGB[0,:] = AUGB[0,:] - AUGB[1,:]
AUGB
'''
'''
AUGB[1,:] = AUGB[1,:] - 2*AUGB[0,:]
AUGB
'''
'''
AUGB[1,:] = -1*S(2)/3*AUGB[1,:]
AUGB
'''
'''
AUGB[0,:] = AUGB[0,:] - S(3)/2*AUGB[1,:]
AUGB
'''
'''
'''
'''
'''
### c) Charlotte
'''
'''
'''
AUGC = AUG.copy()
AUGC[0,:], AUGC[1,:] = AUGC[1,:], AUGC[0,:]
AUGC
'''
'''
AUGC[0,:] = AUGC[0,:]/2
AUGC
'''
'''
AUGC[1,:] = AUGC[1,:] - 3*AUGC[0,:]
AUGC
'''
'''
AUGC[1,:] = S(4)/3*AUGC[1,:]
AUGC
'''
'''
AUGC[0,:] = AUGC[0,:] - S(3)/4*AUGC[1,:]
AUGC
'''
'''
'''
'''
'''
## P3.3
'''
'''
'''
# define augmented matrices for three systems of eqns. with unique sol'ns
AUGA = Matrix([
[ -1, -2, -2],
[ 3, 3, 0]])
AUGB = Matrix([
[ 1, -1, -2, 1],
[-2, 3, 3, -1],
[-1, 0, 1, 2]])
AUGC = Matrix([
[ 2, -2, 3, 2],
[ 1, -2, -1, 0],
[-2, 2, 2, 1]])
'''
'''
AUGA
'''
'''
AUGA.rref()
'''
'''
AUGB
'''
'''
AUGB.rref()
'''
'''
AUGC
'''
'''
AUGC.rref()
'''
'''
'''
'''
'''
## P3.4
These three systems of equations have infinitely many solutions.
'''
'''
'''
'''
### P3.4 a)
'''
'''
'''
AUGA = Matrix([
[ -1, -2, -2],
[ 3, 6, 6]])
AUGA
'''
'''
AUGA.rref()
'''
'''
AUGA[0:2,0:2].nullspace()
'''
'''
# the solutions to the system of equations represented by AUGA
# is of the form point + nullspace
point = AUGA.rref()[0][:,2]
nullspace = AUGA[0:2,0:2].nullspace()
'''
'''
# the point is also called the particular solution
point
'''
'''
# if the augmented matrix AUGA is [A|b], then the point satisfies A*point = b
print( AUGA[0:2,0:2]*point == AUGA[:,2] )
AUGA[0:2,0:2]*point
'''
'''
'''
#### Finding the null space
'''
'''
'''
# the nullspace of A is one dimensional and spanned by
n = nullspace[0]
n
# every vector n in the nullspace of A satisfies A*n=0
'''
'''
# so a solution to A*x=b is any (point + s*n) where s is any real number,
# since A*(point + s*n) = A*point + s*A*n = A*point + 0 = b.
# Let's verify the claim for values of s in the range -5,-4,-3,-2,-1,0,1,2,3,4,5
for s in range(-5,6):
print( AUGA[0:2,0:2]*(point + s*n),
AUGA[0:2,0:2]*(point + s*n) == AUGA[:,2] )
'''
'''
'''
### P3.4 b)
'''
'''
'''
AUGB = Matrix([
[ 1, -1, -2, 1],
[-2, 3, 3, -1],
[-1, 2, 1, 0]])
AUGB
'''
'''
AUGB.rref()
'''
'''
point_B = AUGB.rref()[0][:,3]
nullspace_B = AUGB[0:3,0:3].nullspace()[0]
s = symbols('s')
point_B + s*nullspace_B
'''
'''
'''
### P3.4 c)
'''
'''
'''
AUGC = Matrix([
[ 2, -2, 3, 2],
[ 0, 0, 5, 3],
[-2, 2, 2, 1]])
AUGC
'''
'''
AUGC.rref()
'''
'''
constants = AUGC.rref()[0][:,3]
# construct point_C by placing the constants into the location of the pivots
pivots = AUGC.rref()[1]
point_C = zeros(3,1)
for idx, pivot in enumerate(pivots):
point_C[pivot] = constants[idx]
nullspace_C = AUGC[0:3,0:3].nullspace()[0]
s = symbols('s')
point_C + s*nullspace_C
'''
'''
'''
'''
|
d3496bca5422af1d75a41827477874afa1576140
|
e65a4dbfbfb0e54e59787ba7741efee12f7687f3
|
/devel/electron24/files/patch-third__party_blink_renderer_build_scripts_run__with__pythonpath.py
|
7d56dad775befc32ee74f79c7f8c389753f0befe
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
freebsd/freebsd-ports
|
86f2e89d43913412c4f6b2be3e255bc0945eac12
|
605a2983f245ac63f5420e023e7dce56898ad801
|
refs/heads/main
| 2023-08-30T21:46:28.720924
| 2023-08-30T19:33:44
| 2023-08-30T19:33:44
| 1,803,961
| 916
| 918
|
NOASSERTION
| 2023-09-08T04:06:26
| 2011-05-26T11:15:35
| null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
patch-third__party_blink_renderer_build_scripts_run__with__pythonpath.py
|
--- third_party/blink/renderer/build/scripts/run_with_pythonpath.py.orig 2022-02-07 13:39:41 UTC
+++ third_party/blink/renderer/build/scripts/run_with_pythonpath.py
@@ -22,6 +22,7 @@ def main():
existing_pp = (
os.pathsep + env['PYTHONPATH']) if 'PYTHONPATH' in env else ''
env['PYTHONPATH'] = os.pathsep.join(python_paths) + existing_pp
+ env['LD_LIBRARY_PATH'] = "${WRKSRC}/out/Release"
sys.exit(subprocess.call([sys.executable] + args, env=env))
|
c1c77de2875259efd61d601af116ef3bcf97d8ec
|
f8c5b73c9706470c4dd60d523096e18bc448a960
|
/certbot/certbot/_internal/tests/plugins/storage_test.py
|
a6f8f876d178afec644db4df3b34a427bc749ef3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
certbot/certbot
|
14ab43d76fcf0242d875d551f0d98334c43e7957
|
b1978ff18837e40d16eedf2090330af53d8ceaa5
|
refs/heads/master
| 2023-09-04T00:37:03.739195
| 2023-08-26T23:19:38
| 2023-08-26T23:19:38
| 26,516,210
| 18,581
| 3,265
|
NOASSERTION
| 2023-09-12T15:18:59
| 2014-11-12T02:52:20
|
Python
|
UTF-8
|
Python
| false
| false
| 5,541
|
py
|
storage_test.py
|
"""Tests for certbot.plugins.storage.PluginStorage"""
import json
import sys
from unittest import mock
import pytest
from certbot import errors
from certbot.compat import filesystem
from certbot.compat import os
from certbot.tests import util as test_util
class PluginStorageTest(test_util.ConfigTestCase):
"""Test for certbot.plugins.storage.PluginStorage"""
def setUp(self):
super().setUp()
self.plugin_cls = test_util.DummyInstaller
filesystem.mkdir(self.config.config_dir)
with mock.patch("certbot.reverter.util"):
self.plugin = self.plugin_cls(config=self.config, name="mockplugin")
def test_load_errors_cant_read(self):
with open(os.path.join(self.config.config_dir,
".pluginstorage.json"), "w") as fh:
fh.write("dummy")
# When unable to read file that exists
mock_open = mock.mock_open()
mock_open.side_effect = IOError
self.plugin.storage._storagepath = os.path.join(self.config.config_dir,
".pluginstorage.json")
with mock.patch("builtins.open", mock_open):
with mock.patch('certbot.compat.os.path.isfile', return_value=True):
with mock.patch("certbot.reverter.util"):
with pytest.raises(errors.PluginStorageError):
self.plugin.storage._load() # pylint: disable=protected-access
def test_load_errors_empty(self):
with open(os.path.join(self.config.config_dir, ".pluginstorage.json"), "w") as fh:
fh.write('')
with mock.patch("certbot.plugins.storage.logger.debug") as mock_log:
# Should not error out but write a debug log line instead
with mock.patch("certbot.reverter.util"):
nocontent = self.plugin_cls(self.config, "mockplugin")
with pytest.raises(KeyError):
nocontent.storage.fetch("value")
assert mock_log.called
assert "no values loaded" in mock_log.call_args[0][0]
def test_load_errors_corrupted(self):
with open(os.path.join(self.config.config_dir,
".pluginstorage.json"), "w") as fh:
fh.write('invalid json')
with mock.patch("certbot.plugins.storage.logger.error") as mock_log:
with mock.patch("certbot.reverter.util"):
corrupted = self.plugin_cls(self.config, "mockplugin")
with pytest.raises(errors.PluginError):
corrupted.storage.fetch("value")
assert "is corrupted" in mock_log.call_args[0][0]
def test_save_errors_cant_serialize(self):
with mock.patch("certbot.plugins.storage.logger.error") as mock_log:
# Set data as something that can't be serialized
self.plugin.storage._initialized = True # pylint: disable=protected-access
self.plugin.storage._storagepath = "/tmp/whatever"
self.plugin.storage._data = self.plugin_cls # pylint: disable=protected-access
with pytest.raises(errors.PluginStorageError):
self.plugin.storage.save()
assert "Could not serialize" in mock_log.call_args[0][0]
def test_save_errors_unable_to_write_file(self):
mock_open = mock.mock_open()
mock_open.side_effect = IOError
with mock.patch("certbot.compat.filesystem.open", mock_open):
with mock.patch("certbot.plugins.storage.logger.error") as mock_log:
self.plugin.storage._data = {"valid": "data"} # pylint: disable=protected-access
self.plugin.storage._initialized = True # pylint: disable=protected-access
self.plugin.storage._storagepath = "/tmp/whatever"
with pytest.raises(errors.PluginStorageError):
self.plugin.storage.save()
assert "Could not write" in mock_log.call_args[0][0]
def test_save_uninitialized(self):
with mock.patch("certbot.reverter.util"):
with pytest.raises(errors.PluginStorageError):
self.plugin_cls(self.config, "x").storage.save()
def test_namespace_isolation(self):
with mock.patch("certbot.reverter.util"):
plugin1 = self.plugin_cls(self.config, "first")
plugin2 = self.plugin_cls(self.config, "second")
plugin1.storage.put("first_key", "first_value")
with pytest.raises(KeyError):
plugin2.storage.fetch("first_key")
with pytest.raises(KeyError):
plugin2.storage.fetch("first")
assert plugin1.storage.fetch("first_key") == "first_value"
def test_saved_state(self):
self.plugin.storage.put("testkey", "testvalue")
# Write to disk
self.plugin.storage.save()
with mock.patch("certbot.reverter.util"):
another = self.plugin_cls(self.config, "mockplugin")
assert another.storage.fetch("testkey") == "testvalue"
with open(os.path.join(self.config.config_dir,
".pluginstorage.json"), 'r') as fh:
psdata = fh.read()
psjson = json.loads(psdata)
assert "mockplugin" in psjson.keys()
assert len(psjson) == 1
assert psjson["mockplugin"]["testkey"] == "testvalue"
if __name__ == "__main__":
sys.exit(pytest.main(sys.argv[1:] + [__file__])) # pragma: no cover
|
bf664c8fac2a9235b2a012c1e5499a76922c7680
|
51a75d73097a460d362908ebf6db9cd623558e7d
|
/query_strategies/strategy.py
|
3a96ba61671c59697d65e754a373756509698cb4
|
[
"MIT"
] |
permissive
|
ej0cl6/deep-active-learning
|
6e72b4ab07b24ff445da06977836eb6dc96ab9b2
|
eea3d2c703b83ce412f7dd6f518c017713d15ced
|
refs/heads/master
| 2022-10-17T23:36:07.207405
| 2022-10-03T20:23:35
| 2022-10-03T20:23:35
| 131,947,745
| 715
| 192
|
MIT
| 2022-01-23T07:51:42
| 2018-05-03T05:51:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,188
|
py
|
strategy.py
|
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
class Strategy:
def __init__(self, dataset, net):
self.dataset = dataset
self.net = net
def query(self, n):
pass
def update(self, pos_idxs, neg_idxs=None):
self.dataset.labeled_idxs[pos_idxs] = True
if neg_idxs:
self.dataset.labeled_idxs[neg_idxs] = False
def train(self):
labeled_idxs, labeled_data = self.dataset.get_labeled_data()
self.net.train(labeled_data)
def predict(self, data):
preds = self.net.predict(data)
return preds
def predict_prob(self, data):
probs = self.net.predict_prob(data)
return probs
def predict_prob_dropout(self, data, n_drop=10):
probs = self.net.predict_prob_dropout(data, n_drop=n_drop)
return probs
def predict_prob_dropout_split(self, data, n_drop=10):
probs = self.net.predict_prob_dropout_split(data, n_drop=n_drop)
return probs
def get_embeddings(self, data):
embeddings = self.net.get_embeddings(data)
return embeddings
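
# Minimal concrete strategy sketch (assumes, as `update` above does, that
# `dataset.labeled_idxs` is a boolean numpy array over the pool): pick n
# still-unlabeled examples uniformly at random.
class RandomSampling(Strategy):
    def query(self, n):
        unlabeled_idxs = np.where(~self.dataset.labeled_idxs)[0]
        return np.random.choice(unlabeled_idxs, n, replace=False)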
|
3668f38dbec0d7ed902d25030804193ececd430b
|
ea7e87037d0a859250b3b0768fe657ab8520c8be
|
/Python/tigre/algorithms/single_pass_algorithms.py
|
3fac062145eff8c48683c283c92149469b0304d8
|
[
"BSD-3-Clause"
] |
permissive
|
CERN/TIGRE
|
80e99d4a49a2af2ec2248db8be3c48142df37134
|
aa4651538e9bce7d0fee2cd2fcf0baa9fcb2ae19
|
refs/heads/master
| 2023-09-03T15:12:02.100453
| 2023-08-16T09:54:34
| 2023-08-16T09:54:34
| 61,034,131
| 473
| 194
|
BSD-3-Clause
| 2023-09-11T11:41:49
| 2016-06-13T12:22:21
|
MATLAB
|
UTF-8
|
Python
| false
| false
| 3,750
|
py
|
single_pass_algorithms.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tigre.utilities.Atb import Atb
from tigre.utilities.filtering import filtering
def FDK(proj, geo, angles, **kwargs):
"""
solves CT image reconstruction.
:param proj: np.array(dtype=float32),
Data input in the form of 3d
:param geo: tigre.utilities.geometry.Geometry
Geometry of detector and image (see examples/Demo code)
:param angles: np.array(dtype=float32)
Angles of projection, shape = (nangles,3) or (nangles,)
:param filter: str
Type of filter used for backprojection
opts: "shep_logan"
"cosine"
"hamming"
"hann"
:param verbose: bool
Feedback print statements for algorithm progress
:param kwargs: dict
keyword arguments
:return: np.array(dtype=float32)
Usage:
-------
>>> import tigre
>>> import tigre.algorithms as algs
>>> import numpy
>>> from tigre.demos.Test_data import data_loader
>>> geo = tigre.geometry(mode='cone',default_geo=True,
>>> nVoxel=np.array([64,64,64]))
>>> angles = np.linspace(0,2*np.pi,100)
>>> src_img = data_loader.load_head_phantom(geo.nVoxel)
>>> proj = tigre.Ax(src_img,geo,angles)
>>> output = algs.FDK(proj,geo,angles)
tigre.demos.run() to launch ipython notebook file with examples.
--------------------------------------------------------------------
This file is part of the TIGRE Toolbox
Copyright (c) 2015, University of Bath and
CERN-European Organization for Nuclear Research
All rights reserved.
License: Open Source under BSD.
See the full license at
https://github.com/CERN/TIGRE/license.txt
Contact: tigre.toolbox@gmail.com
Codes: https://github.com/CERN/TIGRE/
--------------------------------------------------------------------
Coded by: MATLAB (original code): Ander Biguri
PYTHON : Reuben Lindroos
"""
verbose = kwargs["verbose"] if "verbose" in kwargs else False
gpuids = kwargs["gpuids"] if "gpuids" in kwargs else None
geo = copy.deepcopy(geo)
geo.check_geo(angles)
geo.checknans()
geo.filter = kwargs["filter"] if "filter" in kwargs else None
    # Cosine weighting: scale each detector pixel by DSD / sqrt(DSD^2 + x^2 + y^2)
proj_filt = np.zeros(proj.shape, dtype=np.float32)
xv = np.arange((-geo.nDetector[1] / 2) + 0.5,
1 + (geo.nDetector[1] / 2) - 0.5) * geo.dDetector[1]
yv = np.arange((-geo.nDetector[0] / 2) + 0.5,
1 + (geo.nDetector[0] / 2) - 0.5) * geo.dDetector[0]
(yy, xx) = np.meshgrid(xv, yv)
w = geo.DSD[0] / np.sqrt((geo.DSD[0] ** 2 + xx ** 2 + yy ** 2))
np.multiply(proj, w, out=proj_filt)
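    # Ramp-filter the cosine-weighted projections, then backproject with the
    # adjoint operator (Atb) to obtain the FDK reconstruction.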
proj_filt = filtering(proj_filt, geo, angles, parker=False, verbose=verbose)
return Atb(proj_filt, geo, geo.angles, "FDK", gpuids=gpuids)
fdk = FDK
def fbp(proj, geo, angles, **kwargs):  # noqa: D103
    __doc__ = FDK.__doc__  # noqa: F841  (note: a function-local __doc__ does not set fbp.__doc__)
if geo.mode != "parallel":
raise ValueError("Only use FBP for parallel beam. Check geo.mode.")
geox = copy.deepcopy(geo)
geox.check_geo(angles)
verbose = kwargs["verbose"] if "verbose" in kwargs else False
gpuids = kwargs["gpuids"] if "gpuids" in kwargs else None
proj_filt = filtering(copy.deepcopy(proj), geox, angles, parker=False, verbose=verbose)
return Atb(proj_filt, geo, angles, gpuids=gpuids) * geo.DSO / geo.DSD
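

# Illustrative usage sketch for fbp (hedged: mirrors the FDK docstring example
# above, but with the parallel-beam geometry that fbp requires; the geometry
# keyword arguments are assumptions, not a verified API reference):
# >>> import numpy as np
# >>> import tigre
# >>> import tigre.algorithms as algs
# >>> geo = tigre.geometry(mode='parallel', nVoxel=np.array([64, 64, 64]))
# >>> angles = np.linspace(0, 2 * np.pi, 100)
# >>> proj = tigre.Ax(src_img, geo, angles)  # src_img: float32 volume of shape geo.nVoxel
# >>> rec = algs.fbp(proj, geo, angles)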
|
e26cd3c46c526be1f4d380f1b492812165e583b5
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/debug/stepping/test_smart_step_into_comparison_operator.py
|
de3cc2d10add833590c83fb1ca0346bc569e2692
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
test_smart_step_into_comparison_operator.py
|
from __future__ import print_function
class A:
def __init__(self, a):
self.a = a
def __gt__(self, other):
if (self.a > other.a):
return True
else:
return False
if (A(2) > A(3) > A(1)):
print("ob1 is greater than ob2")
else:
print("ob2 is greater than ob1")
|
cd120abeaa3c1f7dca17c8d7bdf4bcdd4b6aa412
|
1c7622f563fac18d9644e883742b6f539665aa26
|
/pytradfri/device/__init__.py
|
182d5fb1d62e20eafc85f33db791fd03e33eea88
|
[
"MIT"
] |
permissive
|
home-assistant-libs/pytradfri
|
3708d65edb236c3b8f430cdf59f45b1d4849352f
|
0d7bfbfc63670c050570c96bf3f4a1b20c8091a3
|
refs/heads/master
| 2023-08-29T08:44:07.477619
| 2023-08-28T06:10:00
| 2023-08-28T06:10:00
| 87,844,301
| 233
| 44
|
MIT
| 2023-09-13T02:00:41
| 2017-04-10T18:30:17
|
Python
|
UTF-8
|
Python
| false
| false
| 7,044
|
py
|
__init__.py
|
"""Classes to interact with devices."""
from __future__ import annotations
from datetime import datetime
from pydantic import BaseModel, Field
from ..const import (
ATTR_APPLICATION_TYPE,
ATTR_DEVICE_BATTERY,
ATTR_DEVICE_FIRMWARE_VERSION,
ATTR_DEVICE_INFO,
ATTR_DEVICE_MANUFACTURER,
ATTR_DEVICE_MODEL_NUMBER,
ATTR_DEVICE_POWER_SOURCE,
ATTR_DEVICE_SERIAL,
ATTR_LAST_SEEN,
ATTR_LIGHT_CONTROL,
ATTR_REACHABLE_STATE,
ATTR_START_BLINDS,
ATTR_SWITCH_PLUG,
ROOT_AIR_PURIFIER,
ROOT_DEVICES,
ROOT_SIGNAL_REPEATER,
)
from ..device.light import LightResponse
from ..resource import ApiResource, ApiResourceResponse
from .air_purifier import AirPurifierResponse
from .air_purifier_control import AirPurifierControl
from .blind import BlindResponse
from .blind_control import BlindControl
from .light_control import LightControl
from .signal_repeater import SignalRepeaterResponse
from .signal_repeater_control import SignalRepeaterControl
from .socket import SocketResponse
from .socket_control import SocketControl
class DeviceInfoResponse(BaseModel):
"""Represent the device info part of the device response."""
manufacturer: str = Field(alias=ATTR_DEVICE_MANUFACTURER)
model_number: str = Field(alias=ATTR_DEVICE_MODEL_NUMBER)
serial: str = Field(alias=ATTR_DEVICE_SERIAL)
firmware_version: str = Field(alias=ATTR_DEVICE_FIRMWARE_VERSION)
power_source: int | None = Field(alias=ATTR_DEVICE_POWER_SOURCE)
battery_level: int | None = Field(alias=ATTR_DEVICE_BATTERY)
class DeviceResponse(ApiResourceResponse):
"""Represent a device response."""
air_purifier_control: list[AirPurifierResponse] | None = Field(
alias=ROOT_AIR_PURIFIER
)
application_type: int = Field(alias=ATTR_APPLICATION_TYPE)
blind_control: list[BlindResponse] | None = Field(alias=ATTR_START_BLINDS)
device_info: DeviceInfoResponse = Field(alias=ATTR_DEVICE_INFO)
last_seen: int | None = Field(alias=ATTR_LAST_SEEN)
light_control: list[LightResponse] | None = Field(alias=ATTR_LIGHT_CONTROL)
reachable: int = Field(alias=ATTR_REACHABLE_STATE)
signal_repeater_control: list[SignalRepeaterResponse] | None = Field(
alias=ROOT_SIGNAL_REPEATER
)
socket_control: list[SocketResponse] | None = Field(alias=ATTR_SWITCH_PLUG)
class Device(ApiResource):
"""Base class for devices."""
_model_class: type[DeviceResponse] = DeviceResponse
raw: DeviceResponse
@property
def application_type(self) -> int:
"""Return application type."""
return self.raw.application_type
@property
def path(self) -> list[str]:
"""Return path."""
return [ROOT_DEVICES, str(self.id)]
@property
def device_info(self) -> DeviceInfo:
"""Return Device information."""
return DeviceInfo(self)
@property
def last_seen(self) -> datetime | None:
"""Return timestamp when last seen."""
if (last_seen := self.raw.last_seen) is not None:
return datetime.utcfromtimestamp(last_seen)
return None
@property
def reachable(self) -> bool:
"""Check if gateway is reachable."""
return self.raw.reachable == 1
@property
def has_light_control(self) -> bool:
"""Check if light_control is present."""
return self.raw.light_control is not None
@property
def light_control(self) -> LightControl | None:
"""Return light_control."""
if self.has_light_control:
return LightControl(self)
return None
@property
def has_socket_control(self) -> bool:
"""Check if socket_control is present."""
return self.raw.socket_control is not None
@property
def socket_control(self) -> SocketControl | None:
"""Return socket_control."""
if self.has_socket_control:
return SocketControl(self)
return None
@property
def has_blind_control(self) -> bool:
"""Check if blind_control is present."""
return self.raw.blind_control is not None
@property
def blind_control(self) -> BlindControl | None:
"""Return blind_control."""
if self.has_blind_control:
return BlindControl(self)
return None
@property
def has_signal_repeater_control(self) -> bool:
"""Check if signal_repeater_control is present."""
return self.raw.signal_repeater_control is not None
@property
def signal_repeater_control(self) -> SignalRepeaterControl | None:
"""Return signal_repeater control, if any."""
if self.has_signal_repeater_control:
return SignalRepeaterControl(self)
return None
@property
def has_air_purifier_control(self) -> bool:
"""Check if air_purifier_control is present."""
return self.raw.air_purifier_control is not None
@property
def air_purifier_control(self) -> AirPurifierControl | None:
"""Return air_purifier control, if any."""
if self.has_air_purifier_control:
return AirPurifierControl(self)
return None
def __repr__(self) -> str:
"""Return representation of class object."""
return f"<{self.id} - {self.name} ({self.device_info.model_number})>"
class DeviceInfo:
"""Represent device information.
http://www.openmobilealliance.org/tech/profiles/LWM2M_Device-v1_0.xml
"""
VALUE_POWER_SOURCES = {
1: "Internal Battery",
2: "External Battery",
3: "Battery", # Not in spec, used by remote
4: "Power over Ethernet",
5: "USB",
6: "AC (Mains) power",
7: "Solar",
}
def __init__(self, device: Device) -> None:
"""Create object of class."""
self._device = device
@property
def manufacturer(self) -> str:
"""Human readable manufacturer name."""
return self.raw.manufacturer
@property
def model_number(self) -> str:
"""Return model identifier string (manufacturer specified string)."""
return self.raw.model_number
@property
def serial(self) -> str:
"""Return serial string."""
return self.raw.serial
@property
def firmware_version(self) -> str:
"""Return current firmware version of device."""
return self.raw.firmware_version
@property
def power_source(self) -> int | None:
"""Power source."""
return self.raw.power_source
@property
def power_source_str(self) -> str | None:
"""Represent current power source."""
if self.raw.power_source is not None:
return self.VALUE_POWER_SOURCES.get(self.raw.power_source, "Unknown")
return None
@property
def battery_level(self) -> int | None:
"""Battery in 0..100."""
return self.raw.battery_level
@property
def raw(self) -> DeviceInfoResponse:
"""Return raw data that it represents."""
return self._device.raw.device_info
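

# Illustrative usage sketch (hedged: the ApiResource constructor signature is an
# assumption suggested by `_model_class` above; `raw` stands for an already
# parsed DeviceResponse). The accessors themselves are defined in this module:
# >>> device = Device(raw)
# >>> device.reachable                   # True when the gateway reports 1
# >>> device.device_info.battery_level   # None for mains-powered devices
# >>> device.device_info.power_source_str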
|
e6914bd73ff8cfa9d0f7421265f985fdd0bdebb7
|
13aa7b9d76c364eb6b112ad057f81ea2fa63da3a
|
/modules/vulnerability-analysis/skipfish.py
|
1c895907a3a5c7975524addabde9c090ef313c22
|
[] |
no_license
|
trustedsec/ptf
|
df2ed6671c9efd05db756faec41298fbfb50bc5a
|
f87dfa8b3b62f2157fc51e8abe31bf3f0bf8541c
|
refs/heads/master
| 2023-08-31T08:01:58.033518
| 2023-03-15T17:04:49
| 2023-03-15T17:04:49
| 35,505,904
| 5,087
| 1,434
| null | 2023-08-27T22:33:41
| 2015-05-12T18:37:25
|
Python
|
UTF-8
|
Python
| false
| false
| 934
|
py
|
skipfish.py
|
#!/usr/bin/env python
#####################################
# Installation module for skipfish
#####################################
# AUTHOR OF MODULE NAME
AUTHOR="Mauro Risonho de Paula Assumpcao (firebits)"
# DESCRIPTION OF THE MODULE
DESCRIPTION="This module will install/update skipfish - web application security scanner"
# INSTALL TYPE GIT, SVN, FILE DOWNLOAD
# OPTIONS = GIT, SVN, FILE
INSTALL_TYPE="GIT"
# LOCATION OF THE FILE OR GIT/SVN REPOSITORY
REPOSITORY_LOCATION="https://github.com/spinkham/skipfish.git"
# WHERE DO YOU WANT TO INSTALL IT
INSTALL_LOCATION="skipfish"
# DEPENDS FOR DEBIAN INSTALLS
DEBIAN="gcc git libidn11-dev libssl-dev build-essential zlibc libidn11-dev libidn11 libpcre3-dev"
# DEPENDS FOR FEDORA INSTALLS
FEDORA="git,gcc,openssl-devel,make,automake,gcc,gcc-c++,kernel-devel,zlib,zlib-devel,libidn,libidn-devel,pcre-devel"
# COMMANDS TO RUN AFTER
AFTER_COMMANDS="cd {INSTALL_LOCATION},make"
|
8783f7ad7c726420f4ed0031b05c220ca3b628b5
|
2f1e3f24f2798507c9eb73185a955c9bfb735140
|
/libserving/serialization/knn.py
|
59812b179d2dba82dbd9f6cc0c4fa18f3d753a23
|
[
"MIT"
] |
permissive
|
massquantity/LibRecommender
|
e4f55b06b2208c794a3f97f7ff89413fa9beaffa
|
8d5fbe9c177f5b91c2b6f19a155a83320dd0e20c
|
refs/heads/master
| 2023-08-31T23:48:37.634663
| 2023-08-20T11:58:15
| 2023-08-20T11:58:15
| 174,493,761
| 251
| 55
|
MIT
| 2023-08-20T11:58:16
| 2019-03-08T07:58:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,190
|
py
|
knn.py
|
import os
from scipy import sparse
from libreco.bases import CfBase
from .common import (
check_path_exists,
save_id_mapping,
save_model_name,
save_to_json,
save_user_consumed,
)
def save_knn(path: str, model: CfBase, k: int):
"""Save KNN model to disk.
Parameters
----------
path : str
Model saving path.
model : CfBase
Model to save.
k : int
Number of similar users/items to save.
"""
check_path_exists(path)
save_model_name(path, model)
save_id_mapping(path, model.data_info)
save_user_consumed(path, model.data_info)
save_sim_matrix(path, model.sim_matrix, k)
def save_sim_matrix(path: str, sim_matrix: sparse.csr_matrix, k: int):
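    """Keep only the top-k most similar neighbors per row of ``sim_matrix`` and dump them to ``sim.json``."""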
k_sims = dict()
num = len(sim_matrix.indptr) - 1
indices = sim_matrix.indices.tolist()
indptr = sim_matrix.indptr.tolist()
data = sim_matrix.data.tolist()
for i in range(num):
i_slice = slice(indptr[i], indptr[i + 1])
sorted_sims = sorted(zip(indices[i_slice], data[i_slice]), key=lambda x: -x[1])
k_sims[i] = sorted_sims[:k]
sim_path = os.path.join(path, "sim.json")
save_to_json(sim_path, k_sims)
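

# Illustrative usage sketch (hedged: assumes `model` is an already-fitted CfBase
# instance, e.g. a user/item CF model trained elsewhere with libreco):
# >>> save_knn(path="knn_model", model=model, k=10)
# Afterwards `knn_model/sim.json` maps each row index to its top-10 (index, sim) pairs.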
|
e6f2a88b8ce2cddc0030b6d372e0aeb074949359
|
e9869359c839c8c175ae7877bc35dcfdfe4058f8
|
/kornia/feature/sosnet.py
|
7d13bfa8667fd6096555fa19978d0b8cc5e179ae
|
[
"Apache-2.0"
] |
permissive
|
kornia/kornia
|
80f93eae6a70b8bc0c9784f92a842ab9a6ab54ae
|
1e0f8baa7318c05b17ea6dbb48605691bca8972f
|
refs/heads/master
| 2023-08-31T06:32:45.960859
| 2023-08-30T21:59:41
| 2023-08-30T21:59:41
| 145,693,916
| 7,351
| 833
|
Apache-2.0
| 2023-09-12T21:59:29
| 2018-08-22T10:31:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,733
|
py
|
sosnet.py
|
from typing import Dict
import torch
from torch import nn
from kornia.core.check import KORNIA_CHECK_SHAPE
from kornia.utils.helpers import map_location_to_cpu
urls: Dict[str, str] = {}
urls["lib"] = "https://github.com/yuruntian/SOSNet/raw/master/sosnet-weights/sosnet_32x32_liberty.pth"
urls["hp_a"] = "https://github.com/yuruntian/SOSNet/raw/master/sosnet-weights/sosnet_32x32_hpatches_a.pth"
class SOSNet(nn.Module):
r"""128-dimensional SOSNet model definition for 32x32 patches.
This is based on the original code from paper
"SOSNet:Second Order Similarity Regularization for Local Descriptor Learning".
Args:
pretrained: Download and set pretrained weights to the model.
Shape:
- Input: :math:`(B, 1, 32, 32)`
- Output: :math:`(B, 128)`
Examples:
>>> input = torch.rand(8, 1, 32, 32)
>>> sosnet = SOSNet()
>>> descs = sosnet(input) # 8x128
"""
patch_size = 32
def __init__(self, pretrained: bool = False) -> None:
super().__init__()
self.layers = nn.Sequential(
nn.InstanceNorm2d(1, affine=False),
nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(32, affine=False),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(64, affine=False),
nn.ReLU(),
nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(128, affine=False),
nn.ReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(128, affine=False),
nn.ReLU(),
nn.Dropout(0.1),
nn.Conv2d(128, 128, kernel_size=8, bias=False),
nn.BatchNorm2d(128, affine=False),
)
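        # LocalResponseNorm over a window of 256 = 2 * 128 channels with
        # alpha=256, beta=0.5, k=0 reduces to plain L2 normalization of the
        # 128-d descriptor (a common trick in local-descriptor networks).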
self.desc_norm = nn.Sequential(nn.LocalResponseNorm(256, alpha=256.0, beta=0.5, k=0.0))
# load pretrained model
if pretrained:
pretrained_dict = torch.hub.load_state_dict_from_url(urls['lib'], map_location=map_location_to_cpu)
self.load_state_dict(pretrained_dict, strict=True)
self.eval()
def forward(self, input: torch.Tensor, eps: float = 1e-10) -> torch.Tensor:
KORNIA_CHECK_SHAPE(input, ["B", "1", "32", "32"])
descr = self.desc_norm(self.layers(input) + eps)
descr = descr.view(descr.size(0), -1)
return descr
|
e7c0709c7a3f1301a74908e3c6e163249706a104
|
f597af39c32bb29d2be446d3c83cc512efd66a9d
|
/website/build_rules.py
|
66790f4ae1df4ea3556a25bf50a34be76c16ccdb
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
thibaudcolas/curlylint
|
dc099b1bb5f438be065bd3a5da1b1b0dfcd39236
|
4c36043873f973aa49ed471c238a1de17d603db5
|
refs/heads/main
| 2023-08-17T18:10:10.941564
| 2022-03-30T08:45:04
| 2022-03-30T08:45:04
| 236,965,316
| 215
| 30
|
MIT
| 2023-06-08T01:35:40
| 2020-01-29T11:18:14
|
HTML
|
UTF-8
|
Python
| false
| false
| 8,962
|
py
|
build_rules.py
|
# -*- coding: utf-8 -*-
# type: ignore
import codecs
import json
import os
import toml
from curlylint.rules.aria_role import aria_role
from curlylint.rules.django_forms_rendering import django_forms_rendering
from curlylint.rules.html_has_lang import html_has_lang
from curlylint.rules.image_alt import image_alt
from curlylint.rules.indent import indent
from curlylint.rules.meta_viewport import meta_viewport
from curlylint.rules.no_autofocus import no_autofocus
from curlylint.rules.tabindex_no_positive import tabindex_no_positive
rules = [
aria_role.RULE,
django_forms_rendering.RULE,
html_has_lang.RULE,
image_alt.RULE,
indent.RULE,
meta_viewport.RULE,
no_autofocus.RULE,
tabindex_no_positive.RULE,
]
if __name__ == "__main__":
for rule in rules:
test_cases_path = os.path.join(
os.path.dirname(__file__),
"..",
"curlylint",
"rules",
rule["id"],
f"{rule['id']}_test.json",
)
        with open(test_cases_path, "r") as f:
            test_cases = json.load(f)
description = rule["docs"]["description"]
impact = rule["docs"]["impact"]
config_toml = []
config_cli = []
        one_of_schema = rule["schema"].get("oneOf", [])
        for item in one_of_schema:
title = f"# {item['title']}"
example = item["examples"][0]
config_toml.append(title)
config_cli.append(title)
config_toml.append(
toml.dumps({rule["id"]: example}).replace("\n", "")
)
config_cli.append(
f"curlylint --rule '{rule['id']}: {json.dumps(example)}' ."
)
config_toml_str = "\\n".join(config_toml).replace("`", "\\`")
config_cli_str = "\\n".join(config_cli).replace("`", "\\`")
config_section = ""
if config_toml_str:
config_section = f"""This rule supports the following configuration:
<Tabs
groupId="config-language"
defaultValue="toml"
values={{[
{{ label: "TOML", value: "toml" }},
{{ label: "Shell", value: "shell" }},
]}}
>
<TabItem value="toml">
<CodeSnippet
snippet={{`{config_toml_str}`}}
annotations={{[]}}
lang="toml"
/>
</TabItem>
<TabItem value="shell">
<CodeSnippet
snippet={{`{config_cli_str}`}}
annotations={{[]}}
lang="shell"
/>
</TabItem>
</Tabs>"""
success_section = ""
fail_section = ""
if test_cases:
            # materialize: a filter object is always truthy and single-pass, so
            # the `if success_cases:` check below would otherwise always be True
            success_cases = list(
                filter(lambda c: c.get("example") and len(c["output"]) == 0, test_cases)
            )
success_toml = []
success_cli = []
for c in success_cases:
toml_config = toml.dumps({rule["id"]: c["config"]}).replace(
"\n", ""
)
success_toml.append(f"<!-- Good: {c['label']} -->")
success_toml.append(f"<!-- {toml_config} -->")
success_toml.append(c["template"])
success_cli.append(f"<!-- Good: {c['label']} -->")
success_cli.append(
f"<!-- curlylint --rule '{rule['id']}: {json.dumps(c['config'])}' . -->"
)
success_cli.append(c["template"])
success_toml_str = "\\n".join(success_toml).replace("`", "\\`")
success_cli_str = "\\n".join(success_cli).replace("`", "\\`")
if success_cases:
success_section = f"""## Success
<Tabs
groupId="config-language"
defaultValue="toml"
values={{[
{{ label: "TOML", value: "toml" }},
{{ label: "Shell", value: "shell" }},
]}}
>
<TabItem value="toml">
<CodeSnippet
snippet={{`{success_toml_str}`}}
annotations={{[]}}
lang="html"
/>
</TabItem>
<TabItem value="shell">
<CodeSnippet
snippet={{`{success_cli_str}`}}
annotations={{[]}}
lang="html"
/>
</TabItem>
</Tabs>
"""
            # materialized for the same reason as success_cases above
            fail_cases = list(
                filter(lambda c: c.get("example") and len(c["output"]) > 0, test_cases)
            )
fail_annotations = []
fail_toml = []
fail_cli = []
for c in fail_cases:
toml_config = toml.dumps({rule["id"]: c["config"]}).replace(
"\n", ""
)
fail_toml.append(f"<!-- Bad: {c['label']} -->")
fail_toml.append(f"<!-- {toml_config} -->")
fail_toml.append(c["template"])
fail_cli.append(f"<!-- Bad: {c['label']} -->")
fail_cli.append(
f"<!-- curlylint --rule '{rule['id']}: {json.dumps(c['config'])}' . -->"
)
fail_cli.append(c["template"])
fail_annotations = fail_annotations + [
{
"file": o["file"],
"column": o["column"],
"line": o["line"] + 2 + (len(fail_annotations)) * 3,
"code": o["code"],
"message": o["message"],
}
for o in c["output"]
]
fail_toml_str = "\\n".join(fail_toml).replace("`", "\\`")
fail_cli_str = "\\n".join(fail_cli).replace("`", "\\`")
if fail_cases:
fail_section = f"""## Fail
<Tabs
groupId="config-language"
defaultValue="toml"
values={{[
{{ label: "TOML", value: "toml" }},
{{ label: "Shell", value: "shell" }},
]}}
>
<TabItem value="toml">
<CodeSnippet
snippet={{`{fail_toml_str}\\n\\n`}}
annotations={{{json.dumps(fail_annotations)}}}
lang="html"
/>
</TabItem>
<TabItem value="shell">
<CodeSnippet
snippet={{`{fail_cli_str}\\n\\n`}}
annotations={{{json.dumps(fail_annotations)}}}
lang="html"
/>
</TabItem>
</Tabs>
"""
resources_section = ""
if rule["docs"]["resources"]:
resources = "\n".join([f"- {r}" for r in rule["docs"]["resources"]])
resources_section = f"""## Resources\n\n{resources}"""
with codecs.open(f"docs/rules/{rule['id']}.mdx", "w", "utf-8") as file:
file.write(
f"""---
# This file is auto-generated, please do not update manually.
id: {rule['id']}
title: {rule['id']}
custom_edit_url: https://github.com/thibaudcolas/curlylint/edit/main/curlylint/rules/{rule['id']}/{rule['id']}.py
---
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
import CodeSnippet from "@theme/CodeSnippet";
> {description}
>
> User impact: **{impact}**
{config_section}
{success_section}
{fail_section}
{resources_section}
"""
)
rules_list = "\n".join(
[
f"- [{rule['id']}]({rule['id']}): {rule['docs']['description']}"
for rule in rules
]
)
rules_id = ",\n ".join([f"\"rules/{rule['id']}\"" for rule in rules])
with codecs.open("rules-sidebar.js", "w", "utf-8") as file:
file.write(
f"""module.exports = [
{rules_id}
];
"""
)
all_config_toml = []
all_config_cli = []
for rule in rules:
        one_of_schema = rule["schema"].get("oneOf", [])
        first_config = one_of_schema[0]
title = f"# {first_config['title']}"
example = first_config["examples"][0]
all_config_toml.append(title)
all_config_toml.append(f"# See {rule['docs']['url']}.")
all_config_toml.append(
toml.dumps({rule["id"]: example}).replace("\n", "")
)
all_config_cli.append(f"--rule '{rule['id']}: {json.dumps(example)}'")
all_config_toml_str = "\\n".join(all_config_toml).replace("`", "\\`")
all_config_cli_str = " ".join(all_config_cli)
with codecs.open("docs/rules/all.mdx", "w", "utf-8") as file:
file.write(
f"""---
# This file is auto-generated, please do not update manually.
id: all
title: All rules
custom_edit_url: https://github.com/thibaudcolas/curlylint/edit/main/website/build_rules.py
---
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
import CodeSnippet from "@theme/CodeSnippet";
{rules_list}
## Try them all
Here is a sample configuration with all of Curlylint’s rules enabled. Note **this isn’t a recommended configuration**, just a convenient way to try it all at once:
<Tabs
groupId="config-language"
defaultValue="toml"
values={{[
{{ label: "TOML", value: "toml" }},
{{ label: "Shell", value: "shell" }},
]}}
>
<TabItem value="toml">
<CodeSnippet
snippet={{`[tool.curlylint.rules]\\n{all_config_toml_str}`}}
annotations={{[]}}
lang="toml"
/>
</TabItem>
<TabItem value="shell">
<CodeSnippet
snippet={{`curlylint {all_config_cli_str} .`}}
annotations={{[]}}
lang="shell"
/>
</TabItem>
</Tabs>
"""
)
|
25c98dd3f2584b2cdea96a150d6c2fe74082d25b
|
518bf342bc4138982af3e2724e75f1d9ca3ba56c
|
/solutions/2014. Longest Subsequence Repeated k Times/2014.py
|
89feb5df1e6cd0c83c2f3c33040973593b880203
|
[
"MIT"
] |
permissive
|
walkccc/LeetCode
|
dae85af7cc689882a84ee5011f0a13a19ad97f18
|
a27be41c174565d365cbfe785f0633f634a01b2a
|
refs/heads/main
| 2023-08-28T01:32:43.384999
| 2023-08-20T19:00:45
| 2023-08-20T19:00:45
| 172,231,974
| 692
| 302
|
MIT
| 2023-08-13T14:48:42
| 2019-02-23T15:46:23
|
C++
|
UTF-8
|
Python
| false
| false
| 950
|
py
|
2014.py
|
import collections
import string


class Solution:
def longestSubsequenceRepeatedK(self, s: str, k: int) -> str:
ans = ''
count = [0] * 26
possibleChars = []
q = collections.deque(['']) # Store subseqs, length grows by 1 each time
for c in s:
count[ord(c) - ord('a')] += 1
for c in string.ascii_lowercase:
if count[ord(c) - ord('a')] >= k:
possibleChars.append(c)
def isSubsequence(subseq: str, s: str, k: int) -> bool:
i = 0 # subseq's index
for c in s:
if c == subseq[i]:
i += 1
if i == len(subseq):
k -= 1
if k == 0:
return True
i = 0
return False
while q:
currSubseq = q.popleft()
if len(currSubseq) * k > len(s):
return ans
for c in possibleChars:
newSubseq = currSubseq + c
if isSubsequence(newSubseq, s, k):
q.append(newSubseq)
ans = newSubseq
return ans
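

# Illustrative check (the problem statement's first example):
# >>> Solution().longestSubsequenceRepeatedK("letsleetcode", 2)
# 'let'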
|
94ddbfea0c1f1cd03d330c2a88e703733354e66f
|
66333a96a8806181228a860085ee5d01888c7b2f
|
/image_classification/resnet50/resnet50.py
|
ec66a39b19af9c8bf5f4190bade2d020bb038879
|
[
"MIT"
] |
permissive
|
axinc-ai/ailia-models
|
7c5c15d813a58fcae678a8107ebc9c39ee9912f2
|
da1c277b602606586cd83943ef6b23eb705ec604
|
refs/heads/master
| 2023-08-31T07:43:39.848448
| 2023-08-29T05:06:27
| 2023-08-29T05:06:27
| 206,917,330
| 1,554
| 286
| null | 2023-09-14T19:56:14
| 2019-09-07T04:50:59
|
Python
|
UTF-8
|
Python
| false
| false
| 6,193
|
py
|
resnet50.py
|
import sys
import time
import ailia
import cv2
import numpy as np
import resnet50_labels
# import original modules
sys.path.append('../../util')
# logger
from logging import getLogger # noqa: E402
import webcamera_utils # noqa: E402
from classifier_utils import (plot_results, print_results, # noqa: E402
write_predictions)
from image_utils import imread # noqa: E402
from model_utils import check_and_download_models # noqa: E402
from arg_utils import get_base_parser, get_savepath, update_parser # noqa: E402
logger = getLogger(__name__)
# ======================
# Parameters 1
# ======================
MODEL_NAMES = ['resnet50.opt', 'resnet50', 'resnet50_pytorch']
TTA_NAMES = ['none', '1_crop', 'keep_aspect']
IMAGE_PATH = 'pizza.jpg'
IMAGE_HEIGHT = 224
IMAGE_WIDTH = 224
MAX_CLASS_COUNT = 3
SLEEP_TIME = 0
# ======================
# Arguemnt Parser Config
# ======================
parser = get_base_parser(
'Resnet50 ImageNet classification model', IMAGE_PATH, None
)
parser.add_argument(
'--arch', '-a', metavar='ARCH',
default='resnet50.opt', choices=MODEL_NAMES,
help=('model architecture: ' + ' | '.join(MODEL_NAMES) +
' (default: resnet50.opt)')
)
parser.add_argument(
'--tta', '-t', metavar='TTA',
default='none', choices=TTA_NAMES,
help=('tta scheme: ' + ' | '.join(TTA_NAMES) +
' (default: none)')
)
parser.add_argument(
'-w', '--write_prediction',
action='store_true',
help='Flag to output the prediction file.'
)
args = update_parser(parser)
if args.arch == "resnet50_pytorch":
IMAGE_RANGE = ailia.NETWORK_IMAGE_RANGE_IMAGENET
else:
IMAGE_RANGE = ailia.NETWORK_IMAGE_RANGE_S_INT8
if args.write_prediction:
MAX_CLASS_COUNT = 5
# ======================
# Parameters 2
# ======================
WEIGHT_PATH = args.arch + '.onnx'
MODEL_PATH = args.arch + '.onnx.prototxt'
REMOTE_PATH = 'https://storage.googleapis.com/ailia-models/resnet50/'
# ======================
# Utils
# ======================
def preprocess_image(img):
if len(img.shape) == 2:
img = np.expand_dims(img, axis=2)
if img.shape[2] == 3:
img = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
elif img.shape[2] == 1:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGRA)
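    # TTA preprocessing: '1_crop' scales the short side to 256 and takes a
    # centered 224x224 crop; 'keep_aspect' scales the short side straight to
    # 224, so only the long side is cropped.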
if args.tta == "1_crop" or args.tta == "keep_aspect":
resize = 256
crop = 224
if args.tta == "keep_aspect":
resize = crop
pad = (resize - crop)//2
if img.shape[0] < img.shape[1]:
img = cv2.resize(img, (int(img.shape[1]*resize/img.shape[0]), resize))
img = img[pad:pad+crop,(img.shape[1]-crop)//2:(img.shape[1]-crop)//2+crop,:]
else:
img = cv2.resize(img, (resize, int(img.shape[0]*resize/img.shape[1])))
img = img[(img.shape[0]-crop)//2:(img.shape[0]-crop)//2+crop,pad:pad+crop,:]
img = img.copy()
return img
# ======================
# Main functions
# ======================
def recognize_from_image():
# net initialize
classifier = ailia.Classifier(
MODEL_PATH,
WEIGHT_PATH,
env_id=args.env_id,
format=ailia.NETWORK_IMAGE_FORMAT_RGB,
range=IMAGE_RANGE,
)
# input image loop
for image_path in args.input:
# prepare input data
logger.info(image_path)
img = imread(image_path, cv2.IMREAD_UNCHANGED)
img = preprocess_image(img)
# inference
logger.info('Start inference...')
if args.benchmark:
logger.info('BENCHMARK mode')
for i in range(args.benchmark_count):
start = int(round(time.time() * 1000))
classifier.compute(img, MAX_CLASS_COUNT)
end = int(round(time.time() * 1000))
logger.info(f'\tailia processing time {end - start} ms')
else:
classifier.compute(img, MAX_CLASS_COUNT)
# show results
print_results(classifier, resnet50_labels.imagenet_category)
# write prediction
if args.write_prediction:
savepath = get_savepath(args.savepath, image_path)
pred_file = '%s.txt' % savepath.rsplit('.', 1)[0]
write_predictions(pred_file, classifier, resnet50_labels.imagenet_category)
logger.info('Script finished successfully.')
def recognize_from_video():
# net initialize
classifier = ailia.Classifier(
MODEL_PATH,
WEIGHT_PATH,
env_id=args.env_id,
format=ailia.NETWORK_IMAGE_FORMAT_RGB,
range=IMAGE_RANGE,
)
capture = webcamera_utils.get_capture(args.video)
# create video writer if savepath is specified as video format
if args.savepath is not None:
f_h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
f_w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
writer = webcamera_utils.get_writer(args.savepath, f_h, f_w)
else:
writer = None
frame_shown = False
    while True:
ret, frame = capture.read()
if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
break
if frame_shown and cv2.getWindowProperty('frame', cv2.WND_PROP_VISIBLE) == 0:
break
_, resized_frame = webcamera_utils.adjust_frame_size(
frame, IMAGE_HEIGHT, IMAGE_WIDTH
)
resized_frame = preprocess_image(resized_frame)
# inference
classifier.compute(resized_frame, MAX_CLASS_COUNT)
# get result
plot_results(frame, classifier, resnet50_labels.imagenet_category)
cv2.imshow('frame', frame)
frame_shown = True
time.sleep(SLEEP_TIME)
# save results
if writer is not None:
writer.write(frame)
capture.release()
cv2.destroyAllWindows()
if writer is not None:
writer.release()
logger.info('Script finished successfully.')
def main():
# model files check and download
check_and_download_models(WEIGHT_PATH, MODEL_PATH, REMOTE_PATH)
if args.video is not None:
# video mode
recognize_from_video()
else:
# image mode
recognize_from_image()
if __name__ == '__main__':
main()
|
f5040cc88008c8b125286c93781c74531a3ed787
|
549270020f6c8724e2ef1b12e38d11b025579f8d
|
/recipes/quickjs/all/conanfile.py
|
8e68a544366f1d85f602af721a2435b46c9a29fc
|
[
"MIT"
] |
permissive
|
conan-io/conan-center-index
|
1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
|
3b17e69bb4e5601a850b6e006e44775e690bac33
|
refs/heads/master
| 2023-08-31T11:34:45.403978
| 2023-08-31T11:13:23
| 2023-08-31T11:13:23
| 204,671,232
| 844
| 1,820
|
MIT
| 2023-09-14T21:22:42
| 2019-08-27T09:43:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,991
|
py
|
conanfile.py
|
from conan import ConanFile
from conan.tools.files import get, copy
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.microsoft import is_msvc
from conan.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.47.0"
class QuickJSConan(ConanFile):
name = "quickjs"
description = "QuickJS is a small and embeddable Javascript engine."
license = "MIT"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://bellard.org/quickjs/"
topics = ("Javascript", "embeddable", "ES2020", "asynchronous")
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"use_bignum": [True, False],
"dump_leaks": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"use_bignum" : True,
"dump_leaks": False,
}
def export_sources(self):
copy(self, "CMakeLists.txt", self.recipe_folder, self.export_sources_folder)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
try:
del self.options.fPIC
except Exception:
pass
try:
del self.settings.compiler.libcxx
except Exception:
pass
try:
del self.settings.compiler.cppstd
except Exception:
pass
def validate(self):
# TODO: there are forked repository to support MSVC. (https://github.com/c-smile/quickjspp)
if is_msvc(self):
raise ConanInvalidConfiguration(f"{self.ref} can not be built on Visual Studio and msvc.")
def layout(self):
cmake_layout(self, src_folder="src")
def source(self):
get(self, **self.conan_data["sources"][self.version], destination=self.source_folder, strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.variables["QUICKJS_SRC_DIR"] = self.source_folder.replace("\\", "/")
tc.variables["USE_BIGNUM"] = self.options.use_bignum
tc.variables["DUMP_LEAKS"] = self.options.dump_leaks
tc.generate()
def build(self):
cmake = CMake(self)
cmake.configure(build_script_folder=os.path.join(self.source_folder, os.pardir))
cmake.build()
def package(self):
copy(self, "LICENSE", self.source_folder, os.path.join(self.package_folder, "licenses"))
cmake = CMake(self)
cmake.install()
def package_info(self):
self.cpp_info.libs = ["quickjs"]
        if self.options.use_bignum:
self.cpp_info.defines.append("CONFIG_BIGNUM")
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs.append("dl")
self.cpp_info.system_libs.append("m")
self.cpp_info.system_libs.append("pthread")
|
9ca264de8ccfd0f41e72f4f432d870c926d3a217
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Configuration/Skimming/test/skimmingMatrix.py
|
a8baa0f7f99ddb2cc17dcd13146cfb7e50ce6a99
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 583
|
py
|
skimmingMatrix.py
|
from __future__ import print_function
import optparse
import os
usage="--list"
parser = optparse.OptionParser(usage)
parser.add_option("--GT")
parser.add_option("--option",default="")
(options,args)=parser.parse_args()
from Configuration.Skimming.autoSkim import autoSkim
for PD in autoSkim:
    com='cmsDriver.py skim -s SKIM:%s --data --conditions %s --python_filename skim_%s.py --magField AutoFromDBCurrent --no_exec %s'%(autoSkim[PD],options.GT,PD,options.option)
if 'cosmic' in PD.lower():
com+=' --scenario cosmics'
print(com)
os.system(com)
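
# e.g. for PD 'Cosmics' the loop prints and runs a command of the form (illustrative):
#   cmsDriver.py skim -s SKIM:<skims> --data --conditions <GT> --python_filename skim_Cosmics.py --magField AutoFromDBCurrent --no_exec --scenario cosmics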
|
74a418af4c2f7b2f98f26b840d5c8a067ac19d23
|
57ad04cc8cbedd2ec38d4924860dc394f8234430
|
/tests/acceptance/steps/django_test_client.py
|
ca3c39c00859459f4f9b10a5a12dd774c7162d3c
|
[
"MIT"
] |
permissive
|
behave/behave-django
|
6e703f4deeca6d93371635765c828cc52a5c6658
|
5637bcdac5f3fc84fd2fdffc1ce3222af4744c7b
|
refs/heads/main
| 2023-08-30T23:14:18.627602
| 2023-08-24T01:56:40
| 2023-08-24T06:16:25
| 53,788,968
| 215
| 63
|
MIT
| 2023-08-24T06:16:26
| 2016-03-13T14:21:55
|
Python
|
UTF-8
|
Python
| false
| false
| 312
|
py
|
django_test_client.py
|
from behave import then, when
@when(u'I use django\'s test client to visit "{url}"')
def use_django_client(context, url):
context.response = context.test.client.get(url)
@then(u'it should return a successful response')
def it_should_be_successful(context):
assert context.response.status_code == 200
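

# Illustrative feature lines these steps match (hedged example, not part of the suite):
#   When I use django's test client to visit "/"
#   Then it should return a successful response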
|
b5cf7b2ab3548c74041c195fc3d2c36c5e66446e
|
2388c65a174c03100347c6a2042a6a0662e67b36
|
/arrow/api.py
|
d8ed24b978506c40a32e8a4b35001ddd746bdfee
|
[
"Apache-2.0"
] |
permissive
|
arrow-py/arrow
|
0fd0897cbdc94207339439ac6b99e945ca93ecd8
|
74a759b88447b6ecd9fd5de610f272c8fb6130a2
|
refs/heads/master
| 2023-09-04T02:56:23.565103
| 2022-11-15T16:25:22
| 2022-11-15T16:25:22
| 6,750,871
| 2,410
| 175
|
Apache-2.0
| 2023-09-12T05:30:58
| 2012-11-18T20:23:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,755
|
py
|
api.py
|
"""
Provides the default implementation of :class:`ArrowFactory <arrow.factory.ArrowFactory>`
methods for use as a module API.
"""
from datetime import date, datetime
from datetime import tzinfo as dt_tzinfo
from time import struct_time
from typing import Any, List, Optional, Tuple, Type, Union, overload
from arrow.arrow import TZ_EXPR, Arrow
from arrow.constants import DEFAULT_LOCALE
from arrow.factory import ArrowFactory
# internal default factory.
_factory = ArrowFactory()
# TODO: Use Positional Only Argument (https://www.python.org/dev/peps/pep-0570/)
# once Python 3.7 support is dropped
@overload
def get(
*,
locale: str = DEFAULT_LOCALE,
tzinfo: Optional[TZ_EXPR] = None,
normalize_whitespace: bool = False,
) -> Arrow:
... # pragma: no cover
@overload
def get(
*args: int,
locale: str = DEFAULT_LOCALE,
tzinfo: Optional[TZ_EXPR] = None,
normalize_whitespace: bool = False,
) -> Arrow:
... # pragma: no cover
@overload
def get(
__obj: Union[
Arrow,
datetime,
date,
struct_time,
dt_tzinfo,
int,
float,
str,
Tuple[int, int, int],
],
*,
locale: str = DEFAULT_LOCALE,
tzinfo: Optional[TZ_EXPR] = None,
normalize_whitespace: bool = False,
) -> Arrow:
... # pragma: no cover
@overload
def get(
__arg1: Union[datetime, date],
__arg2: TZ_EXPR,
*,
locale: str = DEFAULT_LOCALE,
tzinfo: Optional[TZ_EXPR] = None,
normalize_whitespace: bool = False,
) -> Arrow:
... # pragma: no cover
@overload
def get(
__arg1: str,
__arg2: Union[str, List[str]],
*,
locale: str = DEFAULT_LOCALE,
tzinfo: Optional[TZ_EXPR] = None,
normalize_whitespace: bool = False,
) -> Arrow:
... # pragma: no cover
def get(*args: Any, **kwargs: Any) -> Arrow:
"""Calls the default :class:`ArrowFactory <arrow.factory.ArrowFactory>` ``get`` method."""
return _factory.get(*args, **kwargs)
get.__doc__ = _factory.get.__doc__
def utcnow() -> Arrow:
"""Calls the default :class:`ArrowFactory <arrow.factory.ArrowFactory>` ``utcnow`` method."""
return _factory.utcnow()
utcnow.__doc__ = _factory.utcnow.__doc__
def now(tz: Optional[TZ_EXPR] = None) -> Arrow:
"""Calls the default :class:`ArrowFactory <arrow.factory.ArrowFactory>` ``now`` method."""
return _factory.now(tz)
now.__doc__ = _factory.now.__doc__
def factory(type: Type[Arrow]) -> ArrowFactory:
"""Returns an :class:`.ArrowFactory` for the specified :class:`Arrow <arrow.arrow.Arrow>`
or derived type.
:param type: the type, :class:`Arrow <arrow.arrow.Arrow>` or derived.
"""
return ArrowFactory(type)
__all__ = ["get", "utcnow", "now", "factory"]
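

# Illustrative module-level usage (hedged sketch):
# >>> import arrow
# >>> arrow.get("2013-05-11T21:23:58.970460+07:00")   # parse an ISO-8601 string
# >>> arrow.utcnow()                                  # current UTC time as an Arrow
# >>> arrow.now("US/Pacific")                         # current time in a named zone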
|
99ae3a743e309adf704fe4886747c9325fc4a7bd
|
b8bbdfc593b6d816e67a344f720f90ec05236778
|
/tests/utils/test_state.py
|
16153c7637d3a7e2c1baa68d48331309efe4324a
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
apache/airflow
|
ed78db0a8bab7e096990e143926e52f518e288ab
|
1b122c15030e99cef9d4ff26d3781a7a9d6949bc
|
refs/heads/main
| 2023-09-01T08:37:34.556097
| 2023-09-01T06:49:05
| 2023-09-01T06:49:05
| 33,884,891
| 22,756
| 11,558
|
Apache-2.0
| 2023-09-14T20:12:36
| 2015-04-13T18:04:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,364
|
py
|
test_state.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.models import DAG
from airflow.models.dagrun import DagRun
from airflow.utils.session import create_session
from airflow.utils.state import DagRunState
from airflow.utils.types import DagRunType
from tests.models import DEFAULT_DATE
def test_dagrun_state_enum_escape():
"""
Make sure DagRunState.QUEUED is converted to string 'queued' when
referenced in DB query
"""
with create_session() as session:
dag = DAG(dag_id="test_dagrun_state_enum_escape", start_date=DEFAULT_DATE)
dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
state=DagRunState.QUEUED,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
query = session.query(DagRun.dag_id, DagRun.state, DagRun.run_type,).filter(
DagRun.dag_id == dag.dag_id,
# make sure enum value can be used in filter queries
DagRun.state == DagRunState.QUEUED,
)
assert str(query.statement.compile(compile_kwargs={"literal_binds": True})) == (
"SELECT dag_run.dag_id, dag_run.state, dag_run.run_type \n"
"FROM dag_run \n"
"WHERE dag_run.dag_id = 'test_dagrun_state_enum_escape' AND dag_run.state = 'queued'"
)
rows = query.all()
assert len(rows) == 1
assert rows[0].dag_id == dag.dag_id
# make sure value in db is stored as `queued`, not `DagRunType.QUEUED`
assert rows[0].state == "queued"
session.rollback()
|
7a00f9e110e71d0606ac792e7986931a26c0bb6e
|
59864cbd213b5da6f50d6255b0a021564b3d5bd4
|
/challenges/Single-Sign-On/poller/for-release/machine.py
|
25a8815eadf6df81e60475be35b03393b705cbfe
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unknown",
"BSD-2-Clause"
] |
permissive
|
trailofbits/cb-multios
|
8af96a4fbc3b34644367faa135347f88e0e0d0a3
|
810d7b24b1f62f56ef49b148fe155b0d0629cad2
|
refs/heads/master
| 2023-09-05T03:56:20.229403
| 2022-12-27T15:47:54
| 2022-12-27T15:47:54
| 41,688,943
| 522
| 133
|
MIT
| 2023-06-29T02:47:13
| 2015-08-31T17:04:31
|
C
|
UTF-8
|
Python
| false
| false
| 7,079
|
py
|
machine.py
|
#!/usr/bin/env python
#
# Copyright (C) 2014 Narf Industries <info@narfindustries.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from generator.actions import Actions, Variable
import string
import random
import itertools
import math
from struct import *
from collections import Counter
class TemplateGenerator(Actions):
def start(self):
self.token = pack('l', 4)
self.resource = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(5))
self.users = []
self.commands = ['REQR', 'LOUT', 'REVR', 'REVU']
def command_no_user_failure(self):
command = random.choice(self.commands)
user_id = random.randint(1,100000)
while Counter(elem[0][0] for elem in self.users)[user_id] > 0:
user_id = random.randint(1,100000)
user_id_str = pack('l', user_id)
self.write(command + user_id_str + self.token + self.resource + '\x07')
self.read(delim="\n", expect = "Command failed.\n")
def request_no_resource_failure(self):
user = random.choice(self.users)
user_id_str = pack('l', user[0][0])
resource = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(5))
while Counter(elem[0] for elem in user[1])[resource] > 0:
resource = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(5))
self.write("REQR" + user_id_str + self.token + resource + '\x07')
self.read(delim="\n", expect = "Please re-authenticate.\n")
def request_resource_wrong_token_failure(self):
auth_val = Variable('authval')
if len(filter(lambda x: x[1], self.users)) == 0:
self.auth_success_with_auth_val()
user = random.choice(filter(lambda x: x[1], self.users))
user_id_str = pack('l', user[0][0])
resource = random.choice(user[1])
self.write("REQR" + user_id_str + self.token + resource[0] + '\x07')
regex_str = 'Authentication failed for resource %s(.*)\x0a' % resource[0]
auth_val.set_re(regex_str, group=1)
self.read(delim="\n", assign=auth_val)
self.read(delim="\n", expect = "Please re-authenticate.\n")
def request_success(self):
my_token = Variable('token')
if len(filter(lambda x: x[1], self.users)) == 0:
self.auth_success_with_auth_val()
user = random.choice(filter(lambda x: x[1], self.users))
user_id_str = pack('l', user[0][0])
resource = random.choice(user[1])
self.write("REQR" + user_id_str)
self.write(resource[1])
self.write(resource[0] + '\x07')
regex_str = 'Access to %s is granted!(.*)\x0a' % resource[0]
my_token.set_re(regex_str, group=1)
self.read(delim="\n", assign=my_token)
def auth_failure_new_user(self):
user_id = random.randint(2,100000)
while Counter(elem[0][0] for elem in self.users)[user_id] > 0:
user_id = random.randint(2,100000)
user_id_str = pack('l', user_id)
varname = "user"+str(user_id)
variable = Variable(varname)
user_item = [user_id, variable]
self.users.append([user_item, []])
resource = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(10))
self.write("AUTH" + user_id_str + self.token + resource + '\x07')
regex_str = 'Authentication failed for resource %s(.*)\x0a' % resource
variable.set_re(regex_str, group=1)
self.read(delim="\n", assign=variable)
def auth_failure_current_user(self):
user = random.choice(self.users)
user_id_str = pack('l', user[0][0])
resource = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(10))
variable = user[0][1]
self.write("AUTH" + user_id_str + self.token + resource + '\x07')
regex_str = 'Authentication failed for resource %s(.*)\x0a' % resource
variable.set_re(regex_str, group=1)
self.read(delim="\n", assign=variable)
def auth_success_with_auth_val(self):
user = random.choice(self.users)
user_id_str = pack('l', user[0][0])
resource = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(9))
varname = resource+str(user[0][0])
variable = Variable(varname)
resource_item = [resource, variable]
user[1].append(resource_item)
self.write("AUTH" + user_id_str)
self.write(user[0][1])
self.write(resource + '\x07')
regex_str = 'Access to %s is granted!(.*)\x0a' % resource
variable.set_re(regex_str, group=1)
self.read(delim="\n", assign=variable)
def revoke_resource_success(self):
if len(filter(lambda x: x[1], self.users)) == 0:
self.auth_success_with_auth_val()
user = random.choice(filter(lambda x: x[1], self.users))
user_id_str = pack('l', user[0][0])
resource = random.choice(user[1])
user[1].remove(resource)
self.write("REVR" + user_id_str)
self.write(resource[1])
self.write(resource[0] + '\x07')
self.read(delim="\n", expect="Resource revoked.\n")
def revoke_resource_failure(self):
if len(filter(lambda x: x[1], self.users)) == 0:
self.auth_success_with_auth_val()
user = random.choice(filter(lambda x: x[1], self.users))
user_id_str = pack('l', user[0][0])
resource = random.choice(user[1])
token = self.token
self.write("REVR" + user_id_str)
self.write(token)
self.write(resource[0] + '\x07')
self.read(delim="\n", expect="Revocation denied.\n")
def logout(self):
if not self.users:
return self.quit()
user = random.choice(self.users)
user_id_str = pack('l', user[0][0])
self.users.remove(user)
self.write("LOUT" + user_id_str + self.token + self.resource + '\x07')
self.read(delim="\n", expect="Logged out successfully.\n")
def quit(self):
return -1
|
24e6fb5974019e08b95e0e5e411d2fd5fa607b5a
|
38d86234ef4ba4ed5ac3bf585bcff8615004d2a6
|
/ssseg/modules/models/segmentors/pspnet/__init__.py
|
c537604462f5fe493767070b9160a31d7be51044
|
[
"Apache-2.0"
] |
permissive
|
SegmentationBLWX/sssegmentation
|
e57e7a071b03214c55248c4b1e64c85796744bf1
|
fe3d0dac83055b728fe3c5df964507fc7cc4948c
|
refs/heads/main
| 2023-08-05T02:49:57.370911
| 2023-08-01T13:49:17
| 2023-08-01T13:49:17
| 306,540,019
| 725
| 97
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 81
|
py
|
__init__.py
|
'''initialize'''
from .pspnet import PSPNet
from .ppm import PyramidPoolingModule
|
934cb4ef0b70cdc4365ccbd82c48803334819726
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/11_动态规划/acwingdp专项练习/计数类dp/N 个节点的无向连通图有多少个.py
|
b976432c59a8837985df9b8d6a0de7af5ee50c62
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
N 个节点的无向连通图有多少个.py
|
# How many undirected connected graphs are there on N labeled nodes?
# 1 <= N <= 50
# https://www.acwing.com/solution/content/87369/
# https://oeis.org/search?q=1%2C1%2C4%2C38&language=english&go=Search
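
# A minimal sketch of the standard subtraction DP (hedged: function and variable
# names are illustrative; Python big integers keep the counts exact):
from math import comb


def count_connected_graphs(n: int) -> int:
    # g[i]: all labeled graphs on i nodes = 2^C(i, 2)
    g = [1 << (i * (i - 1) // 2) for i in range(n + 1)]
    f = [0] * (n + 1)  # f[i]: connected labeled graphs on i nodes
    for i in range(1, n + 1):
        f[i] = g[i]
        # fix node 1; subtract graphs whose component containing it has k < i nodes
        for k in range(1, i):
            f[i] -= comb(i - 1, k - 1) * f[k] * g[i - k]
    return f[n]


if __name__ == "__main__":
    print([count_connected_graphs(i) for i in range(1, 5)])  # [1, 1, 4, 38]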
|
f7544e6a4e0b9395da12f455521c8eaf25ed516f
|
82db07aef13ed906dd52816f01c8e2f4a5825295
|
/tests/basic/test_basic.py
|
b546b541a85a777d29b106a641b5fb115b46f26c
|
[
"BSD-3-Clause"
] |
permissive
|
jrialland/python-astar
|
4a2e1c1564d85be724dbd79f8ea8f06df0b7ba55
|
8efaf3873f6ef9a762ec545d82f4e40ada763a37
|
refs/heads/master
| 2023-08-08T15:51:07.988273
| 2023-08-01T10:18:15
| 2023-08-01T10:18:15
| 22,218,925
| 183
| 74
|
BSD-3-Clause
| 2023-09-13T21:58:42
| 2014-07-24T15:44:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,758
|
py
|
test_basic.py
|
import unittest
from astar import AStar
class BasicAStar(AStar):
def __init__(self, nodes):
self.nodes = nodes
def neighbors(self, n):
for n1, d in self.nodes[n]:
yield n1
def distance_between(self, n1, n2):
for n, d in self.nodes[n1]:
if n == n2:
return d
def heuristic_cost_estimate(self, current, goal):
return 1
def is_goal_reached(self, current, goal):
return current == goal
class BasicTests(unittest.TestCase):
def test_bestpath(self):
"""ensure that we take the shortest path, and not the path with less elements.
the path with less elements is A -> B with a distance of 100
the shortest path is A -> C -> D -> B with a distance of 60
"""
nodes = {'A': [('B', 100), ('C', 20)],
'C': [('D', 20)],
'D': [('B', 20)]}
path = BasicAStar(nodes).astar('A', 'B')
self.assertIsNotNone(path)
if path:
path = list(path)
self.assertEqual(4, len(path))
for i, n in enumerate('ACDB'):
self.assertEqual(n, path[i])
def test_issue_15(self):
"""This test case reproduces https://github.com/jrialland/python-astar/issues/15.
B has no neighbors, therefore the computation should return None and not raise an exception.
"""
node = {
'A': [('B', 200000)],
'C': [('D', 200000)],
'D': [('E', 200000)],
'E': [('F', 200000)],
'B': [],
'F': []
}
path = BasicAStar(node).astar('A', 'D')
self.assertIsNone(path)
if __name__ == '__main__':
unittest.main()
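

# Illustrative standalone usage of BasicAStar (hedged sketch):
# >>> nodes = {'A': [('B', 1), ('C', 5)], 'B': [('C', 1)], 'C': []}
# >>> list(BasicAStar(nodes).astar('A', 'C'))
# ['A', 'B', 'C']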
|
254ae87e14b0237df8b68798e90ce88467c3c755
|
76908a765b4b430167bfeb6a12ecc3e247552c2a
|
/widgets/pose_control/generic_daq_slider_ui.py
|
fbbbfe0512bdbb02880966346cfe8fc458ea6b12
|
[
"MIT"
] |
permissive
|
mithi/hexapod-robot-simulator
|
3159a345645d2d82451c45ddc17cbbc4ed5ab5ee
|
47eda0bdd3556220fa0cfcb197fbc0be1fed4c49
|
refs/heads/master
| 2023-09-06T06:26:48.099190
| 2023-04-09T11:53:51
| 2023-04-09T11:53:51
| 240,484,880
| 759
| 114
|
MIT
| 2023-05-01T21:37:40
| 2020-02-14T10:37:49
|
Python
|
UTF-8
|
Python
| false
| false
| 428
|
py
|
generic_daq_slider_ui.py
|
from widgets.pose_control.joint_widget_maker import (
make_all_joint_widgets,
make_daq_slider,
)
from widgets.pose_control.kinematics_section_maker import make_section
# ................................
# COMPONENTS
# ................................
widgets = make_all_joint_widgets(joint_input_function=make_daq_slider)
KINEMATICS_WIDGETS_SECTION = make_section(
widgets, style_to_use={"padding": "0 0 0 3em"}
)
|
9ae96e10aaa310875adc834e11c54bb4e6d3c4f2
|
f43a1f64cb5c483fad6782c866508d8724622f24
|
/PyViCare/PyViCare.py
|
163b54bd9ea0614928124ece31f7ace355b1ea04
|
[
"Apache-2.0"
] |
permissive
|
somm15/PyViCare
|
88bfca043e739f7b662be0dc0a05e443e7a31825
|
8ba411483a865e074d1146fd1b8b7a8c4f4be122
|
refs/heads/master
| 2023-08-31T11:34:36.605842
| 2023-06-08T20:17:01
| 2023-06-08T20:17:01
| 164,931,903
| 110
| 89
|
Apache-2.0
| 2023-09-11T22:18:11
| 2019-01-09T20:20:53
|
Python
|
UTF-8
|
Python
| false
| false
| 4,081
|
py
|
PyViCare.py
|
import logging
from datetime import datetime
from PyViCare.PyViCareAbstractOAuthManager import AbstractViCareOAuthManager
from PyViCare.PyViCareBrowserOAuthManager import ViCareBrowserOAuthManager
from PyViCare.PyViCareCachedService import ViCareCachedService
from PyViCare.PyViCareDeviceConfig import PyViCareDeviceConfig
from PyViCare.PyViCareOAuthManager import ViCareOAuthManager
from PyViCare.PyViCareService import ViCareDeviceAccessor, ViCareService
from PyViCare.PyViCareUtils import PyViCareInvalidDataError
logger = logging.getLogger('ViCare')
logger.addHandler(logging.NullHandler())
""""Viessmann ViCare API Python tools"""
class PyViCare:
def __init__(self) -> None:
self.cacheDuration = 60
def setCacheDuration(self, cache_duration):
self.cacheDuration = int(cache_duration)
def initWithCredentials(self, username: str, password: str, client_id: str, token_file: str):
self.initWithExternalOAuth(ViCareOAuthManager(
username, password, client_id, token_file))
def initWithExternalOAuth(self, oauth_manager: AbstractViCareOAuthManager) -> None:
self.oauth_manager = oauth_manager
self.__loadInstallations()
def initWithBrowserOAuth(self, client_id: str, token_file: str) -> None:
self.initWithExternalOAuth(ViCareBrowserOAuthManager(client_id, token_file))
def __buildService(self, accessor, roles):
if self.cacheDuration > 0:
return ViCareCachedService(self.oauth_manager, accessor, roles, self.cacheDuration)
else:
return ViCareService(self.oauth_manager, accessor, roles)
def __loadInstallations(self):
installations = self.oauth_manager.get(
"/equipment/installations?includeGateways=true")
if "data" not in installations:
logger.error("Missing 'data' property when fetching installations")
raise PyViCareInvalidDataError(installations)
data = installations['data']
self.installations = Wrap(data)
self.devices = list(self.__extract_devices())
def __extract_devices(self):
for installation in self.installations:
for gateway in installation.gateways:
for device in gateway.devices:
                    if device.deviceType not in (
                        "heating", "zigbee", "vitoconnect",
                        "electricityStorage", "EEBus", "hems", "tcu",
                    ):
                        # only heating, zigbee (photovoltaic), vitoconnect,
                        # electricity-storage, EEBus, hems and tcu devices
                        # are of interest here
                        continue
if device.id == "gateway" and device.deviceType == "vitoconnect":
device.id = "0" # vitoconnect has no device id, so we use 0
if device.id == "gateway" and device.deviceType == "tcu":
device.id = "0" # tcu has no device id, so we use 0
if device.id == "HEMS" and device.deviceType == "hems":
device.id = "0" # hems has no device id, so we use 0
if device.id == "EEBUS" and device.deviceType == "EEBus":
device.id = "0" # EEBus has no device id,
accessor = ViCareDeviceAccessor(
installation.id, gateway.serial, device.id)
service = self.__buildService(accessor, device.roles)
logger.info(f"Device found: {device.modelId}")
yield PyViCareDeviceConfig(service, device.id, device.modelId, device.status)
class DictWrap(object):
def __init__(self, d):
for k, v in d.items():
setattr(self, k, Wrap(v))
def Wrap(v):
if isinstance(v, list):
return [Wrap(x) for x in v]
if isinstance(v, dict):
return DictWrap(v)
if isinstance(v, str) and len(v) == 24 and v[23] == 'Z' and v[10] == 'T':
return datetime.strptime(v, '%Y-%m-%dT%H:%M:%S.%f%z')
else:
return v
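

# Illustrative behaviour of Wrap (follows directly from the code above):
# >>> w = Wrap({"gateways": [{"serial": "abc", "devices": []}]})
# >>> w.gateways[0].serial
# 'abc'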
|
397a97e894be370c958392180acfa9764f2fdd32
|
9efca95a55cb4df52d895d42f1ec10331516a734
|
/tools/c7n_left/c7n_left/test.py
|
c95f38b2c94081ac2756b7333b47f1082b37176a
|
[
"Apache-2.0"
] |
permissive
|
cloud-custodian/cloud-custodian
|
519e602abe00c642786441b64cc40857ef5bc9de
|
27563cf4571040f923124e1acb2463f11e372225
|
refs/heads/main
| 2023-09-04T10:54:55.963703
| 2023-09-01T17:40:17
| 2023-09-01T17:40:17
| 52,837,350
| 3,327
| 1,096
|
Apache-2.0
| 2023-09-14T14:03:30
| 2016-03-01T01:11:20
|
Python
|
UTF-8
|
Python
| false
| false
| 7,178
|
py
|
test.py
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
#
import operator
import time
from c7n.config import Config
from c7n.data import Data as DataMatcher
from c7n.utils import load_file
from c7n.output import NullTracer
from .core import CollectionRunner
from .output import RichCli, Output
class TestRunner:
def __init__(self, policies, options, reporter):
self.policies = policies
self.options = options
self.reporter = reporter
self.unmatched_policies = set()
self.unmatched_tests = set()
def run(self) -> bool:
policy_tests = self.get_policy_tests()
self.reporter.on_tests_discovered(self, policy_tests)
for test in sorted(policy_tests, key=operator.attrgetter("name")):
self.run_test(test)
self.reporter.on_test_result(test)
self.reporter.on_tests_complete()
return bool(self.reporter.failures)
def run_test(self, test) -> bool:
checker = TestChecker(test, self.options)
runner = CollectionRunner(
[test.policy],
self.options.copy(
exec_filter=self.options.get("exec_filter"), source_dir=test.test_dir
),
checker,
)
runner.run()
def get_policy_tests(self):
policy_map = {p.name: p for p in self.policies}
test_map = {t.name: t for t in self.get_tests(self.options.source_dir) if t}
self.unmatched_policies = set(policy_map).difference(test_map)
self.unmatched_tests = set(test_map).difference(policy_map)
matched = set(policy_map).intersection(test_map)
for name in matched:
test_map[name].set_policy(policy_map[name])
return [test_map[name] for name in matched]
def get_tests(self, source_dir):
tests = []
for test_dir in source_dir.iterdir():
if not test_dir.is_dir():
continue
plan_candidates = [
test_dir / "left.plan.json",
test_dir / "left.plan.yaml",
test_dir / "left.plan.yml",
]
for c in plan_candidates:
if not c.exists():
continue
tests.append(self.load_plan(test_dir, c))
return tests
def load_plan(self, test_dir, plan_path):
try:
plan = load_file(plan_path)
return Test(plan, test_dir)
except Exception as e:
self.reporter.on_test_load_error(plan_path, e)
class Test:
def __init__(self, plan_data, test_dir):
self.plan = TestPlan(plan_data)
self.test_dir = test_dir
self.policy = None
@property
def name(self):
return self.test_dir.name
def set_policy(self, policy):
self.policy = policy
def check_execution_result(self, result):
self.plan.match(result)
def get_test_result(self):
result = self.plan.get_test_result()
result["name"] = self.name
return result
class TestPlan:
def __init__(self, plan_data):
self.data = plan_data
self.used = set()
self.matchers = []
self.unmatched = []
self.initialize_matchers()
def get_test_result(self):
return {
"success": len(self.used) == len(self.matchers) and not self.unmatched,
"stat_checks": len(self.matchers),
"stat_used": len(self.used),
"stat_unmatched": len(self.unmatched),
"unmatched": self.unmatched,
"unused": [t for idx, t in enumerate(self.data) if idx not in self.used],
}
def initialize_matchers(self):
cfg = Config.empty(session_factory=None, tracer=NullTracer(None), options=None)
matchers = []
for match_block in self.data:
matcher = DataMatcher(cfg, {"filters": [{k: v} for k, v in match_block.items()]})
for i in matcher.iter_filters():
i.annotate = False
matchers.append(matcher)
self.matchers = matchers
def match(self, result):
found = False
for idx, matcher in enumerate(self.matchers):
if idx in self.used:
continue
if matcher.filter_resources([result.as_dict()]):
self.used.add(idx)
found = True
break
if found is False:
self.unmatched.append(result.as_dict())
class TestReporter(RichCli):
def __init__(self, ctx, config):
super().__init__(ctx, config)
self.start_time = time.time()
self.failures = 0
self.total = 0
def on_tests_discovered(self, runner, tests):
header = f"Discovered {len(tests)} Tests"
if runner.unmatched_policies:
header += f" - {len(runner.unmatched_policies)}/{len(runner.policies)}"
header += " Policies Untested"
if runner.unmatched_tests and not self.config.get("filters"):
header += f" - [red]{len(runner.unmatched_tests)} Unused Tests"
self.console.print(header)
if self.config.get("verbose", True) and not self.config.get("filters"):
for p in runner.unmatched_policies:
self.console.print(f"no test for {p}")
for t in runner.unmatched_tests:
self.console.print(f"no policy for {t}")
def on_tests_complete(self):
status = f"{self.total} "
status += self.total > 1 and "Tests" or "Test"
status += " Complete (%0.2fs)" % (time.time() - self.start_time)
if self.failures:
status += f" [red]{self.failures} "
status += self.failures > 1 and "Failures" or "Failure"
status += "[/red]"
self.console.print(status)
def on_test_load_error(self, test_path, error):
self.console.print(f"[yellow]test load error[yellow] {test_path} - {error}")
def on_test_result(self, test: Test):
self.total += 1
result = test.get_test_result()
if result["success"]:
status = f"[green]Success[/green] {result['name']}"
status += f" - {result['stat_checks']} checks"
self.console.print(status)
return
self.failures += 1
status = f"[red]Failure[/red] {result['name']}"
if result["stat_unmatched"]:
status += f" - {result['stat_unmatched']} findings unmatched"
if result["unused"]:
status += f" - {len(result['unused'])} Checks not used"
self.console.print(status)
if result["unused"]:
self.console.print("Unused Checks")
for u in result["unused"]:
self.console.print(u)
if result["unmatched"]:
self.console.print("Unmatched Findings")
for unmatched in result["unmatched"]:
unmatched = dict(unmatched)
unmatched.pop("policy")
self.console.print(unmatched)
self.console.print("")
class TestChecker(Output):
def on_results(self, results):
for r in results:
self.ctx.check_execution_result(r)
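# A brief sketch (not from the upstream file) of how the pieces fit together:
# each test directory ships a left.plan file whose entries are c7n data-filter
# blocks, and TestPlan ticks one entry off per matching execution result. The
# plan entry and result below are illustrative only.
#
#     plan = TestPlan([{"policy": "check-encryption"}])
#     plan.match(result)        # result exposes .as_dict(), as in on_results()
#     plan.get_test_result()    # success iff every check matched and nothing was left over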
|
7eca00e3fe2d64c55900410d97d971f14bfb8fb8
|
e6a5c54d3ba5e8ec69c1aebc7ba4992b1e3fdb68
|
/test/test_indexer.py
|
c0215644dac8a4979cb74d9c12df6a668545d98a
|
[
"MIT"
] |
permissive
|
arun1729/cog
|
6697bb5d6120705712bc84d297a7895020a18b06
|
24a362880fb7bc501787dd695fcdf979e77212c7
|
refs/heads/master
| 2023-08-04T04:54:30.743451
| 2023-07-30T02:03:06
| 2023-07-30T02:03:06
| 111,333,972
| 258
| 25
|
MIT
| 2023-07-30T02:03:07
| 2017-11-19T21:35:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,087
|
py
|
test_indexer.py
|
from cog.core import Record
from cog.core import Table
from cog import config
import logging
import os
import shutil
from logging.config import dictConfig
import string
import random
import unittest
DIR_NAME = "TestIndexer"
class TestIndexer(unittest.TestCase):
@classmethod
def setUpClass(cls):
path = "/tmp/"+DIR_NAME+"/test_table/"
if not os.path.exists(path):
os.makedirs(path)
config.CUSTOM_COG_DB_PATH = "/tmp/"+DIR_NAME
print("*** " + config.CUSTOM_COG_DB_PATH + "\n")
def test_indexer_put_get(self):
if not os.path.exists("/tmp/"+DIR_NAME+"/test_table/"):
os.makedirs("/tmp/"+DIR_NAME+"/test_table/")
config.COG_HOME = DIR_NAME
print("*** " + config.COG_HOME + "\n")
dictConfig(config.logging_config)
logger = logging.getLogger()
table = Table("testdb","test_table","test_xcvzdfsadx", config, logger)
store = table.store
indexer = table.indexer.index_list[0]
max_range=100
for i in range(max_range):
key= ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
value= ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(100))
expected_data = Record(key,value)
position=store.save(expected_data)
indexer.put(expected_data.key,position,store)
returned_data=indexer.get(expected_data.key, store)
print("indexer retrieved data: "+str(returned_data))
self.assertTrue(expected_data.is_equal_val(returned_data))
print("Test progress: "+str(i*100.0/max_range))
c = 0
scanner = indexer.scanner(store)
for r in scanner:
c += 1
self.assertEqual(max_range, c)
indexer.close()
store.close()
table.close()
@classmethod
def tearDownClass(cls):
shutil.rmtree("/tmp/"+DIR_NAME+"/")
print("*** deleted test data: " + "/tmp/"+DIR_NAME)
if __name__ == '__main__':
unittest.main()
|
805736b425d1278574733cffdbad0303bd428b8c
|
ac061521667ca29d3e722fac91c553f5a2357238
|
/Chapter_12/12-6/echopong
|
a7a5e8219195f351a37b35f076341c197cbfa8ae
|
[] |
no_license
|
stanleylst/ansibleUI
|
41c5e1bacac818af5d6416fe14f1a1f28a2877a7
|
629ff046452fc85f44153bbc18ef190cb1a64ed0
|
refs/heads/master
| 2020-05-22T00:00:13.747773
| 2019-01-09T08:35:48
| 2019-01-09T08:35:48
| 55,881,270
| 122
| 99
| null | 2018-06-20T06:04:24
| 2016-04-10T04:46:23
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 442
|
echopong
|
#!/usr/bin/python
from ansible.module_utils.basic import *
import os
module = AnsibleModule(
argument_spec = dict(
args=dict(required=True)),
supports_check_mode=True
)
args = module.params['args']
rc = os.system('echo {0}'.format(args))
if rc == 0:
result = dict(echo=args, changed=True)
module.exit_json(**result)
else:
module.fail_json(msg="errors happened")
|
|
2004ea83426510457f4975d4646dc1076f444bdb
|
fe62120411ed637989e584294150637c48565522
|
/hubspot3/owners.py
|
f213a01ccb90a7620579baa7512586eebe4aa628
|
[
"MIT"
] |
permissive
|
jpetrucciani/hubspot3
|
a8fbeb7c30b20c4c9858a41753167b73bdbaf5df
|
f8806f3ccc89ff1ec4468319ae742dc6dd96a819
|
refs/heads/master
| 2023-06-24T16:17:01.003178
| 2023-06-15T22:06:26
| 2023-06-15T22:06:26
| 88,071,177
| 147
| 84
|
MIT
| 2023-06-15T21:58:13
| 2017-04-12T16:03:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,185
|
py
|
owners.py
|
"""
hubspot owners api
"""
from hubspot3.crm_associations import CRMAssociationsClient
from hubspot3.base import BaseClient
OWNERS_API_VERSION = "v2"
class OwnersClient(BaseClient):
"""
hubspot3 Owners client
:see: https://developers.hubspot.com/docs/methods/owners/owners_overview
"""
def _get_path(self, subpath):
"""get the full api url for the given subpath on this client"""
return f"owners/{OWNERS_API_VERSION}/owners"
def get_owners(self, **options):
"""Only returns the list of owners, does not include additional metadata"""
return self._call("owners", **options)
def get_owner_name_by_id(self, owner_id: str, **options) -> str:
"""Given an id of an owner, return their name"""
owner_name = "value_missing"
owners = self.get_owners(**options)
for owner in owners:
if int(owner["ownerId"]) == int(owner_id):
owner_name = f"{owner['firstName']} {owner['lastName']}"
return owner_name
def get_owner_email_by_id(self, owner_id: str, **options) -> str:
"""given an id of an owner, return their email"""
owner_email = "value_missing"
owner = self.get_owner_by_id(owner_id, **options)
if owner:
owner_email = owner["email"]
return owner_email
def get_owner_by_id(self, owner_id, **options):
"""Retrieve an owner by its id."""
owners = self.get_owners(**options)
for owner in owners:
if int(owner["ownerId"]) == int(owner_id):
return owner
return None
def get_owner_by_email(self, owner_email: str, **options):
"""
Retrieve an owner by its email.
"""
owners = self.get_owners(method="GET", params={"email": owner_email}, **options)
if owners:
return owners[0]
return None
def link_owner_to_company(self, owner_id, company_id):
"""
Link an owner to a company by using their ids.
"""
associations_client = CRMAssociationsClient(**self.credentials)
return associations_client.link_owner_to_company(owner_id, company_id)
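# Minimal usage sketch, not taken from the upstream docs; the credentials below
# are placeholders and the api_key keyword is assumed to be accepted by BaseClient.
#
#     from hubspot3.owners import OwnersClient
#     client = OwnersClient(api_key="demo")                  # hypothetical key
#     owner = client.get_owner_by_email("jane@example.com")  # None when no match
#     if owner:
#         print(client.get_owner_name_by_id(owner["ownerId"]))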
|
8d0602271d9af459fed60723a498c88e26c9d900
|
fdb9bdc6c4ab2f14ba71e544493706d5e275899f
|
/fhir/resources/R4B/tests/test_regulatedauthorization.py
|
e2c96ddbedbf51702e05e2304f5e0c2e2f737a94
|
[
"BSD-3-Clause"
] |
permissive
|
nazrulworld/fhir.resources
|
6ae8aea8180c611b0c5050759c6dcdf63e4cb061
|
1fd6ea476b27b3fcb8c4ef8f23bc51cf161e69e3
|
refs/heads/main
| 2023-08-30T18:27:27.277249
| 2023-07-03T19:57:06
| 2023-07-03T19:57:06
| 165,297,877
| 256
| 83
|
NOASSERTION
| 2023-08-24T15:34:05
| 2019-01-11T19:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 5,638
|
py
|
test_regulatedauthorization.py
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/RegulatedAuthorization
Release: R4B
Version: 4.3.0
Build ID: c475c22
Last updated: 2022-05-28T12:47:40.239+10:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from .. import fhirtypes # noqa: F401
from .. import regulatedauthorization
def impl_regulatedauthorization_1(inst):
assert inst.holder.display == "EquiliDrugCo Holdings Inc."
assert inst.holder.reference == "Organization/EqlidrugCo"
assert inst.id == "basic-drug-auth"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.regulator.display == "FDA"
assert inst.regulator.reference == "Organization/FDA"
assert inst.status.coding[0].code == "active"
assert inst.statusDate == fhirtypes.DateTime.validate("2016-01-01")
assert inst.subject[0].reference == "MedicinalProductDefinition/equilidem"
assert inst.text.status == "generated"
assert inst.type.text == "Regulatory Drug Marketing Approval"
def test_regulatedauthorization_1(base_settings):
"""No. 1 tests collection for RegulatedAuthorization.
Test File: regulatedauthorization-example-basic-drug-auth.json
"""
filename = (
base_settings["unittest_data_dir"]
/ "regulatedauthorization-example-basic-drug-auth.json"
)
inst = regulatedauthorization.RegulatedAuthorization.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "RegulatedAuthorization" == inst.resource_type
impl_regulatedauthorization_1(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "RegulatedAuthorization" == data["resourceType"]
inst2 = regulatedauthorization.RegulatedAuthorization(**data)
impl_regulatedauthorization_1(inst2)
def impl_regulatedauthorization_2(inst):
assert inst.case.application[0].dateDateTime == fhirtypes.DateTime.validate(
"2015-08-01"
)
assert (
inst.case.application[0].identifier.system
== "http://ema.europa.eu/example/applicationidentifier-number"
)
assert inst.case.application[0].identifier.value == "IA38G"
assert (
inst.case.application[0].type.coding[0].code
== "GroupTypeIAVariationNotification"
)
assert inst.case.application[0].type.coding[0].system == (
"http://ema.europa.eu/example/marketingAuthorisationApplicati" "onType"
)
assert inst.case.application[1].dateDateTime == fhirtypes.DateTime.validate(
"2014-09-01"
)
assert (
inst.case.application[1].identifier.system
== "http://ema.europa.eu/example/applicationidentifier-number"
)
assert inst.case.application[1].identifier.value == "IA38F"
assert (
inst.case.application[1].type.coding[0].code
== "GroupTypeIAVariationNotification"
)
assert inst.case.application[1].type.coding[0].system == (
"http://ema.europa.eu/example/marketingAuthorisationApplicati" "onType"
)
assert inst.case.datePeriod.end == fhirtypes.DateTime.validate("2015-08-21")
assert inst.case.datePeriod.start == fhirtypes.DateTime.validate("2014-09-02")
assert (
inst.case.identifier.system
== "http://ema.europa.eu/example/procedureidentifier-number"
)
assert inst.case.identifier.value == "EMEA/H/C/009999/IA/0099/G"
assert inst.case.type.coding[0].code == "VariationTypeIA"
assert inst.case.type.coding[0].system == (
"http://ema.europa.eu/example/marketingAuthorisationProcedure" "Type"
)
assert inst.holder.reference == "Organization/example"
assert inst.id == "example"
assert (
inst.identifier[0].system
== "http://ema.europa.eu/example/marketingAuthorisationNumber"
)
assert inst.identifier[0].value == "EU/1/11/999/001"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.region[0].coding[0].code == "EU"
assert inst.region[0].coding[0].system == "http://ema.europa.eu/example/country"
assert inst.regulator.reference == "Organization/example"
assert inst.status.coding[0].code == "active"
assert (
inst.status.coding[0].system
== "http://ema.europa.eu/example/authorisationstatus"
)
assert inst.statusDate == fhirtypes.DateTime.validate("2015-01-14")
assert inst.text.status == "generated"
assert inst.validityPeriod.end == fhirtypes.DateTime.validate("2020-05-20")
assert inst.validityPeriod.start == fhirtypes.DateTime.validate("2014-09-03")
def test_regulatedauthorization_2(base_settings):
"""No. 2 tests collection for RegulatedAuthorization.
Test File: regulatedauthorization-example.json
"""
filename = (
base_settings["unittest_data_dir"] / "regulatedauthorization-example.json"
)
inst = regulatedauthorization.RegulatedAuthorization.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "RegulatedAuthorization" == inst.resource_type
impl_regulatedauthorization_2(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "RegulatedAuthorization" == data["resourceType"]
inst2 = regulatedauthorization.RegulatedAuthorization(**data)
impl_regulatedauthorization_2(inst2)
|
c49eae5791e5e0e761c4b340547bdfb81a397975
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/security/azure-mgmt-security/azure/mgmt/security/v2022_03_01/models/_models_py3.py
|
c621e73948f6a297a3b424bcc81e288106482bad
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 8,205
|
py
|
_models_py3.py
|
# coding=utf-8
# pylint: disable=too-many-lines
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, List, Optional, TYPE_CHECKING, Union
from ... import _serialization
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from .. import models as _models
class CloudErrorBody(_serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~azure.mgmt.security.v2022_03_01.models.CloudErrorBody]
:ivar additional_info: The error additional info.
:vartype additional_info: list[~azure.mgmt.security.v2022_03_01.models.ErrorAdditionalInfo]
"""
_validation = {
"code": {"readonly": True},
"message": {"readonly": True},
"target": {"readonly": True},
"details": {"readonly": True},
"additional_info": {"readonly": True},
}
_attribute_map = {
"code": {"key": "code", "type": "str"},
"message": {"key": "message", "type": "str"},
"target": {"key": "target", "type": "str"},
"details": {"key": "details", "type": "[CloudErrorBody]"},
"additional_info": {"key": "additionalInfo", "type": "[ErrorAdditionalInfo]"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorAdditionalInfo(_serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: JSON
"""
_validation = {
"type": {"readonly": True},
"info": {"readonly": True},
}
_attribute_map = {
"type": {"key": "type", "type": "str"},
"info": {"key": "info", "type": "object"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.type = None
self.info = None
class Resource(_serialization.Model):
"""Describes an Azure resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class Pricing(Resource):
"""Microsoft Defender for Cloud is provided in two pricing tiers: free and standard, with the
standard tier available with a trial period. The standard tier offers advanced security
capabilities, while the free tier offers basic security features.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar pricing_tier: The pricing tier value. Microsoft Defender for Cloud is provided in two
pricing tiers: free and standard, with the standard tier available with a trial period. The
standard tier offers advanced security capabilities, while the free tier offers basic security
features. Known values are: "Free" and "Standard".
:vartype pricing_tier: str or ~azure.mgmt.security.v2022_03_01.models.PricingTier
:ivar sub_plan: The sub-plan selected for a Standard pricing configuration, when more than one
sub-plan is available. Each sub-plan enables a set of security features. When not specified,
full plan is applied.
:vartype sub_plan: str
    :ivar free_trial_remaining_time: The duration left for the subscription's free trial period - in
ISO 8601 format (e.g. P3Y6M4DT12H30M5S).
:vartype free_trial_remaining_time: ~datetime.timedelta
:ivar deprecated: Optional. True if the plan is deprecated. If there are replacing plans they
will appear in ``replacedBy`` property.
:vartype deprecated: bool
:ivar replaced_by: Optional. List of plans that replace this plan. This property exists only if
this plan is deprecated.
:vartype replaced_by: list[str]
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"free_trial_remaining_time": {"readonly": True},
"deprecated": {"readonly": True},
"replaced_by": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"pricing_tier": {"key": "properties.pricingTier", "type": "str"},
"sub_plan": {"key": "properties.subPlan", "type": "str"},
"free_trial_remaining_time": {"key": "properties.freeTrialRemainingTime", "type": "duration"},
"deprecated": {"key": "properties.deprecated", "type": "bool"},
"replaced_by": {"key": "properties.replacedBy", "type": "[str]"},
}
def __init__(
self,
*,
pricing_tier: Optional[Union[str, "_models.PricingTier"]] = None,
sub_plan: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword pricing_tier: The pricing tier value. Microsoft Defender for Cloud is provided in two
pricing tiers: free and standard, with the standard tier available with a trial period. The
standard tier offers advanced security capabilities, while the free tier offers basic security
features. Known values are: "Free" and "Standard".
:paramtype pricing_tier: str or ~azure.mgmt.security.v2022_03_01.models.PricingTier
:keyword sub_plan: The sub-plan selected for a Standard pricing configuration, when more than
one sub-plan is available. Each sub-plan enables a set of security features. When not
specified, full plan is applied.
:paramtype sub_plan: str
"""
super().__init__(**kwargs)
self.pricing_tier = pricing_tier
self.sub_plan = sub_plan
self.free_trial_remaining_time = None
self.deprecated = None
self.replaced_by = None
class PricingList(_serialization.Model):
"""List of pricing configurations response.
All required parameters must be populated in order to send to Azure.
:ivar value: List of pricing configurations. Required.
:vartype value: list[~azure.mgmt.security.v2022_03_01.models.Pricing]
"""
_validation = {
"value": {"required": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[Pricing]"},
}
def __init__(self, *, value: List["_models.Pricing"], **kwargs: Any) -> None:
"""
:keyword value: List of pricing configurations. Required.
:paramtype value: list[~azure.mgmt.security.v2022_03_01.models.Pricing]
"""
super().__init__(**kwargs)
self.value = value
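# Illustrative sketch (hand-written, not generated code): building a Pricing
# model and serializing it; serialize() is assumed here from the
# _serialization.Model base class, and readonly fields stay server-populated.
#
#     pricing = Pricing(pricing_tier="Standard", sub_plan="P2")
#     body = pricing.serialize()
#     body["properties"]["pricingTier"]  # -> "Standard"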
|
f1c5d824034c721d4e791ba79398e76ddb263f0d
|
d1c2d00078520cd556f60b7213c27856f8b3460d
|
/playground/infrastructure/helper.py
|
f211650f7bba3f6544969daf462f75b0b271e757
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-protobuf",
"Apache-2.0",
"Python-2.0"
] |
permissive
|
apache/beam
|
ed11b9e043465c720659eac20ac71b5b171bfa88
|
6d5048e05087ea54abc889ce402ae2a0abb9252b
|
refs/heads/master
| 2023-09-04T07:41:07.002653
| 2023-09-01T23:01:05
| 2023-09-01T23:01:05
| 50,904,245
| 7,061
| 4,522
|
Apache-2.0
| 2023-09-14T21:43:38
| 2016-02-02T08:00:06
|
Java
|
UTF-8
|
Python
| false
| false
| 13,598
|
py
|
helper.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common helper module for CI/CD Steps
"""
import asyncio
import logging
import os
import urllib.parse
from pathlib import PurePath
from typing import List, Optional, Dict
from api.v1 import api_pb2
import pydantic
import yaml
from api.v1.api_pb2 import (
SDK_UNSPECIFIED,
STATUS_UNSPECIFIED,
Sdk,
STATUS_VALIDATING,
STATUS_PREPARING,
STATUS_COMPILING,
STATUS_EXECUTING,
PRECOMPILED_OBJECT_TYPE_UNIT_TEST,
PRECOMPILED_OBJECT_TYPE_KATA,
PRECOMPILED_OBJECT_TYPE_UNSPECIFIED,
PRECOMPILED_OBJECT_TYPE_EXAMPLE,
PrecompiledObjectType,
)
from config import Config, TagFields, PrecompiledExampleType
from grpc_client import GRPCClient
from constants import BEAM_ROOT_DIR_ENV_VAR_KEY
from models import Example, Tag, SdkEnum, Dataset
def _check_no_nested(subdirs: List[str]):
"""
    Check that there are no nested subdirs.
Sort alphabetically and compare the pairs of adjacent items
using pathlib.PurePath: we don't want fs calls in this check
"""
sorted_subdirs = sorted(PurePath(s) for s in subdirs)
for dir1, dir2 in zip(sorted_subdirs, sorted_subdirs[1:]):
if dir1 in [dir2, *dir2.parents]:
raise ValueError(f"{dir2} is a subdirectory of {dir1}")
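# Illustrative behaviour of the check above (added for clarity, not upstream):
# sorting places an ancestor immediately before its descendants, so comparing
# adjacent pairs is enough.
#
#     _check_no_nested(["examples/a", "examples/b"])  # ok: siblings
#     _check_no_nested(["examples", "examples/a"])    # ValueError: examples/a is a subdirectory of examples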
def find_examples(root_dir: str, subdirs: List[str], sdk: SdkEnum) -> List[Example]:
"""
Find and return beam examples.
    Search through all child files under root_dir's subdirs for files with a beam tag:
Beam-playground:
name: NameOfExample
description: Description of NameOfExample.
multifile: false
default_example: false
context_line: 10
categories:
- category-1
- category-2
pipeline_options: --inputFile your_file --outputFile your_output_file
complexity: MEDIUM
tags:
- example
If some example contains beam tag with incorrect format raise an error.
Args:
root_dir: project root dir
subdirs: sub-directories where to search examples.
        sdk: the SDK for which to find examples.
Returns:
List of Examples.
"""
has_errors = False
examples = []
_check_no_nested(subdirs)
for subdir in subdirs:
subdir = os.path.join(root_dir, subdir)
logging.info("subdir: %s", subdir)
for root, _, files in os.walk(subdir):
for filename in files:
filepath = os.path.join(root, filename)
try:
try:
example = _load_example(
filename=filename, filepath=filepath, sdk=sdk
)
if example is not None:
examples.append(example)
except pydantic.ValidationError as err:
if len(err.errors()) > 1:
raise
if err.errors()[0]["msg"] == "multifile is True but no files defined":
logging.warning("incomplete multifile example ignored %s", filepath)
continue
raise
except Exception:
logging.exception("error loading example at %s", filepath)
has_errors = True
if has_errors:
raise ValueError(
"Some of the beam examples contain beam playground tag with "
"an incorrect format"
)
return examples
def get_tag(filepath: PurePath) -> Optional[Tag]:
"""
Parse file by filepath and find beam tag
Args:
filepath: path of the file
Returns:
If file contains tag, returns Tag object
If file doesn't contain tag, returns None
"""
with open(filepath, encoding="utf-8") as parsed_file:
lines = parsed_file.readlines()
line_start: Optional[int] = None
line_finish: Optional[int] = None
tag_prefix: Optional[str] = ""
for idx, line in enumerate(lines):
if line_start is None and line.endswith(Config.BEAM_PLAYGROUND_TITLE):
line_start = idx
prefix_len = len(line) - len(Config.BEAM_PLAYGROUND_TITLE)
tag_prefix = line[:prefix_len]
elif line_start and not line.startswith(tag_prefix):
line_finish = idx
break
if not line_start or not line_finish:
return None
embdedded_yaml_content = "".join(
line[len(tag_prefix) :] for line in lines[line_start:line_finish]
)
yml = yaml.load(embdedded_yaml_content, Loader=yaml.SafeLoader)
try:
return Tag(
filepath=str(filepath),
line_start=line_start,
line_finish=line_finish,
**yml[Config.BEAM_PLAYGROUND],
)
except pydantic.ValidationError as err:
if len(err.errors()) == 1 and err.errors()[0]["msg"] == "multifile is True but no files defined":
logging.warning("incomplete multifile example ignored %s", filepath)
return None
raise
def _load_example(filename, filepath, sdk: SdkEnum) -> Optional[Example]:
"""
    Check whether the file at filepath is a beam example; if it is, parse it into an Example.
Args:
filename: name of the file.
filepath: path to the file.
        sdk: the SDK for which to find examples.
Returns:
If the file is an example, return Example object
If it's not, return None
In case of error, raise Exception
"""
logging.debug("inspecting file %s", filepath)
extension = filepath.split(os.extsep)[-1]
if extension == Config.SDK_TO_EXTENSION[sdk]:
logging.debug("sdk %s matched extension %s", api_pb2.Sdk.Name(sdk), extension)
tag = get_tag(filepath)
if tag is not None:
logging.debug("playground-beam tag found")
return _get_example(filepath, filename, tag, sdk)
return None
# Ensure load_supported_categories is called only once
# to make testing easier
_load_supported_categories = False
def load_supported_categories(categories_path: str):
"""
Load the list of supported categories from categories_path file
into Tag model config
Args:
categories_path: path to the file with categories.
"""
global _load_supported_categories
if _load_supported_categories:
return
with open(categories_path, encoding="utf-8") as supported_categories:
yaml_object = yaml.load(supported_categories.read(), Loader=yaml.SafeLoader)
Tag.Config.supported_categories = yaml_object[TagFields.categories]
_load_supported_categories = True
def _get_content(filepath: str, tag_start_line: int, tag_finish_line) -> str:
with open(filepath, encoding="utf-8") as parsed_file:
lines = parsed_file.readlines()
lines = lines[:tag_start_line] + lines[tag_finish_line:]
return "".join(lines)
def _get_url_vcs(filepath: str) -> str:
"""
Construct VCS URL from example's filepath
"""
root_dir = os.getenv(BEAM_ROOT_DIR_ENV_VAR_KEY, "../..")
rel_path = os.path.relpath(filepath, root_dir)
url_vcs = "{}/{}".format(Config.URL_VCS_PREFIX, urllib.parse.quote(rel_path))
return url_vcs
def _get_example(filepath: str, filename: str, tag: Tag, sdk: int) -> Example:
"""
Return an Example by filepath and filename.
Args:
filepath: path of the example's file.
filename: name of the example's file.
tag: tag of the example.
Returns:
Parsed Example object.
"""
# Calculate context line with tag removed. Note: context_line is 1-based, line_start and line_finish are 0-based.
context_line = tag.context_line if tag.context_line <= tag.line_start else tag.context_line - (tag.line_finish - tag.line_start)
return Example(
sdk=SdkEnum(sdk),
tag=tag,
filepath=filepath,
status=STATUS_UNSPECIFIED,
type=_get_object_type(filename, filepath),
code=_get_content(filepath, tag.line_start, tag.line_finish),
url_vcs=_get_url_vcs(filepath), # type: ignore
context_line=context_line,
)
async def update_example_status(example: Example, client: GRPCClient):
"""
Receive status for examples and update example.status and pipeline_id
Use client to send requests to the backend:
1. Start code processing.
2. Ping the backend while status is STATUS_VALIDATING/
STATUS_PREPARING/STATUS_COMPILING/STATUS_EXECUTING
Update example.status with resulting status.
Args:
example: beam example for processing and updating status and pipeline_id.
client: client to send requests to the server.
"""
datasets: List[api_pb2.Dataset] = []
for emulator in example.tag.emulators:
dataset: Dataset = example.tag.datasets[emulator.topic.source_dataset]
datasets.append(
api_pb2.Dataset(
type=api_pb2.EmulatorType.Value(
f"EMULATOR_TYPE_{emulator.type.upper()}"
),
options={"topic": emulator.topic.id},
dataset_path=dataset.file_name,
)
)
files: List[api_pb2.SnippetFile] = [
api_pb2.SnippetFile(name=example.filepath, content=example.code, is_main=True)
]
for file in example.tag.files:
files.append(
api_pb2.SnippetFile(name=file.name, content=file.content, is_main=False)
)
pipeline_id = await client.run_code(
example.code, example.sdk, example.tag.pipeline_options, datasets, files=files,
)
example.pipeline_id = pipeline_id
status = await client.check_status(pipeline_id)
while status in [
STATUS_VALIDATING,
STATUS_PREPARING,
STATUS_COMPILING,
STATUS_EXECUTING,
]:
await asyncio.sleep(Config.PAUSE_DELAY)
status = await client.check_status(pipeline_id)
example.status = status
def _get_object_type(filename, filepath):
"""
    Get the type of an object based on its filename/filepath
Args:
filename: object's filename
filepath: object's filepath
Returns: type of the object (example, kata, unit-test)
"""
filename_no_ext = (os.path.splitext(filename)[0]).lower()
if filename_no_ext.endswith(PrecompiledExampleType.test_ends):
object_type = PRECOMPILED_OBJECT_TYPE_UNIT_TEST
elif PrecompiledExampleType.katas in filepath.split(os.sep):
object_type = PRECOMPILED_OBJECT_TYPE_KATA
elif PrecompiledExampleType.examples in filepath.split(os.sep):
object_type = PRECOMPILED_OBJECT_TYPE_EXAMPLE
else:
object_type = PRECOMPILED_OBJECT_TYPE_UNSPECIFIED
return object_type
class DuplicatesError(Exception):
pass
class ConflictingDatasetsError(Exception):
pass
def validate_examples_for_duplicates_by_name(examples: List[Example]):
"""
Validate examples for duplicates by example name to avoid duplicates in the Cloud Datastore
:param examples: examples from the repository for saving to the Cloud Datastore
"""
duplicates: Dict[str, Example] = {}
for example in examples:
if example.tag.name not in duplicates.keys():
duplicates[example.tag.name] = example
else:
err_msg = f"Examples have duplicate names.\nDuplicates: \n - path #1: {duplicates[example.tag.name].filepath} \n - path #2: {example.filepath}"
logging.error(err_msg)
raise DuplicatesError(err_msg)
def validate_examples_for_conflicting_datasets(examples: List[Example]):
"""
Validate examples for conflicting datasets to avoid conflicts in the Cloud Datastore
:param examples: examples from the repository for saving to the Cloud Datastore
"""
datasets: Dict[str, Dataset] = {}
for example in examples:
for k, v in example.tag.datasets.items():
if k not in datasets:
datasets[k] = v
elif datasets[k].file_name != v.file_name or \
datasets[k].format != v.format or \
datasets[k].location != v.location:
err_msg = f"Examples have conflicting datasets.\n" \
f"Conflicts: \n" \
f" - file_name #1: {datasets[k].file_name} \n" \
f" - format #1: {datasets[k].format} \n" \
f" - location #1: {datasets[k].location} \n" \
f" - file_name #2: {v.file_name}\n" \
f" - format #2: {v.format}\n" \
f" - location #2: {v.location}\n" \
f"Dataset name: {k}"
logging.error(err_msg)
raise ConflictingDatasetsError(err_msg)
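# End-to-end sketch of the CI flow implied above; the paths and the SdkEnum
# member name are illustrative assumptions, not Beam repo configuration.
#
#     load_supported_categories("playground/categories.yaml")  # hypothetical path
#     examples = find_examples(root_dir=".", subdirs=["sdks"], sdk=SdkEnum.PYTHON)
#     validate_examples_for_duplicates_by_name(examples)
#     validate_examples_for_conflicting_datasets(examples)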
|
07d4527881067f158c4f087a0bd714475ebf8acb
|
d5c85f2800cf9c0c07f767d072ac43869b2b7ece
|
/src/macad_gym/envs/urban_2car.py
|
22aca64c9717b3ec4351ee5d8537587224757c96
|
[
"MIT"
] |
permissive
|
praveen-palanisamy/macad-gym
|
86f1125355f9a658994ca513b89a969d27a7e993
|
1006e849ff4c5bf5d29cb43ab8c2a0bdcc992367
|
refs/heads/master
| 2023-05-24T22:42:39.737106
| 2023-01-29T08:09:34
| 2023-01-29T08:09:34
| 186,694,187
| 282
| 65
|
MIT
| 2023-05-20T22:22:12
| 2019-05-14T20:20:03
|
Python
|
UTF-8
|
Python
| false
| false
| 3,703
|
py
|
urban_2car.py
|
#!/usr/bin/env python
import time
from env.carla.multi_env import MultiCarlaEnv
# from env.carla.multi_env import get_next_actions
# config_file = open("urban_2_car_1_ped.json")
# configs = json.load(config_file)
U2C_CONFIGS = {
"env": {
"enable_planner": True,
"server_map": "/Game/Carla/Maps/Town01",
"render": True,
"render_x_res": 800,
"render_y_res": 600,
"x_res": 84,
"y_res": 84,
"framestack": 1,
"discrete_actions": True,
"squash_action_logits": False,
"verbose": False,
"use_depth_camera": False,
"send_measurements": False,
"sync_server": True,
"fixed_delta_seconds": 0.05,
},
"actors": {
"vehicle1": {
"enable_planner": True,
"convert_images_to_video": False,
"early_terminate_on_collision": True,
"reward_function": "corl2017",
"scenarios": "DEFAULT_SCENARIO_TOWN1",
"manual_control": False,
"auto_control": False,
"camera_type": "rgb",
"collision_sensor": "on",
"lane_sensor": "on",
"log_images": False,
"log_measurements": False,
"render": False,
"render_x_res": 800,
"render_y_res": 600,
"x_res": 84,
"y_res": 84,
"use_depth_camera": False,
"send_measurements": False,
},
"vehicle2": {
"enable_planner": True,
"convert_images_to_video": False,
"early_terminate_on_collision": True,
"reward_function": "corl2017",
"scenarios": "DEFAULT_SCENARIO_TOWN1_2",
"manual_control": False,
"auto_control": False,
"camera_type": "rgb",
"collision_sensor": "on",
"lane_sensor": "on",
"log_images": False,
"log_measurements": False,
"render": False,
"render_x_res": 800,
"render_y_res": 600,
"x_res": 84,
"y_res": 84,
"use_depth_camera": False,
"send_measurements": False,
},
},
}
class Urban2Car(MultiCarlaEnv):
"""A 4-way signalized intersection Multi-Agent Carla-Gym environment"""
def __init__(self):
self.configs = U2C_CONFIGS
super(Urban2Car, self).__init__(self.configs)
if __name__ == "__main__":
env = Urban2Car()
configs = env.configs
for ep in range(2):
obs = env.reset()
total_vehicle = env.num_vehicle
total_reward_dict = {}
action_dict = {}
env_config = configs["env"]
actor_configs = configs["actors"]
for actor_id in actor_configs.keys():
total_reward_dict[actor_id] = 0
if env.discrete_actions:
action_dict[actor_id] = 3 # Forward
else:
action_dict[actor_id] = [1, 0] # test values
start = time.time()
i = 0
done = {"__all__": False}
while not done["__all__"]:
# while i < 20: # TEST
i += 1
obs, reward, done, info = env.step(action_dict)
# action_dict = get_next_actions(info, env.discrete_actions)
for actor_id in total_reward_dict.keys():
total_reward_dict[actor_id] += reward[actor_id]
print(":{}\n\t".join(["Step#", "rew", "ep_rew",
"done{}"]).format(i, reward,
total_reward_dict, done))
time.sleep(0.1)
print("{} fps".format(i / (time.time() - start)))
|
1a4a2ba4c381a83969e1f220d65cb6fcef31d540
|
0265181df8e2fbea2f1cb34e9901e2d8c5dae188
|
/examples/simple_generator_consumer.py
|
106b61c1681737e906f7fffb68a5bffc93a2d0c6
|
[
"MIT"
] |
permissive
|
eandersson/amqpstorm
|
5fd33d8ec47b7058ec1ce09c488ecdb612726c0a
|
f5f8031ee57b0d00a2316bce9c47d5767f4ba668
|
refs/heads/2.x
| 2023-08-31T11:25:36.344175
| 2023-08-26T19:57:02
| 2023-08-26T20:05:56
| 23,178,253
| 166
| 43
|
MIT
| 2023-01-02T12:57:12
| 2014-08-21T07:32:19
|
Python
|
UTF-8
|
Python
| false
| false
| 422
|
py
|
simple_generator_consumer.py
|
import logging
from amqpstorm import Connection
logging.basicConfig(level=logging.INFO)
with Connection('localhost', 'guest', 'guest') as connection:
with connection.channel() as channel:
channel.queue.declare('simple_queue')
channel.basic.consume(queue='simple_queue', no_ack=False)
for message in channel.build_inbound_messages():
print(message.body)
message.ack()
|
0ff61d349e7a9d5c37830f8d1fdb1d9a1098cb5c
|
62179a165ec620ba967dbc20016e890978fbff50
|
/tests/openvino/test_telemetry.py
|
e5cb604d2246eef7781fa4b057dc0b2c3db139a0
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/nncf
|
91fcf153a96f85da166aacb7a70ca4941e4ba4a4
|
c027c8b43c4865d46b8de01d8350dd338ec5a874
|
refs/heads/develop
| 2023-08-24T11:25:05.704499
| 2023-08-23T14:44:05
| 2023-08-23T14:44:05
| 263,687,600
| 558
| 157
|
Apache-2.0
| 2023-09-14T17:06:41
| 2020-05-13T16:41:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,395
|
py
|
test_telemetry.py
|
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from openvino.runtime import Model
from openvino.runtime import Shape
from openvino.runtime import Type
from openvino.runtime import op
from openvino.runtime import opset8
import nncf
from nncf import Dataset
from tests.shared.datasets import MockDataset
from tests.shared.helpers import telemetry_send_event_test_driver
INPUT_SHAPE = [2, 1, 1, 1]
def get_mock_model() -> Model:
param_node = op.Parameter(Type.f32, Shape(INPUT_SHAPE))
softmax_axis = 1
softmax_node = opset8.softmax(param_node, softmax_axis)
return Model(softmax_node, [param_node], "mock")
def test_telemetry_is_sent(mocker):
def use_nncf_fn():
model_to_test = get_mock_model()
_ = nncf.quantize(model_to_test, Dataset(MockDataset(INPUT_SHAPE)))
telemetry_send_event_test_driver(mocker, use_nncf_fn)
|
154c95e1e579b192d524451d0dba82e536fe14fe
|
f54290f045fd150f9be640bf8ab4a91f6b9ae3e3
|
/evennia/contrib/grid/extended_room/__init__.py
|
53a673c7932efd9701e9b4edd2bff887cfc06320
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
evennia/evennia
|
54d075093f0ff125be40e17a7bc4e1e0e22cf77b
|
b3ca58b5c1325a3bf57051dfe23560a08d2947b7
|
refs/heads/main
| 2023-09-02T05:29:35.678676
| 2023-09-01T19:06:05
| 2023-09-01T19:06:05
| 16,120,959
| 1,781
| 1,004
|
BSD-3-Clause
| 2023-09-12T18:37:23
| 2014-01-21T22:22:28
|
Python
|
UTF-8
|
Python
| false
| false
| 454
|
py
|
__init__.py
|
"""
Extended Room - Griatch 2012, vincent-lg 2019, Griatch 2023
"""
from .extended_room import CmdExtendedRoomDesc # noqa
from .extended_room import CmdExtendedRoomDetail # noqa
from .extended_room import CmdExtendedRoomGameTime # noqa
from .extended_room import CmdExtendedRoomLook # noqa
from .extended_room import CmdExtendedRoomState # noqa
from .extended_room import ExtendedRoom # noqa
from .extended_room import ExtendedRoomCmdSet # noqa
|
e59a4f1ac813bf736fc0d5b4d8d5a94518905464
|
14f40e51d1f2b0671650ee6c350756b42262c6f2
|
/platformio/registry/mirror.py
|
4b4508f6623dfce96c919f4ef82b4273ee7be86e
|
[
"Apache-2.0"
] |
permissive
|
platformio/platformio-core
|
7cfb8bb60661122f883ca175b6c48c9299fc3262
|
897844ebc172bd8a2e313bdb9011fc7d986863c2
|
refs/heads/develop
| 2023-08-22T01:48:10.215800
| 2023-08-18T11:39:03
| 2023-08-18T11:39:03
| 19,606,299
| 6,058
| 707
|
Apache-2.0
| 2023-09-07T05:23:55
| 2014-05-09T09:38:42
|
Python
|
UTF-8
|
Python
| false
| false
| 3,748
|
py
|
mirror.py
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from urllib.parse import urlparse
from platformio import __registry_mirror_hosts__
from platformio.cache import ContentCache
from platformio.http import HTTPClient
from platformio.registry.client import RegistryClient
class RegistryFileMirrorIterator:
HTTP_CLIENT_INSTANCES = {}
def __init__(self, download_url):
self.download_url = download_url
self._url_parts = urlparse(download_url)
self._mirror = "%s://%s" % (self._url_parts.scheme, self._url_parts.netloc)
self._visited_mirrors = []
def __iter__(self): # pylint: disable=non-iterator-returned
return self
def __next__(self):
cache_key = ContentCache.key_from_args(
"head", self.download_url, self._visited_mirrors
)
with ContentCache("http") as cc:
result = cc.get(cache_key)
if result is not None:
try:
headers = json.loads(result)
return (
headers["Location"],
headers["X-PIO-Content-SHA256"],
)
except (ValueError, KeyError):
pass
http = self.get_http_client()
response = http.send_request(
"head",
self._url_parts.path,
allow_redirects=False,
params=dict(bypass=",".join(self._visited_mirrors))
if self._visited_mirrors
else None,
x_with_authorization=RegistryClient.allowed_private_packages(),
)
stop_conditions = [
response.status_code not in (302, 307),
not response.headers.get("Location"),
not response.headers.get("X-PIO-Mirror"),
response.headers.get("X-PIO-Mirror") in self._visited_mirrors,
]
if any(stop_conditions):
raise StopIteration
self._visited_mirrors.append(response.headers.get("X-PIO-Mirror"))
cc.set(
cache_key,
json.dumps(
{
"Location": response.headers.get("Location"),
"X-PIO-Content-SHA256": response.headers.get(
"X-PIO-Content-SHA256"
),
}
),
"1h",
)
return (
response.headers.get("Location"),
response.headers.get("X-PIO-Content-SHA256"),
)
def get_http_client(self):
if self._mirror not in RegistryFileMirrorIterator.HTTP_CLIENT_INSTANCES:
endpoints = [self._mirror]
for host in __registry_mirror_hosts__:
endpoint = f"https://dl.{host}"
if endpoint not in endpoints:
endpoints.append(endpoint)
RegistryFileMirrorIterator.HTTP_CLIENT_INSTANCES[self._mirror] = HTTPClient(
endpoints
)
return RegistryFileMirrorIterator.HTTP_CLIENT_INSTANCES[self._mirror]
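# Usage sketch (not part of the module): each iteration yields a
# (download_url, sha256) pair from the next registry mirror until the mirrors
# are exhausted; the URL below is illustrative.
#
#     it = RegistryFileMirrorIterator("https://dl.registry.example/pkg.tar.gz")
#     for url, checksum in it:
#         print(url, checksum)
#         break  # take the first responsive mirror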
|
9f0cfcfd557cad81042f13b803aa195a6f0a27bf
|
860c31e414c4c280b70ec0872042d715a2d56978
|
/benchmarks/train_mtl_cinc2022/utils/springer_dwt.py
|
06cf9cb904102db087af818f182c3249d9117028
|
[
"MIT"
] |
permissive
|
DeepPSP/torch_ecg
|
255e49ff436e13044a1f049141f982680e56970e
|
a40c65f4fefa83ba7d3d184072a4c05627b7e226
|
refs/heads/master
| 2023-09-01T06:47:17.153216
| 2023-08-31T18:00:47
| 2023-08-31T18:00:47
| 298,482,237
| 111
| 16
|
MIT
| 2023-08-21T11:25:07
| 2020-09-25T06:03:17
|
Python
|
UTF-8
|
Python
| false
| false
| 3,106
|
py
|
springer_dwt.py
|
"""
"""
from typing import Union, Optional
import pywt
import numpy as np
from easydict import EasyDict as ED
__all__ = [
"get_dwt_features",
"get_full_dwt_features",
]
def get_dwt_features(
signal: np.ndarray, fs: int, config: Optional[dict] = None
) -> np.ndarray:
"""
compute the discrete wavelet transform (DWT) features using Springer's algorithm
Parameters
----------
signal : np.ndarray,
the (ECG) signal, of shape (nsamples,)
fs : int,
the sampling frequency
config : dict, optional,
the configuration, with the following keys:
- ``'wavelet_level'``: int,
the level of the wavelet decomposition, default: 3
- ``'wavelet_name'``: str,
the name of the wavelet, default: "db7"
Returns
-------
dwt_features : np.ndarray,
the DWT features, of shape (nsamples,)
"""
cfg = ED(
wavelet_level=3,
wavelet_name="db7",
)
cfg.update(config or {})
siglen = len(signal)
detail_coefs = pywt.downcoef(
"d", signal, wavelet=cfg.wavelet_name, level=cfg.wavelet_level
)
dwt_features = _wkeep1(np.repeat(detail_coefs, 2**cfg.wavelet_level), siglen)
return dwt_features
def get_full_dwt_features(
signal: np.ndarray, fs: int, config: Optional[dict] = None
) -> np.ndarray:
"""
compute the full DWT features using Springer's algorithm
Parameters
----------
signal : np.ndarray,
the (ECG) signal, of shape (nsamples,)
fs : int,
the sampling frequency
config : dict, optional,
the configuration, with the following keys:
- ``'wavelet_level'``: int,
the level of the wavelet decomposition, default: 3
- ``'wavelet_name'``: str,
the name of the wavelet, default: "db7"
Returns
-------
dwt_features : np.ndarray,
the full DWT features, of shape (``'wavelet_level'``, nsamples)
"""
cfg = ED(
wavelet_level=3,
wavelet_name="db7",
)
cfg.update(config or {})
siglen = len(signal)
detail_coefs = pywt.wavedec(signal, cfg.wavelet_name, level=cfg.wavelet_level)[
:0:-1
]
dwt_features = np.zeros((cfg.wavelet_level, siglen), dtype=signal.dtype)
for i, detail_coef in enumerate(detail_coefs):
dwt_features[i] = _wkeep1(np.repeat(detail_coef, 2 ** (i + 1)), siglen)
return dwt_features
def _wkeep1(x: np.ndarray, k: int, opt: Union[str, int] = "c") -> np.ndarray:
"""
modified from the matlab function ``wkeep1``
References
----------
wkeep1.m of the matlab wavelet toolbox
"""
x_len = len(x)
if x_len <= k:
return x
if isinstance(opt, int):
first = opt
elif opt.lower() in ["c", "center", "centre"]:
first = (x_len - k) // 2
elif opt.lower() in ["l", "left"]:
first = 0
elif opt.lower() in ["r", "right"]:
first = x_len - k
else:
raise ValueError(f"Unknown option: {opt}")
assert 0 <= first <= x_len - k
return x[first : first + k]
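# Small self-check sketch (not in the original file): verify output shapes on a
# synthetic signal; the sampling frequency and signal are arbitrary choices.
if __name__ == "__main__":
    fs = 1000
    t = np.arange(2 * fs) / fs              # 2 seconds of samples
    sig = np.sin(2 * np.pi * 5 * t)         # 5 Hz sine
    feats = get_dwt_features(sig, fs)       # per-sample level-3 detail features
    full = get_full_dwt_features(sig, fs)   # one row per decomposition level
    assert feats.shape == sig.shape
    assert full.shape == (3, len(sig))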
|
ebd10e5d5840d1aced0ff82dc9afa1bb848390e5
|
766ee8ce546cdc82b4da89f3a3422ba2c378b14a
|
/siim_acr/src/utils/common_util.py
|
03e020d26a4e064b7a397948e022d9d47075b97d
|
[] |
no_license
|
bestfitting/kaggle
|
13c5c2fdc43709c6860ffaad88ee4424910c4294
|
fd2ac94fba182182fd86b8e8d6afec9b4a0408e3
|
refs/heads/master
| 2023-07-23T11:14:32.988474
| 2021-12-31T13:56:36
| 2021-12-31T13:56:36
| 98,124,366
| 314
| 70
| null | 2023-07-06T21:26:56
| 2017-07-23T20:46:23
|
Python
|
UTF-8
|
Python
| false
| false
| 134
|
py
|
common_util.py
|
import os
osp = os.path
ope = os.path.exists
opj = os.path.join
from utils.mask_functions import run_length_encode, run_length_decode
|
2fe9efc5a77ef4ae257c8c61e137a1fc7607cf3f
|
61c456f574fbda51fed682b0727b54165f8aa92c
|
/library/digital_ocean_floating_ip.py
|
963403ce73b67ed0e501989609c1278c1d13dfbd
|
[
"AGPL-3.0-only"
] |
permissive
|
dan-v/algo
|
d31733bfcf126664bd9bf93b6e78eaf7daa31967
|
35d295701ab25d1013c63bc4243a33c20abb4522
|
refs/heads/master
| 2021-07-16T16:32:02.783491
| 2020-05-27T00:16:43
| 2020-05-27T00:16:43
| 164,184,734
| 132
| 13
|
MIT
| 2019-04-21T20:46:34
| 2019-01-05T05:23:37
|
Python
|
UTF-8
|
Python
| false
| false
| 8,443
|
py
|
digital_ocean_floating_ip.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Patrick F. Marques <patrickfmarques@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: digital_ocean_floating_ip
short_description: Manage DigitalOcean Floating IPs
description:
- Create/delete/assign a floating IP.
version_added: "2.4"
author: "Patrick Marques (@pmarques)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
ip:
description:
- Public IP address of the Floating IP. Used to remove an IP
region:
description:
- The region that the Floating IP is reserved to.
droplet_id:
description:
- The Droplet that the Floating IP has been assigned to.
oauth_token:
description:
- DigitalOcean OAuth token.
required: true
notes:
- Version 2 of DigitalOcean API is used.
requirements:
- "python >= 2.6"
'''
EXAMPLES = '''
- name: "Create a Floating IP in region lon1"
digital_ocean_floating_ip:
state: present
region: lon1
- name: "Create a Floating IP assigned to Droplet ID 123456"
digital_ocean_floating_ip:
state: present
droplet_id: 123456
- name: "Delete a Floating IP with ip 1.2.3.4"
digital_ocean_floating_ip:
state: absent
ip: "1.2.3.4"
'''
RETURN = '''
# Digital Ocean API info https://developers.digitalocean.com/documentation/v2/#floating-ips
data:
description: a DigitalOcean Floating IP resource
returned: success and no resource constraint
type: dict
sample: {
"action": {
"id": 68212728,
"status": "in-progress",
"type": "assign_ip",
"started_at": "2015-10-15T17:45:44Z",
"completed_at": null,
"resource_id": 758603823,
"resource_type": "floating_ip",
"region": {
"name": "New York 3",
"slug": "nyc3",
"sizes": [
"512mb",
"1gb",
"2gb",
"4gb",
"8gb",
"16gb",
"32gb",
"48gb",
"64gb"
],
"features": [
"private_networking",
"backups",
"ipv6",
"metadata"
],
"available": true
},
"region_slug": "nyc3"
}
}
'''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.digital_ocean import DigitalOceanHelper
class Response(object):
def __init__(self, resp, info):
self.body = None
if resp:
self.body = resp.read()
self.info = info
@property
def json(self):
if not self.body:
if "body" in self.info:
return json.loads(self.info["body"])
return None
try:
return json.loads(self.body)
except ValueError:
return None
@property
def status_code(self):
return self.info["status"]
def wait_action(module, rest, ip, action_id, timeout=10):
    end_time = time.time() + timeout  # honor the timeout argument instead of a hardcoded 10s
while time.time() < end_time:
response = rest.get('floating_ips/{0}/actions/{1}'.format(ip, action_id))
status_code = response.status_code
status = response.json['action']['status']
# TODO: check status_code == 200?
if status == 'completed':
return True
elif status == 'errored':
            module.fail_json(msg='Floating ip action error [ip: {0}: action: {1}]'.format(
                ip, action_id), data=response.json)
    # loop ended without completion: report a timeout
    module.fail_json(msg='Floating ip action timeout [ip: {0}: action: {1}]'.format(
        ip, action_id), data=response.json)
def core(module):
api_token = module.params['oauth_token']
state = module.params['state']
ip = module.params['ip']
droplet_id = module.params['droplet_id']
rest = DigitalOceanHelper(module)
    if state == 'present':
if droplet_id is not None and module.params['ip'] is not None:
# Lets try to associate the ip to the specified droplet
associate_floating_ips(module, rest)
else:
create_floating_ips(module, rest)
    elif state == 'absent':
response = rest.delete("floating_ips/{0}".format(ip))
status_code = response.status_code
json_data = response.json
if status_code == 204:
module.exit_json(changed=True)
elif status_code == 404:
module.exit_json(changed=False)
else:
module.exit_json(changed=False, data=json_data)
def get_floating_ip_details(module, rest):
ip = module.params['ip']
response = rest.get("floating_ips/{0}".format(ip))
status_code = response.status_code
json_data = response.json
if status_code == 200:
return json_data['floating_ip']
else:
module.fail_json(msg="Error assigning floating ip [{0}: {1}]".format(
status_code, json_data["message"]), region=module.params['region'])
def assign_floating_id_to_droplet(module, rest):
ip = module.params['ip']
payload = {
"type": "assign",
"droplet_id": module.params['droplet_id'],
}
response = rest.post("floating_ips/{0}/actions".format(ip), data=payload)
status_code = response.status_code
json_data = response.json
if status_code == 201:
wait_action(module, rest, ip, json_data['action']['id'])
module.exit_json(changed=True, data=json_data)
else:
module.fail_json(msg="Error creating floating ip [{0}: {1}]".format(
status_code, json_data["message"]), region=module.params['region'])
def associate_floating_ips(module, rest):
floating_ip = get_floating_ip_details(module, rest)
droplet = floating_ip['droplet']
# TODO: If already assigned to a droplet verify if is one of the specified as valid
if droplet is not None and str(droplet['id']) in [module.params['droplet_id']]:
module.exit_json(changed=False)
else:
assign_floating_id_to_droplet(module, rest)
def create_floating_ips(module, rest):
payload = {
}
floating_ip_data = None
if module.params['region'] is not None:
payload["region"] = module.params['region']
if module.params['droplet_id'] is not None:
payload["droplet_id"] = module.params['droplet_id']
floating_ips = rest.get_paginated_data(base_url='floating_ips?', data_key_name='floating_ips')
for floating_ip in floating_ips:
if floating_ip['droplet'] and floating_ip['droplet']['id'] == module.params['droplet_id']:
floating_ip_data = {'floating_ip': floating_ip}
if floating_ip_data:
module.exit_json(changed=False, data=floating_ip_data)
else:
response = rest.post("floating_ips", data=payload)
status_code = response.status_code
json_data = response.json
if status_code == 202:
module.exit_json(changed=True, data=json_data)
else:
module.fail_json(msg="Error creating floating ip [{0}: {1}]".format(
status_code, json_data["message"]), region=module.params['region'])
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(choices=['present', 'absent'], default='present'),
ip=dict(aliases=['id'], required=False),
region=dict(required=False),
droplet_id=dict(required=False, type='int'),
oauth_token=dict(
no_log=True,
# Support environment variable for DigitalOcean OAuth Token
fallback=(env_fallback, ['DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN']),
required=True,
),
validate_certs=dict(type='bool', default=True),
timeout=dict(type='int', default=30),
),
required_if=[
        ('state', 'absent', ['ip'])
],
mutually_exclusive=[
['region', 'droplet_id']
],
)
core(module)
if __name__ == '__main__':
main()
|
7ba86046ac3e9726f4e38ac2972816192f002a7e
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/HLTriggerOffline/Btag/test/compareDQM.py
|
f43c81a26e933282b61effc8284c10f82ee5421b
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 3,464
|
py
|
compareDQM.py
|
#!/usr/bin/env python
#
# Launch the script with the command: ./compareDQM.py
# Set below the two DQM input files (DQMfileOld,DQMfileNew)
#
# This script compares the plots cointained in two DQM files and save the superimposed plots
#
from __future__ import print_function
DQMfileOld="/afs/cern.ch/user/s/sdonato/AFSwork/public/DQM_V0001_R000000001__CMSSW_X_Y_Z__RelVal__TrigVal.root"
DQMfileNew="/afs/cern.ch/user/s/sdonato/AFSwork/public/DQM_V0001_R000000002__CMSSW_X_Y_Z__RelVal__TrigVal.root"
labelNew = "New"
labelOld = "Old"
########################## load libraries #################################
import os, string, re, sys, math
try:
import ROOT
except:
print("\nCannot load PYROOT, make sure you have setup ROOT in the path")
print("and pyroot library is also defined in the variable PYTHONPATH, try:\n")
if (os.getenv("PYTHONPATH")):
print(" setenv PYTHONPATH ${PYTHONPATH}:$ROOTSYS/lib\n")
else:
print(" setenv PYTHONPATH $ROOTSYS/lib\n")
sys.exit()
folder="plots"
try:
os.mkdir(folder)
except:
print("folder " + folder + " already exist")
from ROOT import TFile
from ROOT import TCanvas
from ROOT import TLegend
from ROOT import TH1F
from ROOT import TGraphErrors
########################## define a function that return plots given a TFile #################################
def GetPlots(_file0):
# _file0=TFile(filename)
dir1 = _file0.Get("DQMData")
dir2 = dir1.Get("Run 1")
dir3 = dir2.Get("HLT")
dir4 = dir3.Get("Run summary")
plots=[]
for type in dir4.GetListOfKeys():
dirType= dir4.Get(type.GetName())
for triggerKey in dirType.GetListOfKeys():
triggerDir=dirType.Get(triggerKey.GetName())
for plotKey in triggerDir.GetListOfKeys():
plotPointer=triggerDir.Get(plotKey.GetName())
plot=plotPointer
if(plot.GetName()=="efficiency"):
for plotEfficiencyKey in plotPointer.GetListOfKeys():
plot=plotPointer.Get(plotEfficiencyKey.GetName())
plots=plots+[plot.Clone(triggerKey.GetName() + "_" + plot.GetName())]
else:
plots=plots+[plot.Clone(triggerKey.GetName() + "_" + plot.GetName())]
return plots
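# Aside (sketch): TDirectory.Get also accepts a slash-separated path, so the
# four chained Get calls above could likely be collapsed into a single lookup:
#
#   dir4 = _file0.Get("DQMData/Run 1/HLT/Run summary")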
########################## read DQM plots #################################
fileNew=TFile(DQMfileNew)
plotsNew=0
plotsOld=0
try:
plotsNew = GetPlots(fileNew)
except:
print("Problem with ", fileNew)
fileOld=TFile(DQMfileOld)
try:
plotsOld = GetPlots(fileOld)
except:
print("Problem with ", fileOld)
##### for kind of plots save a .png superimposing the New with the Old #####
ROOT.gROOT.SetBatch()
ROOT.gStyle.SetOptStat(0)
c1 = TCanvas("c1","",1280,720)
c1.SetGridx()
c1.SetGridy()
legend = TLegend(0.07,0.85,0.2,0.93);
first=True
for plotNew in plotsNew:
for plotOld in plotsOld:
if(plotNew.GetName()==plotOld.GetName()):
plotOld.SetLineColor(4)
plotOld.SetMarkerColor(4)
# plotOld.SetFillColor(4)
plotNew.SetLineColor(2)
plotNew.SetMarkerColor(2)
# plotNew.SetFillColor(2)
# plotNew.SetFillStyle(3002)
plotNew.SetLineWidth(2)
plotOld.SetLineWidth(2)
if first:
legend.AddEntry(plotNew,labelNew,"l");
legend.AddEntry(plotOld,labelOld,"l");
            if plotNew.GetName().rfind("mistagrate")>0:
plotOld.SetMinimum(0.001)
plotNew.SetMinimum(0.001)
c1.SetLogy(1)
else:
c1.SetLogy(0)
plotOld.SetMaximum(1.05*max(plotOld.GetMaximum(),plotNew.GetMaximum(),1))
plotOld.Draw()
plotNew.Draw("same")
legend.Draw()
c1.SaveAs(folder+"/"+plotNew.GetName()+".png")
first=False
|
0ab039175c863183ebf83412ca24c920d76e0587
|
985fa63e9c2751bb4fe531f60c33e4ff951ef0b9
|
/xterm256lut_gen.py
|
16d8f4b366b70d2b2e8aeefe12b63054589132c1
|
[
"WTFPL"
] |
permissive
|
jaseg/lolcat
|
f1c61e89a79c2a3d231755dd2c9a0c79fb59ffdb
|
dc3ddf395017bf903f5bfb8f0c5c6d08d84fcdc0
|
refs/heads/main
| 2023-03-20T02:30:47.266302
| 2023-02-26T17:54:34
| 2023-02-26T17:57:20
| 25,680,568
| 366
| 62
|
NOASSERTION
| 2022-11-19T12:01:36
| 2014-10-24T09:12:12
|
C
|
UTF-8
|
Python
| false
| false
| 670
|
py
|
xterm256lut_gen.py
|
xterm_colors = []
# This is ripped out of pygments
# I leave out the 16 standard colors since people tend to re-define them to their liking.
# colors 16..231: the 6x6x6 color cube
_valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)
for i in range(216):
    r = _valuerange[(i // 36) % 6]
    g = _valuerange[(i // 6) % 6]
    b = _valuerange[i % 6]
    xterm_colors.append((r, g, b))
# colors 232..255: the 24-step grayscale ramp
for i in range(24):
    v = 8 + i * 10
    xterm_colors.append((v, v, v))
# 216 + 24 = 240 entries, matching the xterm256lut[256-16] declaration below
print('/* GENERATED HEADER FILE DO NOT EDIT */')
print('union rgb_c xterm256lut[256-16] = {')
for r,g,b in xterm_colors:
print(f' {{{{0x{b:02x}, 0x{g:02x}, 0x{r:02x}}}}},');
print('};');
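# Companion sketch (hypothetical helper, not emitted into the header): pick the
# LUT entry closest to an arbitrary RGB colour by squared Euclidean distance.
def nearest_xterm(r, g, b):
    dist = lambda c: (c[0] - r) ** 2 + (c[1] - g) ** 2 + (c[2] - b) ** 2
    idx = min(range(len(xterm_colors)), key=lambda i: dist(xterm_colors[i]))
    return idx + 16  # re-apply the offset for the 16 standard colors we skipped

# e.g. nearest_xterm(0xff, 0x80, 0x00) returns an index in 16..255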
|
64f6c1d46ea618340f780497ac81f050708d05c5
|
14a5208b7f301ac38b3ce8022ce3d0e3c0e6bc96
|
/libfmp/c2/__init__.py
|
af8974c856fa77a1655989abb503a4ba49388dec
|
[
"MIT"
] |
permissive
|
meinardmueller/libfmp
|
99d78b3c761766d2c0e12919a869c0dfeb93ccc1
|
424127f2cd8317f796ab1591f7c0ec408208e782
|
refs/heads/master
| 2023-06-07T15:12:10.101521
| 2023-05-25T14:42:29
| 2023-05-25T14:42:29
| 320,262,227
| 133
| 18
|
NOASSERTION
| 2023-05-25T07:32:05
| 2020-12-10T12:13:47
|
Python
|
UTF-8
|
Python
| false
| false
| 749
|
py
|
__init__.py
|
from .c2_complex import generate_figure, \
plot_vector
from .c2_fourier import generate_matrix_dft, \
generate_matrix_dft_inv, \
dft, \
idft, \
fft, \
ifft_noscale, \
ifft, \
stft_basic, \
istft_basic, \
stft, istft, \
stft_convention_fmp
from .c2_interpolation import compute_f_coef_linear, \
compute_f_coef_log, \
interpolate_freq_stft
from .c2_interference import plot_interference, \
generate_chirp_linear
from .c2_digitization import generate_function, \
sampling_equidistant, \
reconstruction_sinc, \
plot_graph_quant_function, \
quantize_uniform, \
plot_signal_quant, \
encoding_mu_law, \
decoding_mu_law, \
plot_mu_law, \
quantize_nonuniform_mu
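# Usage sketch (assuming libfmp is installed): this module simply re-exports the
# Chapter 2 helpers so notebooks can pull them from one namespace, e.g.
#
#   import numpy as np
#   import libfmp.c2
#   X = libfmp.c2.dft(np.ones(8))   # should agree with np.fft.fft(np.ones(8))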
|
6319d5758714c72085099e0604e909f93d8846a7
|
0d543b6f877114fc7ff7f5c2485230f606f6d98d
|
/2021/6.py
|
ef8531f43992c2519a849cde577cf8cd9e6b5695
|
[] |
no_license
|
jonathanpaulson/AdventOfCode
|
eca9d1732ec80dd640d6eed01b3a18d3b3ee455b
|
215f18d7d5b9761ec181954d2e62b6fed3bd12f5
|
refs/heads/master
| 2023-01-08T00:25:09.651009
| 2022-12-25T05:39:11
| 2022-12-25T05:39:11
| 321,228,487
| 227
| 103
| null | 2022-12-01T09:31:36
| 2020-12-14T04:03:53
|
Python
|
UTF-8
|
Python
| false
| false
| 520
|
py
|
6.py
|
#!/usr/bin/python3
import sys
from collections import defaultdict, Counter
infile = sys.argv[1] if len(sys.argv)>1 else '6.in'
X = Counter([int(x) for x in open(infile).read().strip().split(',')])
def solve(S, n):
    # Fish are bucketed by timer value (0..8); each day every bucket shifts
    # down one, and the timer-0 fish both reset to 6 and spawn new fish at 8.
    X = S
    for day in range(n):
Y = defaultdict(int)
for x,cnt in X.items():
if x==0:
Y[6] += cnt
Y[8] += cnt
else:
Y[x-1] += cnt
X = Y
return sum(X.values())
print(solve(X, 80))
print(solve(X, 256))
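# Alternative sketch (not part of the original solution): the daily update is
# linear in the 9-vector of per-timer counts, so n days can also be computed
# as a 9x9 matrix power. Pure Python, so big counts never overflow.
def mat_mul(A, B):
    return [[sum(A[i][k] * B[k][j] for k in range(9)) for j in range(9)] for i in range(9)]

def mat_pow(M, n):
    R = [[int(i == j) for j in range(9)] for i in range(9)]  # identity
    while n:
        if n & 1:
            R = mat_mul(R, M)
        M = mat_mul(M, M)
        n >>= 1
    return R

def solve_matpow(counts, n):
    T = [[0] * 9 for _ in range(9)]
    for j in range(1, 9):
        T[j - 1][j] = 1  # timers 1..8 just tick down each day
    T[6][0] = 1          # a timer-0 fish resets to 6...
    T[8][0] = 1          # ...and also spawns a new fish at 8
    v = [counts.get(i, 0) for i in range(9)]
    Tn = mat_pow(T, n)
    return sum(Tn[i][j] * v[j] for i in range(9) for j in range(9))

# solve_matpow(X, 256) should match solve(X, 256)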
|
22dd341133f90756b54c0eb8093b651e56f759c8
|
2ac03b8c24df220ea32ea525e1d65aeb294cd1a4
|
/custom_components/waste_collection_schedule/waste_collection_schedule/wizard/offenbach_de.py
|
98f614bceba8f2877c08f52a18c65c053cd8e7ab
|
[
"MIT"
] |
permissive
|
mampfes/hacs_waste_collection_schedule
|
a7b98319a7483dedc8cf78b724f93932934c1702
|
1dc9476efef9963a141b9ac987e2708224b9eaaf
|
refs/heads/master
| 2023-08-16T21:14:46.088962
| 2023-08-16T10:05:24
| 2023-08-16T10:05:24
| 254,347,436
| 495
| 428
|
MIT
| 2023-09-12T18:59:07
| 2020-04-09T11:02:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,511
|
py
|
offenbach_de.py
|
#!/usr/bin/env python3
import sys
import inquirer
import requests
import traceback
s = requests.Session()
def get_streets(answers):
url = "https://www.insert-it.de/BmsAbfallkalenderOffenbach/Main/GetStreets"
params = { "text": answers['street'],
"filter%5Bfilters%5D%5B0%5D%5Bvalue%5D": answers['street'],
"filter%5Bfilters%5D%5B0%5D%5Bfield%5D": "Name",
"filter%5Bfilters%5D%5B0%5D%5Boperator%5D": "contains",
"filter%5Bfilters%5D%5B0%5D%5BignoreCase%5D": "true",
"filter%5Blogic%5D": "and" }
r = s.get(url, params=params)
r.raise_for_status()
return r.json()
def get_numbers(streetname, jdata):
url = "https://www.insert-it.de/BmsAbfallkalenderOffenbach/Main/GetLocations"
    streetId = [i['ID'] for i in jdata if i['Name'] == streetname][0]
params = {
"streetId": streetId,
"filter[filters][0][field]": "ID",
"filter[filters][0][operator]": "eq",
"filter[filters][0][value]": streetId,
"filter[logic]": "and"
}
r = s.get(url, params=params)
r.raise_for_status()
return r.json()
def main():
questions = [inquirer.Text("street", message="Enter search string for street")]
jdata = []
while not len(jdata):
try:
answers = inquirer.prompt(questions)
jdata = get_streets(answers)
except Exception as e:
traceback.print_exc()
sys.exit()
questions = [
inquirer.List(
"streetname", choices=[i['Name'] for i in jdata], message="Select street"
)
]
try:
answers = inquirer.prompt(questions)
jdata = get_numbers(**answers, jdata=jdata)
except Exception as e:
traceback.print_exc()
sys.exit()
questions = [
inquirer.List(
"streetnumber", choices=[i['Text'] for i in jdata], message="Select number"
)
]
answers = inquirer.prompt(questions)
location_id = [ i['ID'] for i in jdata if i['Text'] == answers['streetnumber']][0]
print("Copy the following statements into your configuration.yaml:\n")
print("# waste_collection_schedule source configuration")
print("waste_collection_schedule:")
print(" sources:")
print(" - name: offenbach_de")
print(" args:")
print(f" f_id_location: {location_id}")
if __name__ == "__main__":
main()
|
aae58e6411440ca1c95553f917c05fb573db5e2d
|
9882a8d98429fe0f227b062b0e89da9b881e902c
|
/model/blocks/resnet_blocks.py
|
0b2f0ccd623cff8cf8345660b13099b6b4b91752
|
[
"Apache-2.0"
] |
permissive
|
grib0ed0v/face_recognition.pytorch
|
87306a5b8c7ded2bf61ddaf2166bb868be8e72cc
|
05cb9b30e8220445fcb27988926d88f330091c12
|
refs/heads/develop
| 2020-04-26T04:11:51.213924
| 2019-04-10T11:04:21
| 2019-04-10T11:04:21
| 173,293,569
| 170
| 23
|
Apache-2.0
| 2019-04-10T11:04:22
| 2019-03-01T11:49:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,829
|
py
|
resnet_blocks.py
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch.nn as nn
from model.blocks.shared_blocks import make_activation
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, activation=nn.ReLU):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.act1 = make_activation(activation)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.act2 = make_activation(activation)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.act3 = make_activation(activation)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.act1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.act2(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.act3(out)
return out
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, activation=nn.ReLU):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = make_activation(activation)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
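# Minimal smoke test (sketch): a strided BasicBlock needs a matching 1x1
# downsample so the residual addition lines up. Assumes torch is installed
# and that make_activation(nn.ReLU) simply instantiates the activation.
if __name__ == '__main__':
    import torch
    downsample = nn.Sequential(
        nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False),
        nn.BatchNorm2d(128),
    )
    block = BasicBlock(64, 128, stride=2, downsample=downsample)
    out = block(torch.randn(1, 64, 56, 56))
    print(out.shape)  # expected: torch.Size([1, 128, 28, 28])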
|
50c22aa91c15ebd25f3475ba298f575b474f9644
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/inspections/PyNumpyType/ArgSort.py
|
59e682cb7a1da8f7b52061291916f9e7de5a8835
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 2,403
|
py
|
ArgSort.py
|
import numpy as np

def argsort(a, axis=-1, kind='quicksort', order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> np.argsort(x, axis=0)
array([[0, 1],
[1, 0]])
>>> np.argsort(x, axis=1)
array([[0, 1],
[0, 1]])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
"""
try:
argsort = a.argsort
except AttributeError:
return _wrapit(a, 'argsort', axis, kind, order)
return argsort(axis, kind, order)
x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
argsort(x, order=('x', 'y'))
|
21692fc23afc6ebda3b84cff0cd60f0ddcbe0131
|
21ec50d37212bae1714b28ede8cc9bf7b07e2122
|
/src/mwparserfromhell/nodes/extras/attribute.py
|
fc3421a59557d6e09e7e7211a448de01c544dd3b
|
[
"MIT"
] |
permissive
|
earwig/mwparserfromhell
|
c669936e6b3cbc2fa6c20d89461cb98d4cf2ef01
|
0f89f4426bdd9e184ae8c8223672a7a0bf36eb76
|
refs/heads/main
| 2023-08-05T10:31:15.885448
| 2023-04-06T03:39:51
| 2023-04-06T03:39:51
| 4,386,648
| 612
| 79
|
MIT
| 2023-09-05T04:44:13
| 2012-05-20T18:45:54
|
Python
|
UTF-8
|
Python
| false
| false
| 5,294
|
py
|
attribute.py
|
# Copyright (C) 2012-2020 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from ...string_mixin import StringMixIn
from ...utils import parse_anything
__all__ = ["Attribute"]
class Attribute(StringMixIn):
"""Represents an attribute of an HTML tag.
This is used by :class:`.Tag` objects. For example, the tag
``<ref name="foo">`` contains an Attribute whose name is ``"name"`` and
whose value is ``"foo"``.
"""
def __init__(
self,
name,
value=None,
quotes='"',
pad_first=" ",
pad_before_eq="",
pad_after_eq="",
):
super().__init__()
self.name = name
self._quotes = None
self.value = value
self.quotes = quotes
self.pad_first = pad_first
self.pad_before_eq = pad_before_eq
self.pad_after_eq = pad_after_eq
def __str__(self):
result = self.pad_first + str(self.name) + self.pad_before_eq
if self.value is not None:
result += "=" + self.pad_after_eq
if self.quotes:
return result + self.quotes + str(self.value) + self.quotes
return result + str(self.value)
return result
@staticmethod
def _value_needs_quotes(val):
"""Return valid quotes for the given value, or None if unneeded."""
if not val:
return None
val = "".join(str(node) for node in val.filter_text(recursive=False))
if not any(char.isspace() for char in val):
return None
if "'" in val and '"' not in val:
return '"'
if '"' in val and "'" not in val:
return "'"
return "\"'" # Either acceptable, " preferred over '
def _set_padding(self, attr, value):
"""Setter for the value of a padding attribute."""
if not value:
setattr(self, attr, "")
else:
value = str(value)
if not value.isspace():
raise ValueError("padding must be entirely whitespace")
setattr(self, attr, value)
@staticmethod
def coerce_quotes(quotes):
"""Coerce a quote type into an acceptable value, or raise an error."""
orig, quotes = quotes, str(quotes) if quotes else None
if quotes not in [None, '"', "'"]:
raise ValueError("{!r} is not a valid quote type".format(orig))
return quotes
@property
def name(self):
"""The name of the attribute as a :class:`.Wikicode` object."""
return self._name
@property
def value(self):
"""The value of the attribute as a :class:`.Wikicode` object."""
return self._value
@property
def quotes(self):
"""How to enclose the attribute value. ``"``, ``'``, or ``None``."""
return self._quotes
@property
def pad_first(self):
"""Spacing to insert right before the attribute."""
return self._pad_first
@property
def pad_before_eq(self):
"""Spacing to insert right before the equal sign."""
return self._pad_before_eq
@property
def pad_after_eq(self):
"""Spacing to insert right after the equal sign."""
return self._pad_after_eq
@name.setter
def name(self, value):
self._name = parse_anything(value)
@value.setter
def value(self, newval):
if newval is None:
self._value = None
else:
code = parse_anything(newval)
quotes = self._value_needs_quotes(code)
if quotes and (not self.quotes or self.quotes not in quotes):
self._quotes = quotes[0]
self._value = code
@quotes.setter
def quotes(self, value):
value = self.coerce_quotes(value)
if not value and self._value_needs_quotes(self.value):
raise ValueError("attribute value requires quotes")
self._quotes = value
@pad_first.setter
def pad_first(self, value):
self._set_padding("_pad_first", value)
@pad_before_eq.setter
def pad_before_eq(self, value):
self._set_padding("_pad_before_eq", value)
@pad_after_eq.setter
def pad_after_eq(self, value):
self._set_padding("_pad_after_eq", value)
|
d81b5d0e7f5a616ca7fa0dab716a92f8ccbe42c2
|
a2b20597759990445081057d35d113434cfcf970
|
/source/interprocedural_analyses/taint/test/integration/side_effects.py
|
a5c0623c329996fdf4a0b38d0a3fc4d2cc6f54a1
|
[
"MIT"
] |
permissive
|
facebook/pyre-check
|
34059599c02b65605c574f13555229f3b931fd4e
|
fe8ccedc572cc1faa1fd01e9138f65e982875002
|
refs/heads/main
| 2023-09-03T19:10:11.587028
| 2023-09-02T07:40:35
| 2023-09-02T07:40:35
| 110,274,488
| 6,703
| 575
|
MIT
| 2023-09-13T17:02:32
| 2017-11-10T17:31:36
|
OCaml
|
UTF-8
|
Python
| false
| false
| 1,141
|
py
|
side_effects.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from builtins import _test_sink, _test_source
def test_from_1_to_0():
x = 0
change_arg0(x, _test_source())
return x
def test_from_0_to_1():
y = 0
change_arg1(_test_source(), y)
return y
def test_from_1_to_0_nested():
x = {}
change_arg0(x.foo, _test_source())
return x.foo
def test_from_1_to_0_nested_distinct():
x = {}
change_arg0(x.foo, _test_source())
return x.bar
def test_list_append():
l = MyList()
l.append(_test_source())
return l
def wrapper_from_1_to_0(x, y):
change_arg0(x, y)
def wrapper_from_0_to_1(x, y):
change_arg1(x, y)
def to_sink_via_side_effect(y):
x = {}
change_arg0(x, y)
_test_sink(x)
def dict_to_sink_via_side_effect(y):
x = {}
change_arg0(x["foo"], y)
_test_sink(x)
# Mocks that have models
def change_arg0(arg0, arg1):
...
def change_arg1(arg0, arg1):
...
class MyList:
def append(self, arg):
pass
|
b0283d1be35a74ee8175310ea74a93b25eb2366a
|
67ce6a1d1369463b15023cc5bd1be9e823bab398
|
/lib/pymedphys/_utilities/compression.py
|
7d512877a5f78c335ca0c166c69b7091cfcedf0d
|
[
"Apache-2.0"
] |
permissive
|
pymedphys/pymedphys
|
2487efe7259cc4e226e93d32fe86cef01673016e
|
f6acdf9bd2e8a32e372966879284fbd71c612358
|
refs/heads/main
| 2023-08-05T06:27:48.110296
| 2023-06-07T18:22:09
| 2023-06-07T18:22:09
| 168,238,552
| 288
| 79
|
Apache-2.0
| 2023-05-30T03:23:50
| 2019-01-29T22:20:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
compression.py
|
# Copyright (C) 2018 Simon Biggs
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lzma
from glob import glob
def compress_test_file(filepath):
with open(filepath, "rb") as load_file:
with lzma.open("{}.xz".format(filepath), "w") as save_file:
save_file.write(load_file.read())
def compress_test_files(glob_string, exclude_xz_files=True):
files_to_compress = glob(glob_string, recursive=True)
for filepath in files_to_compress:
if not filepath.endswith(".xz") or not exclude_xz_files:
compress_test_file(filepath)
def decompress_test_files():
pass
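# Usage sketch:
#
#   compress_test_files("tests/data/**/*.dcm")
#
# A possible counterpart for the decompress_test_files() stub above
# (hypothetical, not part of the package API):
def _decompress_file_sketch(filepath):
    """Write ``foo.xz`` back out alongside it as ``foo``."""
    with lzma.open(filepath, "rb") as load_file:
        with open(filepath[:-len(".xz")], "wb") as save_file:
            save_file.write(load_file.read())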
|
d60ce4975bac96f5d6296a29561b36380bb65afc
|
6c3259af340d28cdfe7aad63cfb1fb6bc81fb3da
|
/pytest_django/fixtures.py
|
ffc9d4c96b63c6b7c5292ba0adf9a46dfa3fdccc
|
[
"BSD-3-Clause"
] |
permissive
|
pytest-dev/pytest-django
|
df91bfb5536d6fb0a94cff647c4eb68c005c9e0d
|
53373573f905ec5e0ec5786f49efdcdca5ae41fd
|
refs/heads/master
| 2023-08-31T08:33:58.355321
| 2023-04-05T08:15:33
| 2023-04-05T08:15:33
| 2,484,397
| 1,169
| 344
|
NOASSERTION
| 2023-08-05T14:24:05
| 2011-09-29T19:35:55
|
Python
|
UTF-8
|
Python
| false
| false
| 19,147
|
py
|
fixtures.py
|
"""All pytest-django fixtures"""
import os
from contextlib import contextmanager
from functools import partial
from typing import Any, Generator, Iterable, List, Optional, Tuple, Union
import pytest
from . import live_server_helper
from .django_compat import is_django_unittest
from .lazy_django import skip_if_no_django
TYPE_CHECKING = False
if TYPE_CHECKING:
from typing import Literal
import django
_DjangoDbDatabases = Optional[Union["Literal['__all__']", Iterable[str]]]
# transaction, reset_sequences, databases, serialized_rollback
_DjangoDb = Tuple[bool, bool, _DjangoDbDatabases, bool]
__all__ = [
"django_db_setup",
"db",
"transactional_db",
"django_db_reset_sequences",
"django_db_serialized_rollback",
"admin_user",
"django_user_model",
"django_username_field",
"client",
"async_client",
"admin_client",
"rf",
"async_rf",
"settings",
"live_server",
"_live_server_helper",
"django_assert_num_queries",
"django_assert_max_num_queries",
"django_capture_on_commit_callbacks",
]
@pytest.fixture(scope="session")
def django_db_modify_db_settings_tox_suffix() -> None:
skip_if_no_django()
tox_environment = os.getenv("TOX_PARALLEL_ENV")
if tox_environment:
# Put a suffix like _py27-django21 on tox workers
_set_suffix_to_test_databases(suffix=tox_environment)
@pytest.fixture(scope="session")
def django_db_modify_db_settings_xdist_suffix(request) -> None:
skip_if_no_django()
xdist_suffix = getattr(request.config, "workerinput", {}).get("workerid")
if xdist_suffix:
# Put a suffix like _gw0, _gw1 etc on xdist processes
_set_suffix_to_test_databases(suffix=xdist_suffix)
@pytest.fixture(scope="session")
def django_db_modify_db_settings_parallel_suffix(
django_db_modify_db_settings_tox_suffix: None,
django_db_modify_db_settings_xdist_suffix: None,
) -> None:
skip_if_no_django()
@pytest.fixture(scope="session")
def django_db_modify_db_settings(
django_db_modify_db_settings_parallel_suffix: None,
) -> None:
skip_if_no_django()
@pytest.fixture(scope="session")
def django_db_use_migrations(request) -> bool:
return not request.config.getvalue("nomigrations")
@pytest.fixture(scope="session")
def django_db_keepdb(request) -> bool:
return request.config.getvalue("reuse_db")
@pytest.fixture(scope="session")
def django_db_createdb(request) -> bool:
return request.config.getvalue("create_db")
@pytest.fixture(scope="session")
def django_db_setup(
request,
django_test_environment: None,
django_db_blocker,
django_db_use_migrations: bool,
django_db_keepdb: bool,
django_db_createdb: bool,
django_db_modify_db_settings: None,
) -> None:
"""Top level fixture to ensure test databases are available"""
from django.test.utils import setup_databases, teardown_databases
setup_databases_args = {}
if not django_db_use_migrations:
_disable_migrations()
if django_db_keepdb and not django_db_createdb:
setup_databases_args["keepdb"] = True
with django_db_blocker.unblock():
db_cfg = setup_databases(
verbosity=request.config.option.verbose,
interactive=False,
**setup_databases_args
)
def teardown_database() -> None:
with django_db_blocker.unblock():
try:
teardown_databases(db_cfg, verbosity=request.config.option.verbose)
except Exception as exc:
request.node.warn(
pytest.PytestWarning(
f"Error when trying to teardown test databases: {exc!r}"
)
)
if not django_db_keepdb:
request.addfinalizer(teardown_database)
@pytest.fixture()
def _django_db_helper(
request,
django_db_setup: None,
django_db_blocker,
) -> None:
from django import VERSION
if is_django_unittest(request):
return
marker = request.node.get_closest_marker("django_db")
if marker:
(
transactional,
reset_sequences,
databases,
serialized_rollback,
) = validate_django_db(marker)
else:
(
transactional,
reset_sequences,
databases,
serialized_rollback,
) = False, False, None, False
transactional = transactional or reset_sequences or (
"transactional_db" in request.fixturenames
or "live_server" in request.fixturenames
)
reset_sequences = reset_sequences or (
"django_db_reset_sequences" in request.fixturenames
)
serialized_rollback = serialized_rollback or (
"django_db_serialized_rollback" in request.fixturenames
)
django_db_blocker.unblock()
request.addfinalizer(django_db_blocker.restore)
import django.db
import django.test
if transactional:
test_case_class = django.test.TransactionTestCase
else:
test_case_class = django.test.TestCase
_reset_sequences = reset_sequences
_serialized_rollback = serialized_rollback
_databases = databases
class PytestDjangoTestCase(test_case_class): # type: ignore[misc,valid-type]
reset_sequences = _reset_sequences
serialized_rollback = _serialized_rollback
if _databases is not None:
databases = _databases
# For non-transactional tests, skip executing `django.test.TestCase`'s
# `setUpClass`/`tearDownClass`, only execute the super class ones.
#
# `TestCase`'s class setup manages the `setUpTestData`/class-level
# transaction functionality. We don't use it; instead we (will) offer
# our own alternatives. So it only adds overhead, and does some things
# which conflict with our (planned) functionality, particularly, it
# closes all database connections in `tearDownClass` which inhibits
# wrapping tests in higher-scoped transactions.
#
# It's possible a new version of Django will add some unrelated
# functionality to these methods, in which case skipping them completely
# would not be desirable. Let's cross that bridge when we get there...
if not transactional:
@classmethod
def setUpClass(cls) -> None:
super(django.test.TestCase, cls).setUpClass()
if VERSION < (4, 1):
django.db.transaction.Atomic._ensure_durability = False
@classmethod
def tearDownClass(cls) -> None:
if VERSION < (4, 1):
django.db.transaction.Atomic._ensure_durability = True
super(django.test.TestCase, cls).tearDownClass()
PytestDjangoTestCase.setUpClass()
if VERSION >= (4, 0):
request.addfinalizer(PytestDjangoTestCase.doClassCleanups)
request.addfinalizer(PytestDjangoTestCase.tearDownClass)
test_case = PytestDjangoTestCase(methodName="__init__")
test_case._pre_setup()
request.addfinalizer(test_case._post_teardown)
def validate_django_db(marker) -> "_DjangoDb":
"""Validate the django_db marker.
It checks the signature and creates the ``transaction``,
``reset_sequences``, ``databases`` and ``serialized_rollback`` attributes on
the marker which will have the correct values.
Sequence reset and serialized_rollback are only allowed when combined with
transaction.
"""
def apifun(
transaction: bool = False,
reset_sequences: bool = False,
databases: "_DjangoDbDatabases" = None,
serialized_rollback: bool = False,
) -> "_DjangoDb":
return transaction, reset_sequences, databases, serialized_rollback
return apifun(*marker.args, **marker.kwargs)
def _disable_migrations() -> None:
from django.conf import settings
from django.core.management.commands import migrate
class DisableMigrations:
def __contains__(self, item: str) -> bool:
return True
def __getitem__(self, item: str) -> None:
return None
settings.MIGRATION_MODULES = DisableMigrations()
class MigrateSilentCommand(migrate.Command):
def handle(self, *args, **kwargs):
kwargs["verbosity"] = 0
return super().handle(*args, **kwargs)
migrate.Command = MigrateSilentCommand
def _set_suffix_to_test_databases(suffix: str) -> None:
from django.conf import settings
for db_settings in settings.DATABASES.values():
test_name = db_settings.get("TEST", {}).get("NAME")
if not test_name:
if db_settings["ENGINE"] == "django.db.backends.sqlite3":
continue
test_name = f"test_{db_settings['NAME']}"
if test_name == ":memory:":
continue
db_settings.setdefault("TEST", {})
db_settings["TEST"]["NAME"] = f"{test_name}_{suffix}"
# ############### User visible fixtures ################
@pytest.fixture(scope="function")
def db(_django_db_helper: None) -> None:
"""Require a django test database.
This database will be setup with the default fixtures and will have
the transaction management disabled. At the end of the test the outer
transaction that wraps the test itself will be rolled back to undo any
changes to the database (in case the backend supports transactions).
This is more limited than the ``transactional_db`` fixture but
faster.
If both ``db`` and ``transactional_db`` are requested,
``transactional_db`` takes precedence.
"""
# The `_django_db_helper` fixture checks if `db` is requested.
@pytest.fixture(scope="function")
def transactional_db(_django_db_helper: None) -> None:
"""Require a django test database with transaction support.
This will re-initialise the django database for each test and is
thus slower than the normal ``db`` fixture.
If you want to use the database with transactions you must request
this resource.
If both ``db`` and ``transactional_db`` are requested,
``transactional_db`` takes precedence.
"""
# The `_django_db_helper` fixture checks if `transactional_db` is requested.
@pytest.fixture(scope="function")
def django_db_reset_sequences(
_django_db_helper: None,
transactional_db: None,
) -> None:
"""Require a transactional test database with sequence reset support.
This requests the ``transactional_db`` fixture, and additionally
enforces a reset of all auto increment sequences. If the enquiring
test relies on such values (e.g. ids as primary keys), you should
request this resource to ensure they are consistent across tests.
"""
# The `_django_db_helper` fixture checks if `django_db_reset_sequences`
# is requested.
@pytest.fixture(scope="function")
def django_db_serialized_rollback(
_django_db_helper: None,
db: None,
) -> None:
"""Require a test database with serialized rollbacks.
This requests the ``db`` fixture, and additionally performs rollback
emulation - serializes the database contents during setup and restores
it during teardown.
This fixture may be useful for transactional tests, so is usually combined
with ``transactional_db``, but can also be useful on databases which do not
support transactions.
Note that this will slow down that test suite by approximately 3x.
"""
# The `_django_db_helper` fixture checks if `django_db_serialized_rollback`
# is requested.
@pytest.fixture()
def client() -> "django.test.client.Client":
"""A Django test client instance."""
skip_if_no_django()
from django.test.client import Client
return Client()
@pytest.fixture()
def async_client() -> "django.test.client.AsyncClient":
"""A Django test async client instance."""
skip_if_no_django()
from django.test.client import AsyncClient
return AsyncClient()
@pytest.fixture()
def django_user_model(db: None):
"""The class of Django's user model."""
from django.contrib.auth import get_user_model
return get_user_model()
@pytest.fixture()
def django_username_field(django_user_model) -> str:
"""The fieldname for the username used with Django's user model."""
return django_user_model.USERNAME_FIELD
@pytest.fixture()
def admin_user(
db: None,
django_user_model,
django_username_field: str,
):
"""A Django admin user.
This uses an existing user with username "admin", or creates a new one with
password "password".
"""
UserModel = django_user_model
username_field = django_username_field
username = "admin@example.com" if username_field == "email" else "admin"
try:
# The default behavior of `get_by_natural_key()` is to look up by `username_field`.
# However the user model is free to override it with any sort of custom behavior.
# The Django authentication backend already assumes the lookup is by username,
# so we can assume so as well.
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
user_data = {}
if "email" in UserModel.REQUIRED_FIELDS:
user_data["email"] = "admin@example.com"
user_data["password"] = "password"
user_data[username_field] = username
user = UserModel._default_manager.create_superuser(**user_data)
return user
@pytest.fixture()
def admin_client(
db: None,
admin_user,
) -> "django.test.client.Client":
"""A Django test client logged in as an admin user."""
from django.test.client import Client
client = Client()
client.force_login(admin_user)
return client
@pytest.fixture()
def rf() -> "django.test.client.RequestFactory":
"""RequestFactory instance"""
skip_if_no_django()
from django.test.client import RequestFactory
return RequestFactory()
@pytest.fixture()
def async_rf() -> "django.test.client.AsyncRequestFactory":
"""AsyncRequestFactory instance"""
skip_if_no_django()
from django.test.client import AsyncRequestFactory
return AsyncRequestFactory()
class SettingsWrapper:
_to_restore: List[Any] = []
def __delattr__(self, attr: str) -> None:
from django.test import override_settings
override = override_settings()
override.enable()
from django.conf import settings
delattr(settings, attr)
self._to_restore.append(override)
def __setattr__(self, attr: str, value) -> None:
from django.test import override_settings
override = override_settings(**{attr: value})
override.enable()
self._to_restore.append(override)
def __getattr__(self, attr: str):
from django.conf import settings
return getattr(settings, attr)
def finalize(self) -> None:
for override in reversed(self._to_restore):
override.disable()
del self._to_restore[:]
@pytest.fixture()
def settings():
"""A Django settings object which restores changes after the testrun"""
skip_if_no_django()
wrapper = SettingsWrapper()
yield wrapper
wrapper.finalize()
@pytest.fixture(scope="session")
def live_server(request):
"""Run a live Django server in the background during tests
The address the server is started from is taken from the
--liveserver command line option or if this is not provided from
the DJANGO_LIVE_TEST_SERVER_ADDRESS environment variable. If
neither is provided ``localhost`` is used. See the Django
documentation for its full syntax.
NOTE: If the live server needs database access to handle a request
your test will have to request database access. Furthermore
when the tests want to see data added by the live-server (or
the other way around) transactional database access will be
needed as data inside a transaction is not shared between
the live server and test code.
Static assets will be automatically served when
``django.contrib.staticfiles`` is available in INSTALLED_APPS.
"""
skip_if_no_django()
addr = request.config.getvalue("liveserver") or os.getenv(
"DJANGO_LIVE_TEST_SERVER_ADDRESS"
) or "localhost"
server = live_server_helper.LiveServer(addr)
request.addfinalizer(server.stop)
return server
@pytest.fixture(autouse=True, scope="function")
def _live_server_helper(request) -> None:
"""Helper to make live_server work, internal to pytest-django.
This helper will dynamically request the transactional_db fixture
for a test which uses the live_server fixture. This allows the
server and test to access the database without having to mark
this explicitly which is handy since it is usually required and
matches the Django behaviour.
The separate helper is required since live_server can not request
transactional_db directly since it is session scoped instead of
function-scoped.
It will also override settings only for the duration of the test.
"""
if "live_server" not in request.fixturenames:
return
request.getfixturevalue("transactional_db")
live_server = request.getfixturevalue("live_server")
live_server._live_server_modified_settings.enable()
request.addfinalizer(live_server._live_server_modified_settings.disable)
@contextmanager
def _assert_num_queries(
config,
num: int,
exact: bool = True,
connection=None,
info=None,
) -> Generator["django.test.utils.CaptureQueriesContext", None, None]:
from django.test.utils import CaptureQueriesContext
if connection is None:
from django.db import connection as conn
else:
conn = connection
verbose = config.getoption("verbose") > 0
with CaptureQueriesContext(conn) as context:
yield context
num_performed = len(context)
if exact:
failed = num != num_performed
else:
failed = num_performed > num
if failed:
msg = f"Expected to perform {num} queries "
if not exact:
msg += "or less "
verb = "was" if num_performed == 1 else "were"
msg += f"but {num_performed} {verb} done"
if info:
msg += f"\n{info}"
if verbose:
sqls = (q["sql"] for q in context.captured_queries)
msg += "\n\nQueries:\n========\n\n" + "\n\n".join(sqls)
else:
msg += " (add -v option to show queries)"
pytest.fail(msg)
@pytest.fixture(scope="function")
def django_assert_num_queries(pytestconfig):
return partial(_assert_num_queries, pytestconfig)
@pytest.fixture(scope="function")
def django_assert_max_num_queries(pytestconfig):
return partial(_assert_num_queries, pytestconfig, exact=False)
@pytest.fixture(scope="function")
def django_capture_on_commit_callbacks():
from django.test import TestCase
return TestCase.captureOnCommitCallbacks
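# Usage sketch (test code, not part of this module -- URLs and counts are
# placeholders): the query-count helpers act as context managers inside a
# test that has database access, and ``settings`` reverts after each test.
#
#   import pytest
#
#   @pytest.mark.django_db
#   def test_author_list(client, django_assert_num_queries):
#       with django_assert_num_queries(2):
#           client.get("/authors/")
#
#   def test_debug_flag(settings):
#       settings.DEBUG = True   # restored automatically at teardown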
|
b3d1a03e2d6b95fa90e0e8f3272f8d0cde1cd104
|
2e038c642350e9a29bcd845b2f922f9c017fa7d8
|
/src/westpa/cli/tools/plothist.py
|
1a2416ff2e55525696e93c592c641d16e407981a
|
[
"MIT"
] |
permissive
|
westpa/westpa
|
e8e0952bdbe9a95f06eca07762e1e9372156dd9a
|
85ed1c54159d639d2fcb9e23c45f93743bfed2e0
|
refs/heads/westpa2
| 2023-09-01T11:21:44.944424
| 2023-08-11T21:56:40
| 2023-08-11T21:56:40
| 24,576,160
| 181
| 66
|
MIT
| 2023-09-14T16:46:54
| 2014-09-29T02:04:49
|
Python
|
UTF-8
|
Python
| false
| false
| 28,051
|
py
|
plothist.py
|
import logging
import os
import re
import h5py
import numpy as np
import matplotlib
from matplotlib import pyplot
from matplotlib.image import NonUniformImage
from westpa.tools import WESTMasterCommand, WESTSubcommand
from westpa.core import h5io, textio
from westpa.fasthist import normhistnd
from westpa.core.extloader import get_object
log = logging.getLogger('plothist')
# Suppress divide-by-zero in log
np.seterr(divide='ignore', invalid='ignore')
def sum_except_along(array, axes):
'''Reduce the given array by addition over all axes except those listed in the scalar or
iterable ``axes``'''
try:
iter(axes)
except TypeError:
axes = [axes]
kept = set(axes)
summed = list(set(range(array.ndim)) - kept)
# Reorder axes so that the kept axes are first, and in the order they
# were given
array = np.transpose(array, list(axes) + summed).copy()
# Now, the last len(summed) axes are summed over
for _ in range(len(summed)):
array = np.add.reduce(array, axis=-1)
return array
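# Quick illustration (sketch) of the reduction above:
#
#   >>> a = np.arange(24).reshape(2, 3, 4)
#   >>> sum_except_along(a, 1).shape        # keep axis 1, sum over axes 0 and 2
#   (3,)
#   >>> sum_except_along(a, [0, 2]).shape
#   (2, 4)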
class PlotHistBase(WESTSubcommand):
def __init__(self, parent):
super().__init__(parent)
self.input_arg_group = None
self.output_arg_group = None
self.input_h5 = None
self.opmode = None
self.plotscale = None
self.enerzero = None
self.plotrange = None
self.plottitle = None
self.postprocess_function = None
self.plot_contour = None
# Iteration range for average/evolution
self.avail_iter_start = None
self.avail_iter_stop = None
self.avail_iter_step = None
self.iter_start = None
self.iter_stop = None
self.iter_step = None
# Iteration for single point
self.n_iter = None
# An array of dicts describing what dimensions to work with and
# what their ranges should be for the plots.
self.dimensions = []
self.plot_output_filename = None
self.text_output_filename = None
self.hdf5_output_filename = None
def add_args(self, parser):
igroup = self.input_arg_group = parser.add_argument_group('input options')
igroup.add_argument('input', help='HDF5 file containing histogram data')
igroup.add_argument(
'firstdim',
nargs='?',
metavar='DIMENSION',
help='''Plot for the given DIMENSION, specified as INT[:[LB,UB]:LABEL], where
INT is a zero-based integer identifying the dimension in the histogram,
LB and UB are lower and upper bounds for plotting, and LABEL is the label for
the plot axis. (Default: dimension 0, full range.)''',
)
ogroup = self.output_arg_group = parser.add_argument_group('output options')
ogroup.add_argument(
'-o',
'--output',
'--plot-output',
dest='plot_output',
default='hist.pdf',
metavar='PLOT_OUTPUT',
help='''Store plot as PLOT_OUTPUT. This may be set to an empty string
(e.g. --plot-output='') to suppress plotting entirely. The output
format is determined by filename extension (and thus defaults to PDF).
Default: "%(default)s".''',
)
ogroup.add_argument('--hdf5-output', help='''Store plot data in the HDF5 file HDF5_OUTPUT.''')
ogroup.add_argument(
'--plot-contour',
dest='plot_contour',
action='store_const',
const=True,
default=False,
help='''Determines whether or not to superimpose a contour plot over the heatmap for 2D objects.''',
)
pgroup = parser.add_argument_group('plot options')
pmgroup = pgroup.add_mutually_exclusive_group()
pgroup.add_argument('--title', dest='title', help='Include TITLE as the top-of-graph title')
pmgroup.add_argument(
'--linear', dest='plotscale', action='store_const', const='linear', help='Plot the histogram on a linear scale.'
)
pmgroup.add_argument(
'--energy',
dest='plotscale',
action='store_const',
const='energy',
help='Plot the histogram on an inverted natural log scale, corresponding to (free) energy (default).',
)
pmgroup.add_argument(
'--zero-energy',
dest='enerzero',
metavar='E',
default='min',
help='Set the zero of energy to E, which may be a scalar, "min" or "max"',
)
pmgroup.add_argument(
'--log10', dest='plotscale', action='store_const', const='log10', help='Plot the histogram on a base-10 log scale.'
)
pgroup.add_argument(
'--range',
help='''Plot histogram ordinates over the given RANGE, specified as "LB,UB",
where LB and UB are the lower and upper bounds, respectively. For 1-D plots,
this is the Y axis. For 2-D plots, this is the colorbar axis.
(Default: full range.)''',
)
pgroup.add_argument(
'--postprocess-function',
help='''Names a function (as in module.function) that will be called just prior
to saving the plot. The function will be called as ``postprocess(hist, midpoints, binbounds)``
where ``hist`` is the histogram that was plotted, ``midpoints`` is the bin midpoints for
each dimension, and ``binbounds`` is the bin boundaries for each dimension for 2-D plots,
or None otherwise. The plot must be modified in place using the pyplot stateful interface.''',
)
parser.set_defaults(plotscale='energy')
def process_args(self, args):
self.plotscale = args.plotscale
self.input_h5 = h5py.File(args.input, 'r')
self.plot_output_filename = args.plot_output
self.hdf5_output_filename = args.hdf5_output
self.plot_contour = args.plot_contour
if args.title:
self.plottitle = args.title
if args.range:
self.plotrange = self.parse_range(args.range)
        if args.firstdim:
            self.dimensions.append(self.parse_dimspec(args.firstdim))
        else:
            self.dimensions.append({'idim': 0, 'label': 'dimension 0'})
if args.enerzero:
lenerzero = args.enerzero.lower()
if lenerzero not in ('min', 'max'):
try:
self.enerzero = float(args.enerzero)
except ValueError:
raise ValueError('invalid energy zero point {!r}'.format(args.enerzero))
else:
self.enerzero = lenerzero
else:
self.enerzero = 'min'
self.avail_iter_start, self.avail_iter_stop = h5io.get_iter_range(self.input_h5['histograms'])
try:
self.avail_iter_step = h5io.get_iter_step(self.input_h5['histograms'])
except KeyError:
self.avail_iter_step = 1
log.info(
'HDF5 file {!r} contains data for iterations {} -- {} with a step of {}'.format(
args.input, self.avail_iter_start, self.avail_iter_stop, self.avail_iter_step
)
)
if args.postprocess_function:
self.postprocess_function = get_object(args.postprocess_function, path=['.'])
def parse_dimspec(self, dimspec):
dimdata = {}
match = re.match(r'([0-9]+)(?::(?:([^,]+),([^:,]+))?(?::(.*))?)?', dimspec)
if not match:
raise ValueError('invalid dimension specification {!r}'.format(dimspec))
(idim_txt, lb_txt, ub_txt, label) = match.groups()
try:
dimdata['idim'] = int(idim_txt)
if lb_txt:
dimdata['lb'] = float(lb_txt)
if ub_txt:
dimdata['ub'] = float(ub_txt)
if label:
dimdata['label'] = label
else:
dimdata['label'] = 'dimension {}'.format(dimdata['idim'])
except ValueError as e:
raise ValueError('invalid dimension specification {!r}: {!r}'.format(dimspec, e))
return dimdata
def parse_range(self, rangespec):
try:
(lbt, ubt) = rangespec.split(',')
return float(lbt), float(ubt)
except (ValueError, TypeError) as e:
raise ValueError('invalid range specification {!r}: {!r}'.format(rangespec, e))
def _ener_zero(self, hist):
hist = -np.log(hist)
if self.enerzero == 'min':
np.subtract(hist, hist.min(), out=hist, casting="unsafe")
elif self.enerzero == 'max':
np.subtract(hist, hist.max(), out=hist, casting="unsafe")
else:
np.subtract(hist, self.enerzero, out=hist, casting="unsafe")
return hist
class PlotSupports2D(PlotHistBase):
def __init__(self, parent):
super().__init__(parent)
def add_args(self, parser):
self.input_arg_group.add_argument(
'seconddim',
nargs='?',
metavar='ADDTLDIM',
help='''For instantaneous/average plots, plot along the given additional
dimension, producing a color map.''',
)
self.output_arg_group.add_argument(
'--text-output',
help='''Store plot data in a text format at TEXT_OUTPUT. This option is
only valid for 1-D histograms. (Default: no text output.)''',
)
def process_args(self, args):
self.text_output_filename = args.text_output
if args.seconddim is not None:
self.dimensions.append(self.parse_dimspec(args.seconddim))
def _do_1d_output(self, hist, idim, midpoints):
enehist = self._ener_zero(hist)
log10hist = np.log10(hist)
if self.hdf5_output_filename:
with h5py.File(self.hdf5_output_filename, 'w') as output_h5:
h5io.stamp_creator_data(output_h5)
output_h5.attrs['source_data'] = os.path.abspath(self.input_h5.filename)
output_h5.attrs['source_dimension'] = idim
output_h5['midpoints'] = midpoints
output_h5['histogram'] = hist
if self.text_output_filename:
with textio.NumericTextOutputFormatter(self.text_output_filename) as output_file:
output_file.write_header('source data: {} dimension {}'.format(os.path.abspath(self.input_h5.filename), idim))
output_file.write_header('column 0: midpoint of bin')
output_file.write_header('column 1: probability in bin')
output_file.write_header('column 2: -ln P')
output_file.write_header('column 3: log10 P')
np.savetxt(output_file, np.column_stack([midpoints, hist, enehist, log10hist]))
if self.plot_output_filename:
if self.plotscale == 'energy':
plothist = enehist
label = r'$-\ln\,P(x)$'
elif self.plotscale == 'log10':
plothist = log10hist
label = r'$\log_{10}\ P(x)$'
else:
plothist = hist
label = r'$P(x)$'
pyplot.figure()
pyplot.plot(midpoints, plothist)
pyplot.xlim(self.dimensions[0].get('lb'), self.dimensions[0].get('ub'))
if self.plotrange:
pyplot.ylim(*self.plotrange)
pyplot.xlabel(self.dimensions[0]['label'])
pyplot.ylabel(label)
if self.plottitle:
pyplot.title(self.plottitle)
if self.postprocess_function:
self.postprocess_function(plothist, midpoints, None)
pyplot.savefig(self.plot_output_filename)
def _do_2d_output(self, hist, idims, midpoints, binbounds):
enehist = self._ener_zero(hist)
log10hist = np.log10(hist)
if self.hdf5_output_filename:
with h5py.File(self.hdf5_output_filename, 'w') as output_h5:
h5io.stamp_creator_data(output_h5)
output_h5.attrs['source_data'] = os.path.abspath(self.input_h5.filename)
output_h5.attrs['source_dimensions'] = np.array(idims, np.min_scalar_type(max(idims)))
output_h5.attrs['source_dimension_labels'] = np.array([dim['label'] for dim in self.dimensions])
for idim in idims:
output_h5['midpoints_{}'.format(idim)] = midpoints[idim]
output_h5['histogram'] = hist
if self.plot_output_filename:
if self.plotscale == 'energy':
plothist = enehist
label = r'$-\ln\,P(x)$'
elif self.plotscale == 'log10':
plothist = log10hist
label = r'$\log_{10}\ P(\vec{x})$'
else:
plothist = hist
plothist[~np.isfinite(plothist)] = np.nan
label = r'$P(\vec{x})$'
try:
vmin, vmax = self.plotrange
except TypeError:
vmin, vmax = None, None
pyplot.figure()
# Transpose input so that axis 0 is displayed as x and axis 1 is displayed as y
# pyplot.imshow(plothist.T, interpolation='nearest', aspect='auto',
# extent=(midpoints[0][0], midpoints[0][-1], midpoints[1][0], midpoints[1][-1]),
# origin='lower', vmin=vmin, vmax=vmax)
# The following reproduces the former calls to imshow and colorbar
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
ax = pyplot.gca()
nui = NonUniformImage(
ax, extent=(midpoints[0][0], midpoints[0][-1], midpoints[1][0], midpoints[1][-1]), origin='lower', norm=norm
)
nui.set_data(midpoints[0], midpoints[1], plothist.T)
ax.add_image(nui)
ax.set_xlim(midpoints[0][0], midpoints[0][-1])
ax.set_ylim(midpoints[1][0], midpoints[1][-1])
cb = pyplot.colorbar(nui)
cb.set_label(label)
pyplot.xlabel(self.dimensions[0]['label'])
pyplot.xlim(self.dimensions[0].get('lb'), self.dimensions[0].get('ub'))
pyplot.ylabel(self.dimensions[1]['label'])
pyplot.ylim(self.dimensions[1].get('lb'), self.dimensions[1].get('ub'))
if self.plottitle:
pyplot.title(self.plottitle)
if self.postprocess_function:
self.postprocess_function(plothist, midpoints, binbounds)
if self.plot_contour:
pyplot.contour(midpoints[0], midpoints[1], plothist.T)
pyplot.savefig(self.plot_output_filename)
class InstantPlotHist(PlotSupports2D):
subcommand = 'instant'
help_text = 'plot probability distribution for a single WE iteration'
description = '''\
Plot a probability distribution for a single WE iteration. The probability
distribution must have been previously extracted with ``w_pdist`` (or, at
least, must be compatible with the output format of ``w_pdist``; see
``w_pdist --help`` for more information).
'''
def add_args(self, parser):
self.input_arg_group.add_argument(
'--iter',
metavar='N_ITER',
dest='n_iter',
type=int,
help='''Plot distribution for iteration N_ITER
(default: last completed iteration).''',
)
def process_args(self, args):
if args.n_iter:
self.n_iter = min(args.n_iter, self.avail_iter_stop - 1)
else:
self.n_iter = self.avail_iter_stop - 1
def do_instant_plot_1d(self):
'''Plot the histogram for iteration self.n_iter'''
idim = self.dimensions[0]['idim']
n_iters = self.input_h5['n_iter'][...]
iiter = np.searchsorted(n_iters, self.n_iter)
binbounds = self.input_h5['binbounds_{}'.format(idim)][...]
midpoints = self.input_h5['midpoints_{}'.format(idim)][...]
hist = self.input_h5['histograms'][iiter]
# Average over other dimensions
hist = sum_except_along(hist, idim)
normhistnd(hist, [binbounds])
self._do_1d_output(hist, idim, midpoints)
def do_instant_plot_2d(self):
'''Plot the histogram for iteration self.n_iter'''
idim0 = self.dimensions[0]['idim']
idim1 = self.dimensions[1]['idim']
n_iters = self.input_h5['n_iter'][...]
iiter = np.searchsorted(n_iters, self.n_iter)
binbounds_0 = self.input_h5['binbounds_{}'.format(idim0)][...]
midpoints_0 = self.input_h5['midpoints_{}'.format(idim0)][...]
binbounds_1 = self.input_h5['binbounds_{}'.format(idim1)][...]
midpoints_1 = self.input_h5['midpoints_{}'.format(idim1)][...]
hist = self.input_h5['histograms'][iiter]
# Average over other dimensions
hist = sum_except_along(hist, [idim0, idim1])
normhistnd(hist, [binbounds_0, binbounds_1])
self._do_2d_output(hist, [idim0, idim1], [midpoints_0, midpoints_1], [binbounds_0, binbounds_1])
def go(self):
if len(self.dimensions) == 2:
self.do_instant_plot_2d()
else:
self.do_instant_plot_1d()
class AveragePlotHist(PlotSupports2D):
subcommand = 'average'
help_text = 'plot average of a probability distribution over a WE simulation'
description = '''\
Plot a probability distribution averaged over multiple iterations. The
probability distribution must have been previously extracted with ``w_pdist``
(or, at least, must be compatible with the output format of ``w_pdist``; see
``w_pdist --help`` for more information).
'''
def add_args(self, parser):
igroup = self.input_arg_group
igroup.add_argument(
'--first-iter',
dest='first_iter',
type=int,
metavar='N_ITER',
default=1,
help='''Begin averaging at iteration N_ITER (default: %(default)d).''',
)
igroup.add_argument(
'--last-iter',
dest='last_iter',
type=int,
metavar='N_ITER',
help='''Conclude averaging with N_ITER, inclusive (default: last completed iteration).''',
)
def process_args(self, args):
if args.first_iter:
self.iter_start = max(args.first_iter, self.avail_iter_start)
else:
self.iter_start = self.avail_iter_start
if args.last_iter:
self.iter_stop = min(args.last_iter + 1, self.avail_iter_stop)
else:
self.iter_stop = self.avail_iter_stop
def do_average_plot_1d(self):
'''Plot the average histogram for iterations self.iter_start to self.iter_stop'''
idim = self.dimensions[0]['idim']
n_iters = self.input_h5['n_iter'][...]
iiter_start = np.searchsorted(n_iters, self.iter_start)
iiter_stop = np.searchsorted(n_iters, self.iter_stop)
binbounds = self.input_h5['binbounds_{}'.format(idim)][...]
midpoints = self.input_h5['midpoints_{}'.format(idim)][...]
# hist = self.input_h5['histograms'][iiter_start:iiter_stop]
for iiter in range(iiter_start, iiter_stop):
iter_hist = sum_except_along(self.input_h5['histograms'][iiter], idim)
if iiter == iiter_start:
hist = iter_hist
else:
hist += iter_hist
del iter_hist
normhistnd(hist, [binbounds])
self._do_1d_output(hist, idim, midpoints)
def do_average_plot_2d(self):
        '''Plot the average histogram for iterations self.iter_start to self.iter_stop'''
idim0 = self.dimensions[0]['idim']
idim1 = self.dimensions[1]['idim']
n_iters = self.input_h5['n_iter'][...]
iiter_start = np.searchsorted(n_iters, self.iter_start)
iiter_stop = np.searchsorted(n_iters, self.iter_stop)
binbounds_0 = self.input_h5['binbounds_{}'.format(idim0)][...]
midpoints_0 = self.input_h5['midpoints_{}'.format(idim0)][...]
binbounds_1 = self.input_h5['binbounds_{}'.format(idim1)][...]
midpoints_1 = self.input_h5['midpoints_{}'.format(idim1)][...]
for iiter in range(iiter_start, iiter_stop):
iter_hist = sum_except_along(self.input_h5['histograms'][iiter], [idim0, idim1])
if iiter == iiter_start:
hist = iter_hist
else:
hist += iter_hist
normhistnd(hist, [binbounds_0, binbounds_1])
self._do_2d_output(hist, [idim0, idim1], [midpoints_0, midpoints_1], [binbounds_0, binbounds_1])
def go(self):
if len(self.dimensions) == 2:
self.do_average_plot_2d()
else:
self.do_average_plot_1d()
class EvolutionPlotHist(PlotHistBase):
subcommand = 'evolution'
help_text = 'plot evolution of a probability distribution over the course of a WE simulation'
description = '''\
Plot a probability distribution as it evolves over iterations. The
probability distribution must have been previously extracted with ``w_pdist``
(or, at least, must be compatible with the output format of ``w_pdist``; see
``w_pdist --help`` for more information).
'''
def add_args(self, parser):
igroup = self.input_arg_group
igroup.add_argument(
'--first-iter',
dest='first_iter',
type=int,
metavar='N_ITER',
default=1,
help='''Begin analysis at iteration N_ITER (default: %(default)d).''',
)
igroup.add_argument(
'--last-iter',
dest='last_iter',
type=int,
metavar='N_ITER',
help='''Conclude analysis with N_ITER, inclusive (default: last completed iteration).''',
)
igroup.add_argument(
'--step-iter', dest='step_iter', type=int, metavar='STEP', help='''Average in blocks of STEP iterations.'''
)
def process_args(self, args):
if args.first_iter:
self.iter_start = max(args.first_iter, self.avail_iter_start)
else:
self.iter_start = self.avail_iter_start
if args.last_iter:
self.iter_stop = min(args.last_iter + 1, self.avail_iter_stop)
else:
self.iter_stop = self.avail_iter_stop
if args.step_iter:
self.iter_step = max(args.step_iter, self.avail_iter_step)
else:
self.iter_step = self.avail_iter_step
log.info('using data for iterations {} -- {} with a step of {}'.format(self.iter_start, self.iter_stop, self.iter_step))
def go(self):
'''Plot the evolution of the histogram for iterations self.iter_start to self.iter_stop'''
idim = self.dimensions[0]['idim']
n_iters = self.input_h5['n_iter'][...]
iiter_start = np.searchsorted(n_iters, self.iter_start)
iiter_stop = np.searchsorted(n_iters, self.iter_stop)
binbounds = self.input_h5['binbounds_{}'.format(idim)][...]
midpoints = self.input_h5['midpoints_{}'.format(idim)][...]
hists_ds = self.input_h5['histograms']
itercount = self.iter_stop - self.iter_start
# We always round down, so that we don't have a dangling partial block at the end
nblocks = itercount // self.iter_step
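        # e.g. itercount = 95 with iter_step = 10 yields nblocks = 9; the trailing
        # 5 iterations are dropped rather than averaged over a short block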
block_iters = np.empty((nblocks, 2), dtype=n_iters.dtype)
blocked_hists = np.zeros((nblocks, hists_ds.shape[1 + idim]), dtype=hists_ds.dtype)
for iblock, istart in enumerate(range(iiter_start, iiter_start + nblocks * self.iter_step, self.iter_step)):
istop = min(istart + self.iter_step, iiter_stop)
histslice = hists_ds[istart:istop]
# Sum over time
histslice = np.add.reduce(histslice, axis=0)
# Sum over other dimensions
blocked_hists[iblock] = sum_except_along(histslice, idim)
# Normalize
normhistnd(blocked_hists[iblock], [binbounds])
block_iters[iblock, 0] = n_iters[istart]
block_iters[iblock, 1] = n_iters[istop - 1] + 1
# enehists = -np.log(blocked_hists)
enehists = self._ener_zero(blocked_hists)
log10hists = np.log10(blocked_hists)
if self.hdf5_output_filename:
with h5py.File(self.hdf5_output_filename, 'w') as output_h5:
h5io.stamp_creator_data(output_h5)
output_h5.attrs['source_data'] = os.path.abspath(self.input_h5.filename)
output_h5.attrs['source_dimension'] = idim
output_h5['midpoints'] = midpoints
output_h5['histograms'] = blocked_hists
output_h5['n_iter'] = block_iters
if self.plot_output_filename:
if self.plotscale == 'energy':
plothist = enehists
label = r'$-\ln\,P(x)$'
elif self.plotscale == 'log10':
plothist = log10hists
label = r'$\log_{10}\ P(x)$'
else:
plothist = blocked_hists
label = r'$P(x)$'
try:
vmin, vmax = self.plotrange
except TypeError:
vmin, vmax = None, None
pyplot.figure()
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
ax = pyplot.gca()
nui = NonUniformImage(
ax, extent=(midpoints[0], midpoints[-1], block_iters[0, -1], block_iters[-1, -1]), origin='lower', norm=norm
)
            # NonUniformImage.set_data(x, y, data) expects data shaped
            # (len(y), len(x)); plothist is (nblocks, nbins) == (y, x) here,
            # so no transpose is needed, unlike in _do_2d_output
nui.set_data(midpoints, block_iters[:, -1], plothist)
ax.add_image(nui)
ax.set_xlim(midpoints[0], midpoints[-1])
ax.set_ylim(block_iters[0, -1], block_iters[-1, -1])
cb = pyplot.colorbar(nui)
cb.set_label(label)
pyplot.xlabel(self.dimensions[0]['label'])
pyplot.xlim(self.dimensions[0].get('lb'), self.dimensions[0].get('ub'))
pyplot.ylabel('WE Iteration')
if self.plottitle:
pyplot.title(self.plottitle)
if self.postprocess_function:
self.postprocess_function(plothist, midpoints, binbounds)
pyplot.savefig(self.plot_output_filename)
class PlotHistTool(WESTMasterCommand):
prog = 'plothist'
subparsers_title = 'plotting modes'
subcommands = [InstantPlotHist, AveragePlotHist, EvolutionPlotHist]
description = '''\
Plot probability density functions (histograms) generated by w_pdist or other
programs conforming to the same output format. This program operates in one of
three modes:
instant
Plot 1-D and 2-D histograms for an individual iteration. See
``plothist instant --help`` for more information.
average
Plot 1-D and 2-D histograms, averaged over several iterations. See
``plothist average --help`` for more information.
evolution
    Plot the time evolution of 1-D histograms as waterfall (heat map) plots.
See ``plothist evolution --help`` for more information.
This program takes the output of ``w_pdist`` as input (see ``w_pdist --help``
for more information), and can generate any kind of graphical output that
matplotlib supports.
------------------------------------------------------------------------------
Command-line options
------------------------------------------------------------------------------
'''
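# --- Hedged usage sketch (added; not part of the original module) ---
# The --first-iter/--last-iter/--step-iter flags appear in the argument parsers
# above; the positional layout (pdist file, then dimension) is an assumption:
#   plothist average my_pdist.h5 0 --first-iter 10 --last-iter 100
#   plothist evolution my_pdist.h5 0 --first-iter 1 --step-iter 10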
def entry_point():
PlotHistTool().main()
if __name__ == '__main__':
entry_point()
|
78f9b886fcf294b16cb2e967b3d7ab182ee947b8
|
f42cd8413ac5bec6526cc1013253d00fd9becc62
|
/examples/test_examples.py
|
4e29a5635dbd50a97153c83bdefea34914aff201
|
[
"Apache-2.0"
] |
permissive
|
planetlabs/planet-client-python
|
dda964d1d0a02e6413965fffa2f24f8a0b9d51f9
|
8b29a9300f8a144cc56a171f102b1a068fd6b692
|
refs/heads/main
| 2023-08-08T07:24:51.429307
| 2023-07-20T19:49:08
| 2023-07-20T19:49:08
| 37,222,273
| 269
| 89
|
Apache-2.0
| 2023-08-01T00:52:42
| 2015-06-10T20:58:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,980
|
py
|
test_examples.py
|
# Copyright 2020 Planet Labs, Inc.
# Copyright 2022 Planet Labs PBC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test example scripts in this directory
This is not an example script. This is a pytest module that provides automated
testing of the example scripts in this directory to ensure they are always
up-to-date and run successfully.
YAY automated testing and up-to-date examples!
"""
import logging
from pathlib import Path
import os
import subprocess
import sys
import pytest
LOGGER = logging.getLogger(__name__)
# All python files in the current directory except this one
# ref: https://stackoverflow.com/a/56813896
SCRIPTS = [
s for s in Path(__file__).parent.resolve().glob('*.py')
if s.name != Path(__file__).name
]
# use the script name in the test name
def idfn(script_path):
return script_path.name
# provide an environment variable that points to the test temporary download
# directory, to be used by the script for all downloads
@pytest.mark.parametrize('script', SCRIPTS, ids=idfn)
def test_example_script_execution(script, tmpdir):
completed = subprocess.run(
[sys.executable, str(script)],
env={
'TEST_DOWNLOAD_DIR': str(tmpdir),
'PL_API_KEY': os.getenv('PL_API_KEY')
},
        stderr=subprocess.PIPE  # capture stderr for reporting
)
assert not completed.returncode, (
'script failed with following std_err output:\n'
f'{str(completed.stderr, "utf-8")}')
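# --- Hedged sketch (added; not part of the original module) ---
# How an example script under test might consume the variables injected above;
# the env keys mirror those set in test_example_script_execution:
#
#   import os
#   download_dir = os.getenv('TEST_DOWNLOAD_DIR', '.')
#   api_key = os.environ['PL_API_KEY']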
|
40a877126e5d3db3539c58e1ecf0c1a7324f4fab
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/RecoBTag/SecondaryVertex/python/pfInclusiveSecondaryVertexFinderNegativeTagInfos_cfi.py
|
e52505593f5951527436e9ed3f2195b1a5108d86
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 515
|
py
|
pfInclusiveSecondaryVertexFinderNegativeTagInfos_cfi.py
|
import FWCore.ParameterSet.Config as cms
from RecoBTag.SecondaryVertex.pfInclusiveSecondaryVertexFinderTagInfos_cfi import *
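# Hedged note (added): the "negative" tag infos mirror the standard IVF
# selection with sign-flipped displacement cuts (the negated distVal/distSig
# values below), a common handle for estimating light-flavour mistag rates.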
pfInclusiveSecondaryVertexFinderNegativeTagInfos = pfInclusiveSecondaryVertexFinderTagInfos.clone(
extSVDeltaRToJet = -0.3,
extSVCollection = 'inclusiveCandidateNegativeSecondaryVertices',
vertexCuts = dict(
distVal2dMin = -2.5,
distVal2dMax = -0.01,
distSig2dMin = -99999.9,
distSig2dMax = -2.0,
maxDeltaRToJetAxis = -0.5
)
)
|
b4100eb7549ae12f0e3ee621699f4a29afa2a893
|
9b66a01edfddf1b48d0fe99645658f2632444546
|
/models/pytorch/pna/scalers.py
|
8053b719697935db38f59595a917bc5f4278514f
|
[
"MIT"
] |
permissive
|
lukecavabarrett/pna
|
b1893f08153246c6f644677458992087794ac15b
|
0c630c2e2d88bb1ef784c850dd8f3a069fcd9489
|
refs/heads/master
| 2022-08-19T18:14:32.116725
| 2022-08-07T11:30:17
| 2022-08-07T11:30:17
| 252,297,818
| 321
| 52
|
MIT
| 2022-07-25T22:29:20
| 2020-04-01T22:11:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,443
|
py
|
scalers.py
|
import torch
# each scaler is a function that takes as input X (B x N x Din), adj (B x N x N) and
# avg_d (dictionary containing averages over training set) and returns X_scaled (B x N x Din) as output
def scale_identity(X, adj, avg_d=None):
return X
def scale_amplification(X, adj, avg_d=None):
# log(D + 1) / d * X where d is the average of the ``log(D + 1)`` in the training set
D = torch.sum(adj, -1)
scale = (torch.log(D + 1) / avg_d["log"]).unsqueeze(-1)
X_scaled = torch.mul(scale, X)
return X_scaled
def scale_attenuation(X, adj, avg_d=None):
    # d / log(D + 1) * X where d is the average of the ``log(D + 1)`` in the training set
D = torch.sum(adj, -1)
scale = (avg_d["log"] / torch.log(D + 1)).unsqueeze(-1)
X_scaled = torch.mul(scale, X)
return X_scaled
def scale_linear(X, adj, avg_d=None):
# d^{-1} D X where d is the average degree in the training set
D = torch.sum(adj, -1, keepdim=True)
X_scaled = D * X / avg_d["lin"]
return X_scaled
def scale_inverse_linear(X, adj, avg_d=None):
# d D^{-1} X where d is the average degree in the training set
D = torch.sum(adj, -1, keepdim=True)
X_scaled = avg_d["lin"] * X / D
return X_scaled
SCALERS = {'identity': scale_identity, 'amplification': scale_amplification, 'attenuation': scale_attenuation,
'linear': scale_linear, 'inverse_linear': scale_inverse_linear}
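# --- Hedged usage sketch (added; not part of the original module) ---
# A minimal check of scale_amplification on a toy batch, assuming dense
# adjacency of shape (B, N, N); the avg_d value is a made-up training average.
if __name__ == '__main__':
    X = torch.ones(1, 3, 2)                        # B=1, N=3, Din=2
    adj = torch.tensor([[[0., 1., 1.],
                         [1., 0., 0.],
                         [1., 0., 0.]]])           # star graph: degrees 2, 1, 1
    avg_d = {"log": torch.log(torch.tensor(2.0))}  # assumed avg of log(D + 1)
    print(SCALERS['amplification'](X, adj, avg_d))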
|
1aa9aea1cf01c44f6b6ab8375aa73fd08291c3ea
|
c67f2d0677f8870bc1d970891bbe31345ea55ce2
|
/zippy/benchmarks/src/micro/object-layout-change.py
|
9c5bc206498a2ddb7ce4276a993b81cf908e3902
|
[
"BSD-3-Clause"
] |
permissive
|
securesystemslab/zippy
|
a5a1ecf5c688504d8d16128ce901406ffd6f32c2
|
ff0e84ac99442c2c55fe1d285332cfd4e185e089
|
refs/heads/master
| 2022-07-05T23:45:36.330407
| 2018-07-10T22:17:32
| 2018-07-10T22:17:32
| 67,824,983
| 324
| 27
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
object-layout-change.py
|
# zwei 05/02/2015
# object layout change
import time
class Foo:
def __init__(self, a):
self.a = a
def mod(self, b):
self.b = b % 5
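# Note: __init__ defines only `a`; mod() later adds `b`, forcing the runtime to
# change each instance's object layout (hidden class) mid-flight; that
# transition is what this microbenchmark stresses.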
def dochange(n):
res = 0
for i in range(n):
f = Foo(i)
f.mod(res)
res += f.a + f.b
return res
def main(n):
for i in range(n):
res = dochange(100)
def measure(n):
print("Start timing...")
start = time.time()
main(n)
duration = "%.3f\n" % (time.time() - start)
print("object-layout-change: " + duration)
for i in range(100):
main(500)
measure(1000000)
|
0fc47b95948645c5cf05db7f96fe78455754bf73
|
150a7b11cb531f8bc2a045aefcf2ebe1d151efa3
|
/ocs_ci/ocs/clients.py
|
a2d88dbbb3558c4c83d3f35ef725595a0521c4c9
|
[
"MIT"
] |
permissive
|
red-hat-storage/ocs-ci
|
c7ac414e1b86552da0439223dfa9bca39977f31a
|
5e9e504957403148e413326f65c3769bf9d8eb39
|
refs/heads/master
| 2023-08-17T16:19:51.154403
| 2023-08-17T13:27:12
| 2023-08-17T13:27:12
| 179,558,938
| 146
| 210
|
MIT
| 2023-09-14T16:38:44
| 2019-04-04T19:00:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,913
|
py
|
clients.py
|
import logging
import paramiko
import string
from time import sleep
from ocs_ci.ocs.exceptions import CommandFailed, TimeoutException
log = logging.getLogger(__name__)
class WinNode(object):
def __init__(self, **kw):
self.login = "Administrator"
self.password = "CephUser123"
self.ip_address = kw["ip_address"]
self.private_ip = kw["private_ip"]
def win_exec(self, ps_command, timeout=180):
log.info("Running powershell`s command `{}`".format(ps_command))
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(self.ip_address, username=self.login, password=self.password)
command = 'powershell -Command "& {{{}}}"'.format(ps_command)
chan_ssh = client.get_transport().open_session()
chan_ssh.exec_command(command)
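        # poll once per second; the for/else below raises only when the loop
        # completes without break, i.e. no exit status within `timeout` seconds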
for i in range(0, timeout):
sleep(1)
if chan_ssh.exit_status_ready():
break
else:
raise TimeoutException("Timeout")
output = dict()
output["exit_code"] = chan_ssh.recv_exit_status()
output["stdout"] = chan_ssh.recv(-1)
output["stderr"] = chan_ssh.recv_stderr(-1)
if not bool(output["stderr"]) and output["exit_code"] == 0:
return output
else:
raise CommandFailed(output["stderr"])
def start_iscsi_initiator(self):
self.win_exec("Start-Service msiscsi")
self.win_exec("Set-Service msiscsi -startuptype 'automatic'")
def get_iscsi_initiator_name(self):
output = self.win_exec("(Get-InitiatorPort).NodeAddress")
stdout = output["stdout"].strip()
return stdout
def create_new_target(self, ip, port=3260):
command = "New-IscsiTargetPortal -TargetPortalAddress {} -TargetPortalPortNumber {}".format(
ip, port
)
self.win_exec(command)
def delete_target(self):
pass
def connect_to_target(self, ip, username, password):
command = (
"Connect-IscsiTarget -NodeAddress iqn.2003-01.com.redhat.iscsi-gw:ceph-igw"
r" -IsMultipathEnabled \$True -TargetPortalAddress {} -AuthenticationType ONEWAYCHAP"
" -ChapUsername {} -ChapSecret {}".format(ip, username, password)
)
self.win_exec(command)
def disconnect_from_target(
self,
):
command = (
"Disconnect-IscsiTarget -NodeAddress "
"iqn.2003-01.com.redhat.iscsi-gw:ceph-igw -Confirm:$false"
)
self.win_exec(command)
def create_disk(self, number):
letters = list(string.ascii_uppercase)[3 : 3 + number]
for disk, part in zip(letters, list(range(1, 1 + number))):
self.win_exec("Initialize-Disk -Number {} -PartitionStyle MBR".format(part))
self.win_exec(
"New-Partition -DiskNumber {0} -UseMaximumSize -DriveLetter {1}".format(
part, disk
)
)
self.win_exec("Get-Volume -DriveLetter {}".format(disk))
self.win_exec("Format-Volume -DriveLetter {} -FileSystem NTFS".format(disk))
def check_disk(self, number):
command = "Get-Disk -Number {}".format(number)
self.win_exec(command)
def create_fio_job_options(self, job_options):
command = (
"Set-Content -Value \"{}\" -Path 'C:\\Program Files\\fio\\test.fio'".format(
job_options
)
)
self.win_exec(command)
def run_fio_test(self):
log.info("starting fio test")
try:
output = self.win_exec(
"cd 'C:\\Program Files\\fio\\'; .\\fio.exe .\\test.fio", timeout=4800
)
except CommandFailed:
log.exception("fio test filed")
return 1
else:
log.info(output["stdout"])
return 0
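# --- Hedged usage sketch (added; not part of the original module) ---
# The IP addresses are documentation placeholders; running this requires a
# reachable Windows host accepting the hard-coded Administrator credentials.
if __name__ == "__main__":
    node = WinNode(ip_address="203.0.113.10", private_ip="10.0.0.10")
    node.start_iscsi_initiator()
    print(node.get_iscsi_initiator_name())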
|
8f607ead3e3ed7a87915ae539db37c2729533c31
|
8caa926fa767898a0a8fba1d5b3bef73d96be410
|
/sagemaker-pyspark-sdk/tests/algorithms/linear_learner_multi_class_classifier_test.py
|
ad455eb812c1cd3595cac620559091e0af0b7847
|
[
"Apache-2.0"
] |
permissive
|
aws/sagemaker-spark
|
72122fd3675487050965c9d35f3b26295c9f174b
|
e27ccff05cd4f062b67712411b3c9ac95308502c
|
refs/heads/master
| 2023-05-11T12:38:13.951884
| 2022-08-26T16:19:57
| 2022-08-26T16:19:57
| 111,010,291
| 297
| 141
|
Apache-2.0
| 2023-05-10T02:39:27
| 2017-11-16T18:58:56
|
Scala
|
UTF-8
|
Python
| false
| false
| 22,477
|
py
|
linear_learner_multi_class_classifier_test.py
|
import os
import pytest
from pyspark import SparkConf, SparkContext
from sagemaker_pyspark import (S3DataPath, EndpointCreationPolicy, RandomNamePolicyFactory,
SageMakerClients, IAMRole, classpath_jars)
from sagemaker_pyspark.algorithms import LinearLearnerMultiClassClassifier
from sagemaker_pyspark.transformation.serializers import ProtobufRequestRowSerializer
from sagemaker_pyspark.transformation.deserializers \
import LinearLearnerMultiClassClassifierProtobufResponseRowDeserializer
@pytest.fixture(autouse=True)
def with_spark_context():
os.environ['SPARK_CLASSPATH'] = ":".join(classpath_jars())
conf = (SparkConf()
.set("spark.driver.extraClassPath", os.environ['SPARK_CLASSPATH']))
if SparkContext._active_spark_context is None:
SparkContext(conf=conf)
yield SparkContext._active_spark_context
# TearDown
SparkContext.stop(SparkContext._active_spark_context)
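# Note: everything after the yield in the autouse fixture above runs as
# teardown, so the SparkContext is stopped after every test and recreated with
# the sagemaker classpath on the next one.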
def get_linear_learner_multi_class_classifier():
training_instance_type = "c4.8xlarge"
training_instance_count = 3
endpoint_instance_type = "c4.8xlarge"
endpoint_initial_instance_count = 3
estimator = LinearLearnerMultiClassClassifier(
trainingInstanceType=training_instance_type,
trainingInstanceCount=training_instance_count,
endpointInstanceType=endpoint_instance_type,
endpointInitialInstanceCount=endpoint_initial_instance_count,
sagemakerRole=IAMRole("some-role"),
trainingProjectedColumns=None,
trainingS3DataDistribution="by-key",
trainingInputMode="File",
endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_TRANSFORM,
modelPrependInputRowsToTransformationRows=True,
namePolicyFactory=RandomNamePolicyFactory(),
uid="sagemaker")
return estimator
def test_can_create_classifier_from_configured_iam_role():
training_instance_type = "c4.8xlarge"
training_instance_count = 3
endpoint_instance_type = "c4.8xlarge"
endpoint_initial_instance_count = 3
estimator = LinearLearnerMultiClassClassifier(
trainingInstanceType=training_instance_type,
trainingInstanceCount=training_instance_count,
endpointInstanceType=endpoint_instance_type,
endpointInitialInstanceCount=endpoint_initial_instance_count,
trainingProjectedColumns=None,
trainingS3DataDistribution="by-key",
trainingInputMode="File",
endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_TRANSFORM,
modelPrependInputRowsToTransformationRows=True,
namePolicyFactory=RandomNamePolicyFactory(),
uid="sagemaker")
    # returning a value from a test triggers a pytest warning; assert instead
    # that construction with the configured IAM role succeeded
    assert estimator is not None
def test_linear_learner_multi_class_classifier_has_correct_defaults():
estimator = get_linear_learner_multi_class_classifier()
assert estimator.trainingSparkDataFormat == "sagemaker"
def test_linearLearnerMultiClassClassifier_passes_correct_params_to_scala():
training_instance_type = "c4.8xlarge"
training_instance_count = 3
endpoint_instance_type = "c4.8xlarge"
endpoint_initial_instance_count = 3
training_bucket = "random-bucket"
input_prefix = "linear-learner-multi-class-classifier-training"
output_prefix = "linear-learner-multi-class-classifier-out"
integTestingRole = "arn:aws:iam::123456789:role/SageMakerRole"
estimator = LinearLearnerMultiClassClassifier(
trainingInstanceType=training_instance_type,
trainingInstanceCount=training_instance_count,
endpointInstanceType=endpoint_instance_type,
endpointInitialInstanceCount=endpoint_initial_instance_count,
sagemakerRole=IAMRole(integTestingRole),
requestRowSerializer=ProtobufRequestRowSerializer(),
responseRowDeserializer=LinearLearnerMultiClassClassifierProtobufResponseRowDeserializer(),
trainingInstanceVolumeSizeInGB=2048,
trainingInputS3DataPath=S3DataPath(training_bucket, input_prefix),
trainingOutputS3DataPath=S3DataPath(training_bucket, output_prefix),
trainingMaxRuntimeInSeconds=1,
endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_TRANSFORM,
sagemakerClient=SageMakerClients.create_sagemaker_client(),
s3Client=SageMakerClients.create_s3_default_client(),
stsClient=SageMakerClients.create_sts_default_client(),
modelPrependInputRowsToTransformationRows=True,
namePolicyFactory=RandomNamePolicyFactory(),
uid="sagemaker")
assert estimator.trainingInputS3DataPath.bucket == training_bucket
assert estimator.trainingInputS3DataPath.objectPath == input_prefix
assert estimator.trainingInstanceCount == training_instance_count
assert estimator.trainingInstanceType == training_instance_type
assert estimator.endpointInstanceType == endpoint_instance_type
assert estimator.endpointInitialInstanceCount == endpoint_initial_instance_count
assert estimator.trainingInstanceVolumeSizeInGB == 2048
assert estimator.trainingMaxRuntimeInSeconds == 1
assert estimator.trainingKmsKeyId is None
def test_linearLearnerMultiClassClassifier_validates_feature_dim():
estimator = get_linear_learner_multi_class_classifier()
estimator.setFeatureDim(2)
assert estimator.getFeatureDim() == 2
# passing 0 should fail
with pytest.raises(ValueError):
estimator.setFeatureDim(0)
# passing a string should fail
with pytest.raises(TypeError):
estimator.setFeatureDim("0")
estimator.setFeatureDim(2)
estimator._transfer_params_to_java()
assert estimator.getFeatureDim() == estimator._call_java("getFeatureDim")
def test_linearLearnerMultiClassClassifier_validates_mini_batch_size():
estimator = get_linear_learner_multi_class_classifier()
estimator.setMiniBatchSize(3)
assert estimator.getMiniBatchSize() == 3
with pytest.raises(ValueError):
estimator.setMiniBatchSize(0)
estimator.setMiniBatchSize(3)
estimator._transfer_params_to_java()
assert estimator.getMiniBatchSize() == estimator._call_java("getMiniBatchSize")
def test_linearLearnerMultiClassClassifier_validates_epochs():
estimator = get_linear_learner_multi_class_classifier()
estimator.setEpochs(3)
assert estimator.getEpochs() == 3
with pytest.raises(ValueError):
estimator.setEpochs(-1)
estimator.setEpochs(2)
estimator._transfer_params_to_java()
assert estimator.getEpochs() == estimator._call_java("getEpochs")
def test_linearLearnerMultiClassClassifier_validates_use_bias():
estimator = get_linear_learner_multi_class_classifier()
estimator.setUseBias("True")
assert estimator.getUseBias() is True
with pytest.raises(ValueError):
estimator.setUseBias("some-value")
estimator.setUseBias("False")
estimator._transfer_params_to_java()
assert estimator.getUseBias() == estimator._call_java("getUseBias")
def test_linearLearnerMultiClassClassifier_validates_num_models():
estimator = get_linear_learner_multi_class_classifier()
estimator.setNumModels(3)
assert estimator.getNumModels() == "3"
estimator.setNumModels("auto")
assert estimator.getNumModels() == "auto"
with pytest.raises(ValueError):
estimator.setNumModels(-1)
with pytest.raises(ValueError):
estimator.setNumModels("some-value")
estimator.setNumModels("2")
estimator._transfer_params_to_java()
assert estimator.getNumModels() == estimator._call_java("getNumModels")
def test_linearLearnerMultiClassClassifier_validates_num_calibration_samples():
estimator = get_linear_learner_multi_class_classifier()
estimator.setNumCalibrationSamples(3)
assert estimator.getNumCalibrationSamples() == 3
with pytest.raises(ValueError):
estimator.setNumCalibrationSamples(-1)
estimator.setNumCalibrationSamples(2)
estimator._transfer_params_to_java()
assert estimator.getNumCalibrationSamples() == estimator._call_java("getNumCalibrationSamples")
def test_linearLearnerMultiClassClassifier_validates_init_method():
estimator = get_linear_learner_multi_class_classifier()
estimator.setInitMethod("uniform")
assert estimator.getInitMethod() == "uniform"
with pytest.raises(ValueError):
estimator.setInitMethod("some-value")
estimator.setInitMethod("normal")
estimator._transfer_params_to_java()
assert estimator.getInitMethod() == estimator._call_java("getInitMethod")
def test_linearLearnerMultiClassClassifier_validates_init_scale():
estimator = get_linear_learner_multi_class_classifier()
estimator.setInitScale(1)
assert estimator.getInitScale() == 1
with pytest.raises(ValueError):
estimator.setInitScale(0)
estimator.setInitScale(0.5)
estimator._transfer_params_to_java()
assert estimator.getInitScale() == estimator._call_java("getInitScale")
def test_linearLearnerMultiClassClassifier_validates_init_sigma():
estimator = get_linear_learner_multi_class_classifier()
estimator.setInitSigma(0.5)
assert estimator.getInitSigma() == 0.5
with pytest.raises(ValueError):
estimator.setInitSigma(0)
estimator.setInitSigma(0.1)
estimator._transfer_params_to_java()
assert estimator.getInitSigma() == estimator._call_java("getInitSigma")
def test_linearLearnerMultiClassClassifier_validates_optimizer():
estimator = get_linear_learner_multi_class_classifier()
estimator.setOptimizer("adam")
assert estimator.getOptimizer() == "adam"
with pytest.raises(ValueError):
estimator.setOptimizer("some-value")
estimator.setOptimizer("sgd")
estimator._transfer_params_to_java()
assert estimator.getOptimizer() == estimator._call_java("getOptimizer")
def test_linearLearnerMultiClassClassifier_validates_loss():
estimator = get_linear_learner_multi_class_classifier()
estimator.setLoss("auto")
assert estimator.getLoss() == "auto"
with pytest.raises(ValueError):
estimator.setLoss("some-value")
estimator.setLoss("logistic")
estimator._transfer_params_to_java()
assert estimator.getLoss() == estimator._call_java("getLoss")
def test_linearLearnerMultiClassClassifier_validates_wd():
estimator = get_linear_learner_multi_class_classifier()
estimator.setWd(0.5)
assert estimator.getWd() == 0.5
with pytest.raises(ValueError):
estimator.setWd(-1)
estimator.setWd(0.1)
estimator._transfer_params_to_java()
assert estimator.getWd() == estimator._call_java("getWd")
def test_linearLearnerMultiClassClassifier_validates_l1():
estimator = get_linear_learner_multi_class_classifier()
estimator.setL1(0.5)
assert estimator.getL1() == 0.5
with pytest.raises(ValueError):
estimator.setL1(-1)
estimator.setL1(0.1)
estimator._transfer_params_to_java()
assert estimator.getL1() == estimator._call_java("getL1")
def test_linearLearnerMultiClassClassifier_validates_momentum():
estimator = get_linear_learner_multi_class_classifier()
estimator.setMomentum(0.5)
assert estimator.getMomentum() == 0.5
with pytest.raises(ValueError):
estimator.setMomentum(3)
estimator.setMomentum(0.1)
estimator._transfer_params_to_java()
assert estimator.getMomentum() == estimator._call_java("getMomentum")
def test_linearLearnerMultiClassClassifier_validates_learning_rate():
estimator = get_linear_learner_multi_class_classifier()
estimator.setLearningRate(0.5)
assert estimator.getLearningRate() == "0.5"
estimator.setLearningRate("auto")
assert estimator.getLearningRate() == "auto"
with pytest.raises(ValueError):
estimator.setLearningRate(-1)
with pytest.raises(ValueError):
estimator.setLearningRate("some-value")
estimator.setLearningRate("0.1")
estimator._transfer_params_to_java()
assert estimator.getLearningRate() == estimator._call_java("getLearningRate")
def test_linearLearnerMultiClassClassifier_validates_beta_1():
estimator = get_linear_learner_multi_class_classifier()
estimator.setBeta1(0.5)
assert estimator.getBeta1() == 0.5
with pytest.raises(ValueError):
estimator.setBeta1(3)
estimator.setBeta1(0.1)
estimator._transfer_params_to_java()
assert estimator.getBeta1() == estimator._call_java("getBeta1")
def test_linearLearnerMultiClassClassifier_validates_beta_2():
estimator = get_linear_learner_multi_class_classifier()
estimator.setBeta2(0.5)
assert estimator.getBeta2() == 0.5
with pytest.raises(ValueError):
estimator.setBeta2(3)
estimator.setBeta2(0.1)
estimator._transfer_params_to_java()
assert estimator.getBeta2() == estimator._call_java("getBeta2")
def test_linearLearnerMultiClassClassifier_validates_bias_lr_mult():
estimator = get_linear_learner_multi_class_classifier()
estimator.setBiasLrMult(0.5)
assert estimator.getBiasLrMult() == 0.5
with pytest.raises(ValueError):
estimator.setBiasLrMult(0)
estimator.setBiasLrMult(0.1)
estimator._transfer_params_to_java()
assert estimator.getBiasLrMult() == estimator._call_java("getBiasLrMult")
def test_linearLearnerMultiClassClassifier_validates_bias_wd_mult():
estimator = get_linear_learner_multi_class_classifier()
estimator.setBiasWdMult(0.5)
assert estimator.getBiasWdMult() == 0.5
with pytest.raises(ValueError):
estimator.setBiasWdMult(-1)
estimator.setBiasWdMult(0.1)
estimator._transfer_params_to_java()
assert estimator.getBiasWdMult() == estimator._call_java("getBiasWdMult")
def test_linearLearnerMultiClassClassifier_validates_use_lr_scheduler():
estimator = get_linear_learner_multi_class_classifier()
estimator.setUseLrScheduler("True")
assert estimator.getUseLrScheduler() is True
with pytest.raises(ValueError):
estimator.setUseLrScheduler("some-value")
estimator.setUseLrScheduler("False")
estimator._transfer_params_to_java()
assert estimator.getUseLrScheduler() == estimator._call_java("getUseLrScheduler")
def test_linearLearnerMultiClassClassifier_validates_lr_scheduler_step():
estimator = get_linear_learner_multi_class_classifier()
estimator.setLrSchedulerStep(5)
assert estimator.getLrSchedulerStep() == 5
with pytest.raises(ValueError):
estimator.setLrSchedulerStep(0)
estimator.setLrSchedulerStep(1)
estimator._transfer_params_to_java()
assert estimator.getLrSchedulerStep() == estimator._call_java("getLrSchedulerStep")
def test_linearLearnerMultiClassClassifier_validates_lr_scheduler_factor():
estimator = get_linear_learner_multi_class_classifier()
estimator.setLrSchedulerFactor(0.5)
assert estimator.getLrSchedulerFactor() == 0.5
with pytest.raises(ValueError):
estimator.setLrSchedulerFactor(3)
estimator.setLrSchedulerFactor(0.1)
estimator._transfer_params_to_java()
assert estimator.getLrSchedulerFactor() == estimator._call_java("getLrSchedulerFactor")
def test_linearLearnerMultiClassClassifier_validates_lr_scheduler_minimum_lr():
estimator = get_linear_learner_multi_class_classifier()
estimator.setLrSchedulerMinimumLr(0.5)
assert estimator.getLrSchedulerMinimumLr() == 0.5
with pytest.raises(ValueError):
estimator.setLrSchedulerMinimumLr(0)
estimator.setLrSchedulerMinimumLr(0.1)
estimator._transfer_params_to_java()
assert estimator.getLrSchedulerMinimumLr() == estimator._call_java("getLrSchedulerMinimumLr")
def test_linearLearnerMultiClassClassifier_validates_num_classes():
estimator = get_linear_learner_multi_class_classifier()
estimator.setNumClasses(5)
assert estimator.getNumClasses() == 5
with pytest.raises(ValueError):
estimator.setNumClasses(2)
estimator.setNumClasses(3)
estimator._transfer_params_to_java()
assert estimator.getNumClasses() \
== estimator._call_java("getNumClasses")
def test_linearLearnerMultiClassClassifier_validates_accuracy_top_k():
estimator = get_linear_learner_multi_class_classifier()
estimator.setAccuracyTopK(5)
assert estimator.getAccuracyTopK() == 5
with pytest.raises(ValueError):
estimator.setAccuracyTopK(0)
estimator.setAccuracyTopK(2)
estimator._transfer_params_to_java()
assert estimator.getAccuracyTopK() == estimator._call_java("getAccuracyTopK")
def test_linearLearnerMultiClassClassifier_validates_balance_multiclass_weights():
estimator = get_linear_learner_multi_class_classifier()
estimator.setBalanceMultiClassWeights("True")
assert estimator.getBalanceMultiClassWeights() is True
with pytest.raises(TypeError):
estimator.setBalanceMultiClassWeights(3)
estimator.setBalanceMultiClassWeights("False")
estimator._transfer_params_to_java()
assert estimator.getBalanceMultiClassWeights() ==\
estimator._call_java("getBalanceMultiClassWeights")
def test_linearLearnerMultiClassClassifier_validates_normalize_data():
estimator = get_linear_learner_multi_class_classifier()
estimator.setNormalizeData("True")
assert estimator.getNormalizeData() is True
with pytest.raises(ValueError):
estimator.setNormalizeData("some-value")
estimator.setNormalizeData("False")
estimator._transfer_params_to_java()
assert estimator.getNormalizeData() == estimator._call_java("getNormalizeData")
def test_linearLearnerMultiClassClassifier_validates_normalize_label():
estimator = get_linear_learner_multi_class_classifier()
estimator.setNormalizeLabel("True")
assert estimator.getNormalizeLabel() is True
with pytest.raises(ValueError):
estimator.setNormalizeLabel("some-value")
estimator.setNormalizeLabel("False")
estimator._transfer_params_to_java()
assert estimator.getNormalizeLabel() == estimator._call_java("getNormalizeLabel")
def test_linearLearnerMultiClassClassifier_validates_unbias_data():
estimator = get_linear_learner_multi_class_classifier()
estimator.setUnbiasData("True")
assert estimator.getUnbiasData() is True
with pytest.raises(ValueError):
estimator.setUnbiasData("some-value")
estimator.setUnbiasData("False")
estimator._transfer_params_to_java()
assert estimator.getUnbiasData() == estimator._call_java("getUnbiasData")
def test_linearLearnerMultiClassClassifier_validates_unbias_label():
estimator = get_linear_learner_multi_class_classifier()
estimator.setUnbiasLabel("True")
assert estimator.getUnbiasLabel() is True
with pytest.raises(ValueError):
estimator.setUnbiasLabel("some-value")
estimator.setUnbiasLabel("False")
estimator._transfer_params_to_java()
assert estimator.getUnbiasLabel() == estimator._call_java("getUnbiasLabel")
def test_linearLearnerMultiClassClassifier_num_point_for_scaler():
estimator = get_linear_learner_multi_class_classifier()
estimator.setNumPointForScaler(5)
assert estimator.getNumPointForScaler() == 5
with pytest.raises(ValueError):
estimator.setNumPointForScaler(-1)
estimator.setNumPointForScaler(1)
estimator._transfer_params_to_java()
assert estimator.getNumPointForScaler() == estimator._call_java("getNumPointForScaler")
def test_linearLearnerMultiClassClassifier_early_stopping_patience():
estimator = get_linear_learner_multi_class_classifier()
estimator.setEarlyStoppingPatience(5)
assert estimator.getEarlyStoppingPatience() == 5
with pytest.raises(ValueError):
estimator.setEarlyStoppingPatience(-1)
estimator.setEarlyStoppingPatience(1)
estimator._transfer_params_to_java()
assert estimator.getEarlyStoppingPatience() == estimator._call_java("getEarlyStoppingPatience")
def test_linearLearnerMultiClassClassifier_early_stopping_tolerance():
estimator = get_linear_learner_multi_class_classifier()
estimator.setEarlyStoppingTolerance(5.0)
assert estimator.getEarlyStoppingTolerance() == 5.0
with pytest.raises(ValueError):
estimator.setEarlyStoppingTolerance(-1.0)
estimator.setEarlyStoppingTolerance(1.0)
estimator._transfer_params_to_java()
assert estimator.getEarlyStoppingTolerance() == \
estimator._call_java("getEarlyStoppingTolerance")
def test_linearLearnerMultiClassClassifier_margin():
estimator = get_linear_learner_multi_class_classifier()
estimator.setMargin(5.0)
assert estimator.getMargin() == 5.0
with pytest.raises(ValueError):
estimator.setMargin(-1.0)
estimator.setMargin(1.0)
estimator._transfer_params_to_java()
assert estimator.getMargin() == estimator._call_java("getMargin")
def test_linearLearnerMultiClassClassifier_quantile():
estimator = get_linear_learner_multi_class_classifier()
estimator.setQuantile(0.5)
assert estimator.getQuantile() == 0.5
with pytest.raises(ValueError):
estimator.setQuantile(-1)
with pytest.raises(ValueError):
estimator.setQuantile(2)
estimator.setQuantile(0.1)
estimator._transfer_params_to_java()
assert estimator.getQuantile() == estimator._call_java("getQuantile")
def test_linearLearnerMultiClassClassifier_loss_insensitivity():
estimator = get_linear_learner_multi_class_classifier()
estimator.setLossInsensitivity(0.5)
assert estimator.getLossInsensitivity() == 0.5
with pytest.raises(ValueError):
estimator.setLossInsensitivity(-1)
estimator.setLossInsensitivity(0.1)
estimator._transfer_params_to_java()
assert estimator.getLossInsensitivity() == estimator._call_java("getLossInsensitivity")
def test_linearLearnerMultiClassClassifier_huber_delta():
estimator = get_linear_learner_multi_class_classifier()
estimator.setHuberDelta(5.0)
assert estimator.getHuberDelta() == 5.0
with pytest.raises(ValueError):
estimator.setHuberDelta(-1.0)
estimator.setHuberDelta(1.0)
estimator._transfer_params_to_java()
assert estimator.getHuberDelta() == estimator._call_java("getHuberDelta")
def test_linearLearnerMultiClassClassifier_f_beta():
estimator = get_linear_learner_multi_class_classifier()
estimator.setFBeta(5.0)
assert estimator.getFBeta() == 5.0
with pytest.raises(ValueError):
estimator.setFBeta(-1.0)
estimator.setFBeta(1.0)
estimator._transfer_params_to_java()
assert estimator.getFBeta() == estimator._call_java("getFBeta")
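# --- Hedged note (added; not part of the original module) ---
# Every validator test above follows the same three-step contract: the setter
# round-trips through the Python param, out-of-range or mistyped input raises,
# and after _transfer_params_to_java() the Python getter agrees with the
# JVM-side getter.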
|