code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import functools
import numpy as np
import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
"""
A torch process group which only includes processes that are on the same machine as the current process.
This variable is set when processes are spawned by `launch()` in "engine/launch.py".
"""
def get_world_size() -> int:
    """Return the number of distributed processes, or 1 when not running distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def get_rank() -> int:
    """Return the global rank of the current process (0 when not running distributed)."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
def get_local_rank() -> int:
    """
    Returns:
        The rank of the current process within the local (per-machine) process group.
        Falls back to 0 when torch.distributed is unavailable or uninitialized.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    # The local group is created by launch(); without it there is no notion of
    # a per-machine rank.
    assert (
        _LOCAL_PROCESS_GROUP is not None
    ), "Local process group is not created! Please use launch() to spawn processes!"
    return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
def get_local_size() -> int:
    """
    Returns:
        The size of the per-machine process group,
        i.e. the number of processes per machine.
    """
    if not dist.is_available():
        return 1
    if not dist.is_initialized():
        return 1
    # Consistency fix: mirror get_local_rank(). Without this assert, passing
    # group=None below would silently return the *global* world size, which is
    # not the per-machine size this function promises.
    assert (
        _LOCAL_PROCESS_GROUP is not None
    ), "Local process group is not created! Please use launch() to spawn processes!"
    return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
def is_main_process() -> bool:
    """Return whether the current process is the global rank-0 (main) process."""
    return get_rank() == 0
def synchronize():
    """
    Barrier across all processes in distributed training; a no-op when
    torch.distributed is unavailable, uninitialized, or world size is 1.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return
    if dist.get_world_size() == 1:
        return
    if dist.get_backend() == dist.Backend.NCCL:
        # device_ids is valid only for the NCCL backend; passing it avoids warnings.
        dist.barrier(device_ids=[torch.cuda.current_device()])
    else:
        dist.barrier()
@functools.lru_cache()
def _get_global_gloo_group():
    """
    Return a cached gloo-backend process group containing all the ranks.

    When the default backend is NCCL, a dedicated gloo group is created so that
    CPU objects can be communicated; otherwise the default WORLD group is reused.
    """
    if dist.get_backend() != "nccl":
        return dist.group.WORLD
    return dist.new_group(backend="gloo")
def all_gather(data, group=None):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors).

    Args:
        data: any picklable object
        group: a torch process group. By default, a gloo-backend group
            containing all ranks is used.

    Returns:
        list[data]: list of data gathered from each rank
    """
    if get_world_size() == 1:
        return [data]
    if group is None:
        # Use the CPU (gloo) group by default, to reduce GPU RAM usage.
        group = _get_global_gloo_group()
    group_size = dist.get_world_size(group)
    if group_size == 1:
        return [data]
    gathered = [None] * group_size
    dist.all_gather_object(gathered, data, group=group)
    return gathered
def gather(data, dst=0, group=None):
    """
    Run gather on arbitrary picklable data (not necessarily tensors).

    Args:
        data: any picklable object
        dst (int): destination rank
        group: a torch process group. By default, a gloo-backend group
            containing all ranks is used.

    Returns:
        list[data]: on dst, a list of data gathered from each rank.
            An empty list on all other ranks.
    """
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    group_size = dist.get_world_size(group=group)
    if group_size == 1:
        return [data]

    if dist.get_rank(group=group) != dst:
        # Non-destination ranks participate but collect nothing.
        dist.gather_object(data, None, dst=dst, group=group)
        return []
    gathered = [None] * group_size
    dist.gather_object(data, gathered, dst=dst, group=group)
    return gathered
def shared_random_seed():
    """
    Returns:
        int: a random number that is the same across all workers.
        If workers need a shared RNG, they can use this shared seed to
        create one.

    All workers must call this function, otherwise it will deadlock.
    """
    local_draw = np.random.randint(2 ** 31)
    # Every rank draws, but everyone adopts rank 0's draw.
    return all_gather(local_draw)[0]
def reduce_dict(input_dict, average=True):
    """
    Reduce the values in the dictionary from all processes so that process with rank
    0 has the reduced results.

    Args:
        input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor.
        average (bool): whether to do average or sum

    Returns:
        a dict with the same keys as input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        # Sort the keys so that the stacking order is consistent across processes.
        names = sorted(input_dict.keys())
        values = torch.stack([input_dict[k] for k in names], dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            # Only the main process holds the accumulated sum, so only it divides
            # by world_size.
            values /= world_size
        reduced_dict = dict(zip(names, values))
    return reduced_dict
import typing
from typing import Any, List
import fvcore
from fvcore.nn import activation_count, flop_count, parameter_count, parameter_count_table
from torch import nn
from detectron2.export import TracingAdapter
# Public API of this module; parameter_count(_table) are re-exported from fvcore.
__all__ = [
    "activation_count_operators",
    "flop_count_operators",
    "parameter_count_table",
    "parameter_count",
    "FlopCountAnalysis",
]
# Mode selectors used by _wrapper_count_operators.
FLOPS_MODE = "flops"
ACTIVATIONS_MODE = "activations"
# Some extra ops to ignore from counting, including elementwise and reduction ops
_IGNORED_OPS = {
    "aten::add",
    "aten::add_",
    "aten::argmax",
    "aten::argsort",
    "aten::batch_norm",
    "aten::constant_pad_nd",
    "aten::div",
    "aten::div_",
    "aten::exp",
    "aten::log2",
    "aten::max_pool2d",
    "aten::meshgrid",
    "aten::mul",
    "aten::mul_",
    "aten::neg",
    "aten::nonzero_numpy",
    "aten::reciprocal",
    "aten::repeat_interleave",
    "aten::rsub",
    "aten::sigmoid",
    "aten::sigmoid_",
    "aten::softmax",
    "aten::sort",
    "aten::sqrt",
    "aten::sub",
    "torchvision::nms",  # TODO estimate flop for nms
}
class FlopCountAnalysis(fvcore.nn.FlopCountAnalysis):
    """
    Same as :class:`fvcore.nn.FlopCountAnalysis`, but supports detectron2 models.
    """

    def __init__(self, model, inputs):
        """
        Args:
            model (nn.Module):
            inputs (Any): inputs of the given model. Does not have to be tuple of tensors.
        """
        # Flatten arbitrary (possibly non-tensor) inputs into a traceable form.
        adapter = TracingAdapter(model, inputs, allow_non_tensor=True)
        super().__init__(adapter, adapter.flattened_inputs)
        # Register a None handler for each ignored op so it is not counted.
        self.set_op_handle(**dict.fromkeys(_IGNORED_OPS, None))
def flop_count_operators(model: nn.Module, inputs: list) -> typing.DefaultDict[str, float]:
    """
    Implement operator-level flops counting using jit.
    This is a wrapper of :func:`fvcore.nn.flop_count` and adds supports for standard
    detection models in detectron2.
    Please use :class:`FlopCountAnalysis` for more advanced functionalities.
    Note:
        The function runs the input through the model to compute flops.
        The flops of a detection model is often input-dependent, for example,
        the flops of box & mask head depends on the number of proposals &
        the number of detected objects.
        Therefore, the flops counting using a single input may not accurately
        reflect the computation cost of a model. It's recommended to average
        across a number of inputs.
    Args:
        model: a detectron2 model that takes `list[dict]` as input.
        inputs (list[dict]): inputs to model, in detectron2's standard format.
            Only "image" key will be used.
    Returns:
        Counter: Gflop count per operator
    """
    # Evaluate in eval mode for a deterministic trace, then restore the old mode.
    old_train = model.training
    model.eval()
    ret = FlopCountAnalysis(model, inputs).by_operator()
    model.train(old_train)
    # Convert raw flop counts to Gflops.
    return {k: v / 1e9 for k, v in ret.items()}
def activation_count_operators(
    model: nn.Module, inputs: list, **kwargs
) -> typing.DefaultDict[str, float]:
    """
    Implement operator-level activations counting using jit.
    This is a wrapper of fvcore.nn.activation_count, that supports standard detection models
    in detectron2.
    Note:
        The function runs the input through the model to compute activations.
        The activations of a detection model is often input-dependent, for example,
        the activations of box & mask head depends on the number of proposals &
        the number of detected objects.
    Args:
        model: a detectron2 model that takes `list[dict]` as input.
        inputs (list[dict]): inputs to model, in detectron2's standard format.
            Only "image" key will be used.
        kwargs: forwarded to :func:`fvcore.nn.activation_count`
            (e.g. ``supported_ops``).
    Returns:
        Counter: activation count per operator
    """
    return _wrapper_count_operators(model=model, inputs=inputs, mode=ACTIVATIONS_MODE, **kwargs)
def _wrapper_count_operators(
    model: nn.Module, inputs: list, mode: str, **kwargs
) -> typing.DefaultDict[str, float]:
    """Shared implementation behind flop/activation operator counting."""
    # Ignore some ops: each handler returns an empty dict so the op adds nothing.
    supported_ops = {k: lambda *args, **kwargs: {} for k in _IGNORED_OPS}
    supported_ops.update(kwargs.pop("supported_ops", {}))
    kwargs["supported_ops"] = supported_ops
    assert len(inputs) == 1, "Please use batch size=1"
    tensor_input = inputs[0]["image"]
    inputs = [{"image": tensor_input}]  # remove other keys, in case there are any
    old_train = model.training
    # Unwrap (Distributed)DataParallel so tracing sees the underlying module.
    if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)):
        model = model.module
    wrapper = TracingAdapter(model, inputs)
    wrapper.eval()
    if mode == FLOPS_MODE:
        ret = flop_count(wrapper, (tensor_input,), **kwargs)
    elif mode == ACTIVATIONS_MODE:
        ret = activation_count(wrapper, (tensor_input,), **kwargs)
    else:
        raise NotImplementedError("Count for mode {} is not supported yet.".format(mode))
    # compatible with change in fvcore: newer versions return (counts, skipped_ops)
    if isinstance(ret, tuple):
        ret = ret[0]
    model.train(old_train)
    return ret
def find_unused_parameters(model: nn.Module, inputs: Any) -> List[str]:
    """
    Given a model, find parameters that do not contribute
    to the loss.

    Args:
        model: a model in training mode that returns losses
        inputs: argument or a tuple of arguments. Inputs of the model

    Returns:
        list[str]: the name of unused parameters
    """
    assert model.training

    # Clear any stale gradients so a missing .grad below reliably means "unused".
    for _, prm in model.named_parameters():
        prm.grad = None

    if isinstance(inputs, tuple):
        losses = model(*inputs)
    else:
        losses = model(inputs)

    if isinstance(losses, dict):
        losses = sum(losses.values())
    losses.backward()

    unused: List[str] = []
    for name, prm in model.named_parameters():
        if prm.grad is None:
            unused.append(name)
        prm.grad = None  # leave the model with clean gradients
    return unused
import math
import numpy as np
import torch
import torch.nn as nn
from utils.downloads import attempt_download
class Sum(nn.Module):
    """(Optionally weighted) sum of 2 or more layer outputs, https://arxiv.org/abs/1911.09070."""

    def __init__(self, n, weight=False):  # n: number of inputs
        super().__init__()
        self.weight = weight  # whether to apply learnable weights
        self.iter = range(n - 1)  # indices of the inputs added onto the first
        if weight:
            # Learnable per-input weights; sigmoid in forward maps them into (0, 2).
            self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True)

    def forward(self, x):
        total = x[0]
        if self.weight:
            w = torch.sigmoid(self.w) * 2
            for i in self.iter:
                total = total + x[i + 1] * w[i]
        else:
            for i in self.iter:
                total = total + x[i + 1]
        return total
class MixConv2d(nn.Module):
    # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595
    # Runs several kernel sizes in parallel and concatenates their channel outputs.
    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):  # ch_in, ch_out, kernel, stride, ch_strategy
        super().__init__()
        n = len(k)  # number of convolutions
        if equal_ch:  # equal c_ per group
            i = torch.linspace(0, n - 1E-6, c2).floor()  # c2 indices
            c_ = [(i == g).sum() for g in range(n)]  # intermediate channels
        else:  # equal weight.numel() per group
            # Solve a small linear system so each kernel size gets roughly the same
            # number of weights (channels scaled by 1/k^2), subject to summing to c2.
            b = [c2] + [0] * n
            a = np.eye(n + 1, n, k=-1)
            a -= np.roll(a, 1, axis=1)
            a *= np.array(k) ** 2
            a[0] = 1
            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b
        # NOTE: `k` and `c_` are deliberately shadowed inside the comprehension below.
        self.m = nn.ModuleList([
            nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)])
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.SiLU()
    def forward(self, x):
        # Concatenate all branch outputs along channels, then BN + SiLU.
        return self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
class Ensemble(nn.ModuleList):
    """A list of models whose per-model detections are concatenated (NMS ensemble)."""

    def __init__(self):
        super().__init__()

    def forward(self, x, augment=False, profile=False, visualize=False):
        outputs = [m(x, augment, profile, visualize)[0] for m in self]
        # Alternative reductions, kept for reference:
        # y = torch.stack(outputs).max(0)[0]  # max ensemble
        # y = torch.stack(outputs).mean(0)  # mean ensemble
        y = torch.cat(outputs, 1)  # nms ensemble
        return y, None  # inference, train output
def attempt_load(weights, device=None, inplace=True, fuse=True):
    """Load an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a.

    Args:
        weights: checkpoint path(s) to load.
        device: device to map the loaded model(s) onto.
        inplace (bool): value set on activation/Detect modules (torch>=1.7 compat).
        fuse (bool): fuse Conv+BN layers when the checkpoint supports it.

    Returns:
        A single model when one checkpoint is given, otherwise an `Ensemble`.
    """
    from models.yolo import Detect, Model

    model = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        ckpt = torch.load(attempt_download(w), map_location='cpu')  # load
        ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float()  # FP32 model

        # Model compatibility updates
        if not hasattr(ckpt, 'stride'):
            ckpt.stride = torch.tensor([32.])
        if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):
            ckpt.names = dict(enumerate(ckpt.names))  # convert to dict

        model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval())  # model in eval mode

    # Module compatibility updates
    for m in model.modules():
        t = type(m)
        if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
            m.inplace = inplace  # torch 1.7.0 compatibility
        if t is Detect and not isinstance(m.anchor_grid, list):
            delattr(m, 'anchor_grid')
            setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
        elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
            m.recompute_scale_factor = None  # torch 1.11.0 compatibility

    # Return a bare model when a single checkpoint was loaded
    if len(model) == 1:
        return model[-1]

    # Return detection ensemble
    print(f'Ensemble created with {weights}\n')
    for k in 'names', 'nc', 'yaml':
        setattr(model, k, getattr(model[0], k))
    model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride
    assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'
    return model
import threading
class Callbacks:
    """
    Handles all registered callbacks for YOLOv5 Hooks.
    """

    def __init__(self):
        # Define the available callbacks
        self._callbacks = {
            'on_pretrain_routine_start': [],
            'on_pretrain_routine_end': [],
            'on_train_start': [],
            'on_train_epoch_start': [],
            'on_train_batch_start': [],
            'optimizer_step': [],
            'on_before_zero_grad': [],
            'on_train_batch_end': [],
            'on_train_epoch_end': [],
            'on_val_start': [],
            'on_val_batch_start': [],
            'on_val_image_end': [],
            'on_val_batch_end': [],
            'on_val_end': [],
            'on_fit_epoch_end': [],  # fit = train + val
            'on_model_save': [],
            'on_train_end': [],
            'on_params_update': [],
            'teardown': [],
        }
        self.stop_training = False  # set True to interrupt training

    def register_action(self, hook, name='', callback=None):
        """
        Register a new action to a callback hook.

        Args:
            hook: The callback hook name to register the action to
            name: The name of the action for later reference
            callback: The callback to fire
        """
        assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
        assert callable(callback), f"callback '{callback}' is not callable"
        self._callbacks[hook].append({'name': name, 'callback': callback})

    def get_registered_actions(self, hook=None):
        """
        Return all the registered actions by callback hook.

        Args:
            hook: The name of the hook to check, defaults to all
        """
        return self._callbacks[hook] if hook else self._callbacks

    def run(self, hook, *args, thread=False, **kwargs):
        """
        Loop through the registered actions and fire all callbacks.

        Args:
            hook: The name of the hook to check, defaults to all
            args: Arguments to receive from YOLOv5
            thread: (boolean) Run callbacks in daemon thread
            kwargs: Keyword Arguments to receive from YOLOv5
        """
        assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
        for action in self._callbacks[hook]:
            if thread:
                threading.Thread(target=action['callback'], args=args, kwargs=kwargs, daemon=True).start()
            else:
                action['callback'](*args, **kwargs)
from copy import deepcopy
import numpy as np
import torch
from utils.general import LOGGER, colorstr
from utils.torch_utils import profile
def check_train_batch_size(model, imgsz=640, amp=True):
    """Return the optimal YOLOv5 training batch size for `model` at image size `imgsz`."""
    # Profile under AMP autocast so the memory footprint matches training conditions.
    with torch.cuda.amp.autocast(amp):
        model_copy = deepcopy(model).train()
        return autobatch(model_copy, imgsz)  # compute optimal batch size
def autobatch(model, imgsz=640, fraction=0.9, batch_size=16):
    """Automatically estimate the best batch size to use `fraction` of available CUDA memory.

    Usage:
        import torch
        from utils.autobatch import autobatch
        model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)
        print(autobatch(model))

    Args:
        model: model to profile (on its current device).
        imgsz (int): square image size used for profiling.
        fraction (float): target fraction of total CUDA memory to occupy.
        batch_size (int): fallback batch size when estimation is impossible.

    Returns:
        int: estimated optimal batch size.
    """
    # Check device
    prefix = colorstr('AutoBatch: ')
    LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}')
    device = next(model.parameters()).device  # get model device
    if device.type == 'cpu':
        LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')
        return batch_size

    # Inspect CUDA memory
    gb = 1 << 30  # bytes to GiB (1024 ** 3)
    d = str(device).upper()  # 'CUDA:0'
    properties = torch.cuda.get_device_properties(device)  # device properties
    t = properties.total_memory / gb  # GiB total
    r = torch.cuda.memory_reserved(device) / gb  # GiB reserved
    a = torch.cuda.memory_allocated(device) / gb  # GiB allocated
    f = t - (r + a)  # GiB free
    LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')

    # Profile batch sizes
    batch_sizes = [1, 2, 4, 8, 16]
    try:
        img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes]
        results = profile(img, model, n=3, device=device)
    except Exception as e:
        # BUGFIX: previously execution fell through here with `results` undefined,
        # raising NameError below. Fall back to the default batch size instead.
        LOGGER.warning(f'{prefix}{e}')
        return batch_size

    # Fit a first-degree polynomial: memory used as a function of batch size
    y = [x[2] for x in results if x]  # memory [2]
    p = np.polyfit(batch_sizes[:len(y)], y, deg=1)
    b = int((f * fraction - p[1]) / p[0])  # optimal batch size for the free-memory budget
    if None in results:  # some sizes failed
        i = results.index(None)  # first fail index
        if b >= batch_sizes[i]:  # intercept above failure point
            b = batch_sizes[max(i - 1, 0)]  # select prior safe point
    if b < 1:  # zero or negative batch size
        b = 16
        LOGGER.warning(f'{prefix}WARNING: ⚠️ CUDA anomaly detected, recommend restart environment and retry command.')

    fraction = np.polyval(p, b) / t  # actual fraction predicted
    LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅')
    return b
import numpy as np
from ..metrics import ap_per_class
def fitness(x):
    """Model fitness as a weighted combination of the first 8 metric columns."""
    # Weights over [P(B), R(B), mAP@.5(B), mAP@.5:.95(B), P(M), R(M), mAP@.5(M), mAP@.5:.95(M)]
    weights = np.array([0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9])
    return (x[:, :8] * weights).sum(1)
def ap_per_class_box_and_mask(
    tp_m,
    tp_b,
    conf,
    pred_cls,
    target_cls,
    plot=False,
    save_dir=".",
    names=(),
):
    """
    Compute per-class AP separately for boxes and masks.

    Args:
        tp_b: tp of boxes.
        tp_m: tp of masks.
        other arguments see `func: ap_per_class`.

    Returns:
        dict with "boxes" and "masks" entries, each mapping p/r/ap/f1/ap_class.
    """
    results = {}
    for key, tp, prefix in (("boxes", tp_b, "Box"), ("masks", tp_m, "Mask")):
        # ap_per_class returns (tp, fp, p, r, f1, ap, unique_classes); drop tp/fp.
        p, r, f1, ap, ap_class = ap_per_class(tp,
                                              conf,
                                              pred_cls,
                                              target_cls,
                                              plot=plot,
                                              save_dir=save_dir,
                                              names=names,
                                              prefix=prefix)[2:]
        # Key order matters: Metrics.update consumes .values() positionally.
        results[key] = {"p": p, "r": r, "ap": ap, "f1": f1, "ap_class": ap_class}
    return results
class Metric:
    """Container for per-class detection metrics (precision, recall, F1, AP)."""

    def __init__(self) -> None:
        self.p = []  # precision per class, (nc, )
        self.r = []  # recall per class, (nc, )
        self.f1 = []  # F1 per class, (nc, )
        self.all_ap = []  # AP per class per IoU threshold, (nc, 10)
        self.ap_class_index = []  # class index for each row, (nc, )

    @property
    def ap50(self):
        """AP@0.5 of all classes.

        Return:
            (nc, ) or [].
        """
        if not len(self.all_ap):
            return []
        return self.all_ap[:, 0]

    @property
    def ap(self):
        """AP@0.5:0.95

        Return:
            (nc, ) or [].
        """
        if not len(self.all_ap):
            return []
        return self.all_ap.mean(1)

    @property
    def mp(self):
        """Mean precision over all classes (0.0 when empty)."""
        if not len(self.p):
            return 0.0
        return self.p.mean()

    @property
    def mr(self):
        """Mean recall over all classes (0.0 when empty)."""
        if not len(self.r):
            return 0.0
        return self.r.mean()

    @property
    def map50(self):
        """Mean AP@0.5 over all classes (0.0 when empty)."""
        if not len(self.all_ap):
            return 0.0
        return self.all_ap[:, 0].mean()

    @property
    def map(self):
        """Mean AP@0.5:0.95 over all classes (0.0 when empty)."""
        if not len(self.all_ap):
            return 0.0
        return self.all_ap.mean()

    def mean_results(self):
        """Mean of results: (mp, mr, map50, map)."""
        return (self.mp, self.mr, self.map50, self.map)

    def class_result(self, i):
        """Class-aware result at position i: (p[i], r[i], ap50[i], ap[i])."""
        return (self.p[i], self.r[i], self.ap50[i], self.ap[i])

    def get_maps(self, nc):
        """Return an (nc, ) array of per-class AP, defaulting to the overall mAP."""
        maps = np.zeros(nc) + self.map
        for i, c in enumerate(self.ap_class_index):
            maps[c] = self.ap[i]
        return maps

    def update(self, results):
        """
        Args:
            results: tuple(p, r, ap, f1, ap_class)
        """
        self.p, self.r, self.all_ap, self.f1, self.ap_class_index = results
class Metrics:
    """Metric for boxes and masks."""

    def __init__(self) -> None:
        self.metric_box = Metric()
        self.metric_mask = Metric()

    def update(self, results):
        """
        Args:
            results: Dict{'boxes': Dict{}, 'masks': Dict{}}
        """
        # Positional order of .values() must match Metric.update's unpacking.
        self.metric_box.update(list(results["boxes"].values()))
        self.metric_mask.update(list(results["masks"].values()))

    def mean_results(self):
        """Box mean results followed by mask mean results (tuple concatenation)."""
        return self.metric_box.mean_results() + self.metric_mask.mean_results()

    def class_result(self, i):
        """Box class results followed by mask class results for position i."""
        return self.metric_box.class_result(i) + self.metric_mask.class_result(i)

    def get_maps(self, nc):
        # NOTE(review): both operands are numpy arrays, so `+` adds them
        # elementwise rather than concatenating — confirm this is intended.
        return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc)

    @property
    def ap_class_index(self):
        # Boxes and masks share the same ap_class_index.
        return self.metric_box.ap_class_index
# Column headers for per-epoch results logging (train losses, metrics, val losses, LRs).
KEYS = [
    "train/box_loss",
    "train/seg_loss",  # train loss
    "train/obj_loss",
    "train/cls_loss",
    "metrics/precision(B)",
    "metrics/recall(B)",
    "metrics/mAP_0.5(B)",
    "metrics/mAP_0.5:0.95(B)",  # metrics
    "metrics/precision(M)",
    "metrics/recall(M)",
    "metrics/mAP_0.5(M)",
    "metrics/mAP_0.5:0.95(M)",  # metrics
    "val/box_loss",
    "val/seg_loss",  # val loss
    "val/obj_loss",
    "val/cls_loss",
    "x/lr0",
    "x/lr1",
    "x/lr2",
]

# Column headers for the best-epoch summary row.
BEST_KEYS = [
    "best/epoch",
    "best/precision(B)",
    "best/recall(B)",
    "best/mAP_0.5(B)",
    "best/mAP_0.5:0.95(B)",
    "best/precision(M)",
    "best/recall(M)",
    "best/mAP_0.5(M)",
    "best/mAP_0.5:0.95(M)",
]
import cv2
import torch
import torch.nn.functional as F
def crop(masks, boxes):
    """
    "Crop" predicted masks by zeroing out everything not in the predicted bbox.
    Vectorized by Chong (thanks Chong).
    Args:
        - masks should be a size [n, h, w] tensor of masks (unpacked as n, h, w below)
        - boxes should be a size [n, 4] tensor of bbox coords in relative point form
    """
    n, h, w = masks.shape
    x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1)  # each coord shape(n,1,1)
    r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :]  # column indices, shape(1,1,w)
    c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None]  # row indices, shape(1,h,1)
    # Boolean box membership broadcasts to (n,h,w); multiply zeroes everything outside.
    return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
def process_mask_upsample(protos, masks_in, bboxes, shape):
    """
    Crop after upsample.
    proto_out: [mask_dim, mask_h, mask_w]
    out_masks: [n, mask_dim], n is number of masks after nms
    bboxes: [n, 4], n is number of masks after nms
    shape: input_image_size, (h, w)
    return: thresholded (0/1) masks of shape (n, h, w)
    """
    c, mh, mw = protos.shape  # CHW
    # Linear combination of prototypes per instance, squashed to probabilities.
    masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
    masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0]  # CHW
    masks = crop(masks, bboxes)  # CHW
    return masks.gt_(0.5)
def process_mask(protos, masks_in, bboxes, shape, upsample=False):
    """
    Crop before upsample (cheaper than process_mask_upsample).
    proto_out: [mask_dim, mask_h, mask_w]
    out_masks: [n, mask_dim], n is number of masks after nms
    bboxes: [n, 4], n is number of masks after nms, in input-image coordinates
    shape: input_image_size, (h, w)
    return: thresholded (0/1) masks, (n, mask_h, mask_w) or (n, h, w) when upsample
    """
    c, mh, mw = protos.shape  # CHW
    ih, iw = shape
    masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)  # CHW
    # Scale boxes from input-image space down to prototype (mask) space.
    downsampled_bboxes = bboxes.clone()
    downsampled_bboxes[:, 0] *= mw / iw
    downsampled_bboxes[:, 2] *= mw / iw
    downsampled_bboxes[:, 3] *= mh / ih
    downsampled_bboxes[:, 1] *= mh / ih
    masks = crop(masks, downsampled_bboxes)  # CHW
    if upsample:
        masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0]  # CHW
    return masks.gt_(0.5)
def scale_masks(img1_shape, masks, img0_shape, ratio_pad=None):
    """
    Rescale masks from model-input space back to original image space.
    img1_shape: model input shape, [h, w]
    img0_shape: origin pic shape, [h, w, 3]
    masks: [h, w, num]
    ratio_pad: optional ((gain, gain), (pad_w, pad_h)) from letterboxing;
        computed from the two shapes when None.
    """
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]
    tl_pad = int(pad[1]), int(pad[0])  # y, x
    br_pad = int(img1_shape[0] - pad[1]), int(img1_shape[1] - pad[0])
    if len(masks.shape) < 2:
        raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}')
    # Strip the letterbox padding: masks_h, masks_w, n
    masks = masks[tl_pad[0]:br_pad[0], tl_pad[1]:br_pad[1]]
    # Torch-based alternative kept for reference:
    # 1, n, masks_h, masks_w
    # masks = masks.permute(2, 0, 1).contiguous()[None, :]
    # # shape = [1, n, masks_h, masks_w] after F.interpolate, so take first element
    # masks = F.interpolate(masks, img0_shape[:2], mode='bilinear', align_corners=False)[0]
    # masks = masks.permute(1, 2, 0).contiguous()
    # masks_h, masks_w, n
    masks = cv2.resize(masks, (img0_shape[1], img0_shape[0]))
    # cv2.resize drops a trailing singleton channel; restore it (keepdim)
    if len(masks.shape) == 2:
        masks = masks[:, :, None]
    return masks
def mask_iou(mask1, mask2, eps=1e-7):
    """
    Pairwise IoU between two sets of flattened masks.

    mask1: [N, n], N predicted masks
    mask2: [M, n], M gt masks
    Note: n means image_w x image_h

    return: masks iou, [N, M]
    """
    inter = torch.matmul(mask1, mask2.t()).clamp(0)
    area1 = mask1.sum(1)[:, None]
    area2 = mask2.sum(1)[None]
    # IoU = intersection / (area1 + area2 - intersection)
    return inter / (area1 + area2 - inter + eps)
def masks_iou(mask1, mask2, eps=1e-7):
    """
    Elementwise (paired) IoU between two equal-length sets of flattened masks.

    mask1: [N, n], N predicted masks
    mask2: [N, n], N gt masks paired row-by-row with mask1
    Note: n means image_w x image_h

    return: per-pair IoU; the [None] below broadcasts the result to shape (1, N)
    """
    intersection = (mask1 * mask2).sum(1).clamp(0)  # (N, )
    union = (mask1.sum(1) + mask2.sum(1))[None] - intersection  # (area1 + area2) - intersection
    return intersection / (union + eps)
# The Room environment - v1
[](https://badge.fury.io/py/room-env)
For the documentation of [RoomEnv-v0](./documents/README-v0.md), click the corresponding buttons.
This document, RoomEnv-v1, is the most up-to-date one.
We have released a challenging [Gymnasium](https://www.gymlibrary.dev/) compatible
environment. The best strategy for this environment is to have both episodic and semantic
memory systems. See the [paper](https://arxiv.org/abs/2212.02098) for more information.
## Prerequisites
1. A unix or unix-like x86 machine
1. python 3.8 or higher.
1. Running in a virtual environment (e.g., conda, virtualenv, etc.) is highly recommended so that you don't mess up with the system python.
1. This env is added to the PyPI server. Just run: `pip install room-env`
## RoomEnv-v1
```python
import gymnasium as gym
import room_env
import random
env = gym.make("RoomEnv-v1")
observation, info = env.reset()
rewards = 0
while True:
# There is one different thing in the RoomEnv from the original AAAI-2023 paper:
# The reward is either +1 or -1, instead of +1 or 0.
observation, reward, done, truncated, info = env.step(random.randint(0, 2))
rewards += reward
if done:
break
print(rewards)
```
Every time when an agent takes an action, the environment will give you three memory
systems (i.e., episodic, semantic, and short-term), as an `observation`. The goal of the
agent is to learn a memory management policy. The actions are:
- 0: Put the short-term memory into the episodic memory system.
- 1: Put it into the semantic.
- 2: Just forget it.
The memory systems will be managed according to your actions, and they will eventually
be used to answer questions. You don't have to worry about the question answering. It's done
by the environment. The better you manage your memory systems, the higher chances that
your agent can answer more questions correctly!
The default parameters for the environment are
```json
{
"des_size": "l",
"seed": 42,
"policies": {"encoding": "argmax",
"memory_management": "RL",
"question_answer": "episodic_semantic"},
"capacity": {"episodic": 16, "semantic": 16, "short": 1},
"question_prob": 1.0,
"observation_params": "perfect",
"allow_random_human": False,
"allow_random_question": False,
"total_episode_rewards": 128,
"pretrain_semantic": False,
"check_resources": True,
"varying_rewards": False
}
```
If you want to create an env with a different set of parameters, you can do so. For example:
```python
env_params = {"seed": 0,
"capacity": {"episodic": 8, "semantic": 16, "short": 1},
"pretrain_semantic": True}
env = gym.make("RoomEnv-v1", **env_params)
```
Take a look at [this repo](https://github.com/tae898/explicit-memory) for an actual
interaction with this environment to learn a policy.
## Data collection
Data is collected from querying ConceptNet APIs. For simplicity, we only collect triples
whose format is (`head`, `AtLocation`, `tail`). Here `head` is one of the 80 MS COCO
dataset categories. This was kept in mind so that later on we can use images as well.
If you want to collect the data manually, then run below:
```
python collect_data.py
```
## [The RoomDes](room_env/des.py)
The DES is part of RoomEnv. You don't have to care about how it works. If you are still
curious, you can read below.
You can run the RoomDes by
```python
from room_env.des import RoomDes
des = RoomDes()
des.run(debug=True)
```
with `debug=True` it'll print events (i.e., state changes) to the console.
```console
{'resource_changes': {'desk': -1, 'lap': 1},
'state_changes': {'Vincent': {'current_time': 1,
'object_location': {'current': 'desk',
'previous': 'lap'}}}}
{'resource_changes': {}, 'state_changes': {}}
{'resource_changes': {}, 'state_changes': {}}
{'resource_changes': {},
'state_changes': {'Michael': {'current_time': 4,
'object_location': {'current': 'lap',
'previous': 'desk'}},
'Tae': {'current_time': 4,
'object_location': {'current': 'desk',
'previous': 'lap'}}}}
```
## Contributing
Contributions are what make the open source community such an amazing place to learn,
inspire, and create. Any contributions you make are **greatly appreciated**.
1. Fork the Project
1. Create your Feature Branch (`git checkout -b feature/AmazingFeature`)
1. Run `make test && make style && make quality` in the root repo directory,
to ensure code quality.
1. Commit your Changes (`git commit -m 'Add some AmazingFeature'`)
1. Push to the Branch (`git push origin feature/AmazingFeature`)
1. Open a Pull Request
## [Cite our paper](https://arxiv.org/abs/2212.02098)
```bibtex
@misc{https://doi.org/10.48550/arxiv.2212.02098,
doi = {10.48550/ARXIV.2212.02098},
url = {https://arxiv.org/abs/2212.02098},
author = {Kim, Taewoon and Cochez, Michael and François-Lavet, Vincent and Neerincx, Mark and Vossen, Piek},
keywords = {Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences},
title = {A Machine with Short-Term, Episodic, and Semantic Memory Systems},
publisher = {arXiv},
year = {2022},
copyright = {Creative Commons Attribution 4.0 International}
}
```
## Cite our code
[](https://zenodo.org/badge/latestdoi/477781069)
## Authors
- [Taewoon Kim](https://taewoon.kim/)
- [Michael Cochez](https://www.cochez.nl/)
- [Vincent Francois-Lavet](http://vincent.francois-l.be/)
- [Mark Neerincx](https://ocw.tudelft.nl/teachers/m_a_neerincx/)
- [Piek Vossen](https://vossen.info/)
## License
[MIT](https://choosealicense.com/licenses/mit/)
| /room_env-1.0.2.tar.gz/room_env-1.0.2/README.md | 0.533884 | 0.93744 | README.md | pypi |
import logging
import os
import random
from copy import deepcopy
from pprint import pformat
from typing import List, Tuple
from .utils import get_duplicate_dicts, list_duplicates_of
logging.basicConfig(
level=os.environ.get("LOGLEVEL", "INFO").upper(),
format="%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
class Memory:
    """Memory (episodic, semantic, or short) class.

    Attributes
    ----------
    type: memory system type: "episodic", "semantic", or "short"
    entries: list of memory dicts currently stored
    capacity: maximum number of entries this system can hold
    """

    def __init__(self, memory_type: str, capacity: int) -> None:
        """
        Args
        ----
        memory_type: episodic, semantic, or short
        capacity: memory capacity
        """
        logging.debug(
            f"instantiating a {memory_type} memory object with size {capacity} ..."
        )

        assert memory_type in ["episodic", "semantic", "short"]
        self.type = memory_type
        self.entries = []
        self.capacity = capacity
        self._frozen = False

        logging.debug(f"{memory_type} memory object with size {capacity} instantiated!")

    def __repr__(self):
        return pformat(vars(self), indent=4, width=1)

    def forget(self, mem: dict) -> None:
        """forget the given memory.

        Args
        ----
        mem: A memory in a dictionary format.

            for episodic and short:
            {"human": <HUMAN>, "object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "timestamp": <TIMESTAMP>}

            for semantic:
            {"object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "num_generalized": <NUM_GENERALIZED>}

        Raises
        ------
        ValueError: if the memory system is frozen or ``mem`` is not stored.
        """
        if self._frozen:
            error_msg = "The memory system is frozen!"
            logging.error(error_msg)
            raise ValueError(error_msg)

        if mem not in self.entries:
            error_msg = f"{mem} is not in the memory system!"
            logging.error(error_msg)
            raise ValueError(error_msg)

        logging.debug(f"Forgetting {mem} ...")
        self.entries.remove(mem)
        logging.info(f"{mem} forgotten!")

    def forget_all(self) -> None:
        """Forget everything in the memory system!

        A frozen system only warns; nothing is removed.
        """
        if self.is_frozen:
            logging.warning(
                "The memory system is frozen. Can't forget all. Unfreeze first."
            )
        else:
            logging.warning("EVERYTHING IN THE MEMORY SYSTEM WILL BE FORGOTTEN!")
            self.entries = []

    @property
    def is_empty(self) -> bool:
        """Return true if empty."""
        return len(self.entries) == 0

    @property
    def is_full(self) -> bool:
        """Return true if full."""
        return len(self.entries) == self.capacity

    @property
    def is_frozen(self) -> bool:
        """Is frozen?"""
        return self._frozen

    @property
    def size(self) -> int:
        """Get the size (number of filled entries) of the memory system."""
        return len(self.entries)

    def freeze(self) -> None:
        """Freeze the memory so that nothing can be added / deleted."""
        self._frozen = True

    def unfreeze(self) -> None:
        """Unfreeze the memory so that something can be added / deleted."""
        self._frozen = False

    def forget_random(self) -> None:
        """Forget a memory in the memory system in a uniform distribution manner."""
        logging.warning("forgetting a random memory using a uniform distribution ...")
        mem = random.choice(self.entries)
        self.forget(mem)

    def increase_capacity(self, increase: int) -> None:
        """Increase the capacity.

        Args
        ----
        increase: the amount of entries to increase.

        Raises
        ------
        ValueError: if ``increase`` is not an int or the system is frozen.
        """
        # Validate with an explicit raise instead of ``assert``: asserts are
        # stripped under ``python -O`` and forget() already raises ValueError.
        if not isinstance(increase, int) or self.is_frozen:
            error_msg = f"Can't increase the memory capacity by {increase}!"
            logging.error(error_msg)
            raise ValueError(error_msg)

        logging.debug(f"Increasing the memory capacity by {increase} ...")
        self.capacity += increase
        logging.info(
            f"The memory capacity has been increased by {increase} and now it's "
            f"{self.capacity}!"
        )

    def decrease_capacity(self, decrease: int) -> None:
        """decrease the capacity.

        Args
        ----
        decrease: the amount of entries to decrease.

        Raises
        ------
        ValueError: if ``decrease`` is not an int, would make the capacity
            negative, or the system is frozen.
        """
        # Explicit validation for the same reasons as increase_capacity().
        if (
            not isinstance(decrease, int)
            or (self.capacity - decrease < 0)
            or self.is_frozen
        ):
            error_msg = f"Can't decrease the memory capacity by {decrease}!"
            logging.error(error_msg)
            raise ValueError(error_msg)

        logging.debug(f"Decreasing the memory capacity by {decrease} ...")
        self.capacity -= decrease
        logging.info(
            f"The memory capacity has been decreased by {decrease} and now it's "
            f"{self.capacity}!"
        )
class EpisodicMemory(Memory):
    """Episodic memory class."""

    def __init__(self, capacity: int) -> None:
        """Init an episodic memory system.

        Args
        ----
        capacity: capacity of the memory system (i.e., number of entries)
        """
        super().__init__("episodic", capacity)

    def can_be_added(self, mem: dict) -> bool:
        """Checks if a memory can be added to the system or not.

        Args
        ----
        mem: An episodic memory in a dictionary format

            {"human": <HUMAN>, "object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "timestamp": <TIMESTAMP>}

        Returns
        -------
        True or False
        """
        if (self.capacity <= 0) or (self._frozen) or (self.is_full):
            return False
        else:
            return True

    def add(self, mem: dict) -> None:
        """Append a memory to the episodic memory system.

        Args
        ----
        mem: An episodic memory in a dictionary format

            {"human": <HUMAN>, "object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "timestamp": <TIMESTAMP>}

        Raises
        ------
        ValueError: if the memory system is frozen.
        """
        if self._frozen:
            error_msg = "The memory system is frozen!"
            logging.error(error_msg)
            raise ValueError(error_msg)

        logging.debug(f"Adding a new memory entry {mem} ...")
        self.entries.append(mem)
        logging.info(
            f"memory entry {mem} added. Now there are in total of "
            f"{len(self.entries)} memories!"
        )
        # Merge duplicates that differ only in timestamp before sorting.
        self.clean_old_memories()

        # sort ascending
        self.entries.sort(key=lambda x: x["timestamp"])

        assert self.size <= self.capacity

    def get_oldest_memory(self, entries: list = None) -> dict:
        """Get the oldest memory in the episodic memory system.

        At the moment, this is simply done by looking up the timestamps and comparing
        them.

        Args
        ----
        entries: memories to search through; defaults to the whole system.

        Returns
        -------
        mem: the oldest memory in a dictionary format

            {"human": <HUMAN>, "object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "timestamp": <TIMESTAMP>}
        """
        if entries is None:
            logging.debug("No entries were specified. We'll use the memory system.")
            entries = self.entries

        # sorted() is ascending by default.
        mem_candidate = sorted(entries, key=lambda x: x["timestamp"])[0]
        # Break ties among equally-old memories uniformly at random.
        mem = random.choice(
            [mem for mem in entries if mem_candidate["timestamp"] == mem["timestamp"]]
        )
        logging.info(f"{mem} is the oldest memory in the entries.")

        return mem

    def get_latest_memory(self, entries: list = None) -> dict:
        """Get the latest memory in the episodic memory system.

        At the moment, this is simply done by looking up the timestamps and comparing
        them.

        Args
        ----
        entries: memories to search through; defaults to the whole system.

        Returns
        -------
        mem: the latest memory in a dictionary format

            {"human": <HUMAN>, "object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "timestamp": <TIMESTAMP>}
        """
        if entries is None:
            logging.debug("No entries were specified. We'll use the memory system.")
            entries = self.entries

        # sorted() is ascending by default.
        mem_candidate = sorted(entries, key=lambda x: x["timestamp"])[-1]
        # Break ties among equally-recent memories uniformly at random.
        mem = random.choice(
            [mem for mem in entries if mem_candidate["timestamp"] == mem["timestamp"]]
        )
        logging.info(f"{mem} is the latest memory in the entries.")

        return mem

    def forget_oldest(self) -> None:
        """Forget the oldest entry in the memory system.

        At the moment, this is simply done by looking up the timestamps and comparing
        them.
        """
        logging.debug("forgetting the oldest memory (FIFO)...")
        mem = self.get_oldest_memory()
        self.forget(mem)

    def answer_random(self) -> Tuple[str, int]:
        """Answer the question with a uniform-randomly chosen memory.

        Returns
        -------
        pred: prediction (e.g., desk)
        timestamp
        """
        if self.is_empty:
            logging.warning("Memory is empty. I can't answer any questions!")
            pred = None
            timestamp = None
        else:
            mem = random.choice(self.entries)
            pred = mem["object_location"]
            timestamp = mem["timestamp"]

        logging.info(f"pred: {pred}, timestamp: {timestamp}")

        return pred, timestamp

    def answer_latest(self, question: dict) -> Tuple[str, int]:
        """Answer the question with the latest relevant memory.

        If object X was found at Y and then later on found Z, then this strategy answers
        Z, instead of Y.

        Args
        ----
        question: a dict (i.e., {"human": <HUMAN>, "object": <OBJECT>})

        Returns
        -------
        pred: prediction
        timestamp: timestamp
        """
        logging.debug("answering a question with the answer_latest policy ...")

        if self.is_empty:
            logging.warning("Memory is empty. I can't answer any questions!")
            pred = None
            timestamp = None
        else:
            duplicates = get_duplicate_dicts(
                {"human": question["human"], "object": question["object"]}, self.entries
            )

            if len(duplicates) == 0:
                logging.info("no relevant memories found.")
                pred = None
                timestamp = None
            else:
                logging.info(
                    f"{len(duplicates)} relevant memories were found in the entries!"
                )
                mem = self.get_latest_memory(duplicates)
                pred = mem["object_location"]
                timestamp = mem["timestamp"]

        logging.info(f"pred: {pred}, timestamp: {timestamp}")

        return pred, timestamp

    @staticmethod
    def ob2epi(ob: dict) -> dict:
        """Turn an observation into an episodic memory.

        At the moment, the observation format is the same as an episodic memory
        for simplification.

        Args
        ----
        ob: An observation in a dictionary format

            {"human": <HUMAN>, "object": <OBJECT>,
            "object_location": <OBJECT_LOCATION>, "current_time": <CURRENT_TIME>}

        Returns
        -------
        mem: An episodic memory in a dictionary format

            {"human": <HUMAN>, "object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "timestamp": <TIMESTAMP>}
        """
        logging.debug(f"Turning an observation {ob} into a episodic memory ...")
        # Only the time key is renamed; everything else carries over.
        mem = deepcopy(ob)
        mem["timestamp"] = mem.pop("current_time")

        logging.info(f"Observation {ob} is now a episodic memory {mem}")

        return mem

    def find_same_memory(self, mem) -> dict:
        """Find an episodic memory that's almost the same as the query memory.

        The timestamp is deliberately ignored in the comparison.

        Args
        ----
        mem: An episodic memory in a dictionary format

            {"human": <HUMAN>, "object": <OBJECT>,
            "object_location": <OBJECT_LOCATION>, "timestamp": <TIMESTAMP>}

        Returns
        -------
        an episodic memory if it exists. Otherwise return None.
        """
        for entry in self.entries:
            if (
                (entry["human"] == mem["human"])
                and (entry["object"] == mem["object"])
                and (entry["object_location"] == mem["object_location"])
            ):
                return entry

        return None

    def clean_old_memories(self) -> None:
        """Find if there are duplicate memories with different timestamps.

        Duplicates (same human, object, and location) are merged into one entry
        that keeps the most recent timestamp.
        """
        logging.debug("finding if duplicate memories exist ...")

        entries = deepcopy(self.entries)
        logging.debug(f"There are {len(entries)} episdoic memories before cleaning")
        for entry in entries:
            del entry["timestamp"]

        entries = [str(mem) for mem in entries]  # to make list hashable
        uniques = set(entries)

        locs_all = [
            list_duplicates_of(entries, unique_entry) for unique_entry in uniques
        ]
        locs_all.sort(key=len)
        entries_cleaned = []

        for locs in locs_all:
            mem = self.entries[locs[0]]
            # Keep the most recent timestamp among the duplicates.
            mem["timestamp"] = max([self.entries[loc]["timestamp"] for loc in locs])
            entries_cleaned.append(mem)

        self.entries = entries_cleaned
        logging.debug(f"There are {len(self.entries)} episdoic memories after cleaning")
class ShortMemory(Memory):
    """Short-term memory class."""

    def __init__(self, capacity: int) -> None:
        """Init a short-term memory system.

        Args
        ----
        capacity: capacity of the memory system (i.e., number of entries)
        """
        super().__init__("short", capacity)

    def add(self, mem: dict) -> None:
        """Append a memory to the short memory system.

        Args
        ----
        mem: A short memory in a dictionary format

            {"human": <HUMAN>, "object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "timestamp": <TIMESTAMP>}

        Raises
        ------
        ValueError: if the memory system is frozen.
        """
        if self._frozen:
            error_msg = "The memory system is frozen!"
            logging.error(error_msg)
            raise ValueError(error_msg)

        # Unlike the episodic system, the short system never merges entries,
        # so there must be free space before adding.
        assert not self.is_full

        logging.debug(f"Adding a new memory entry {mem} ...")
        self.entries.append(mem)
        logging.info(
            f"memory entry {mem} added. Now there are in total of "
            f"{len(self.entries)} memories!"
        )
        # sort ascending
        self.entries.sort(key=lambda x: x["timestamp"])

        assert self.size <= self.capacity

    def get_oldest_memory(self, entries: list = None) -> dict:
        """Get the oldest memory in the short memory system.

        At the moment, this is simply done by looking up the timestamps and comparing
        them.

        Args
        ----
        entries: memories to search through; defaults to the whole system.

        Returns
        -------
        mem: the oldest memory in a dictionary format

            {"human": <HUMAN>, "object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "timestamp": <TIMESTAMP>}
        """
        if entries is None:
            logging.debug("No entries were specified. We'll use the memory system.")
            entries = self.entries

        # sorted() is ascending by default.
        mem_candidate = sorted(entries, key=lambda x: x["timestamp"])[0]
        # Break ties among equally-old memories uniformly at random.
        mem = random.choice(
            [mem for mem in entries if mem_candidate["timestamp"] == mem["timestamp"]]
        )
        logging.info(f"{mem} is the oldest memory in the entries.")

        return mem

    def get_latest_memory(self, entries: list = None) -> dict:
        """Get the latest memory in the short memory system.

        At the moment, this is simply done by looking up the timestamps and comparing
        them.

        Args
        ----
        entries: memories to search through; defaults to the whole system.

        Returns
        -------
        mem: the latest memory in a dictionary format

            {"human": <HUMAN>, "object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "timestamp": <TIMESTAMP>}
        """
        if entries is None:
            logging.debug("No entries were specified. We'll use the memory system.")
            entries = self.entries

        # sorted() is ascending by default.
        mem_candidate = sorted(entries, key=lambda x: x["timestamp"])[-1]
        # Break ties among equally-recent memories uniformly at random.
        mem = random.choice(
            [mem for mem in entries if mem_candidate["timestamp"] == mem["timestamp"]]
        )
        logging.info(f"{mem} is the latest memory in the entries.")

        return mem

    def forget_oldest(self) -> None:
        """Forget the oldest entry in the memory system.

        At the moment, this is simply done by looking up the timestamps and comparing
        them.
        """
        logging.debug("forgetting the oldest memory (FIFO)...")
        mem = self.get_oldest_memory()
        self.forget(mem)

    def find_similar_memories(self, mem) -> List:
        """Find short memories with the same object and object location.

        Args
        ----
        mem: A short memory in a dictionary format

            {"human": <HUMAN>, "object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "timestamp": <TIMESTAMP>}

        Returns
        -------
        similar: a list of matching short memories (may be empty).
        """
        logging.debug("Searching for similar memories in the short memory system...")
        similar = []
        for entry in self.entries:
            if (entry["object"] == mem["object"]) and (
                entry["object_location"] == mem["object_location"]
            ):
                similar.append(entry)

        logging.info(f"{len(similar)} similar short memories found!")

        return similar

    @staticmethod
    def ob2short(ob: dict) -> dict:
        """Turn an observation into an short memory.

        At the moment, the observation format is almost the same as an episodic memory
        for simplification.

        Args
        ----
        ob: An observation in a dictionary format

            {"human": <HUMAN>, "object": <OBJECT>,
            "object_location": <OBJECT_LOCATION>, "current_time": <CURRENT_TIME>}

        Returns
        -------
        mem: A short memory in a dictionary format

            {"human": <HUMAN>, "object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "timestamp": <TIMESTAMP>}
        """
        logging.debug(f"Turning an observation {ob} into a short memory ...")
        # Only the time key is renamed; everything else carries over.
        mem = deepcopy(ob)
        mem["timestamp"] = mem.pop("current_time")

        logging.info(f"Observation {ob} is now a short memory {mem}")

        return mem

    @staticmethod
    def short2epi(short: dict) -> dict:
        """Turn a short memory into a episodic memory.

        The two formats are currently identical, so this is a plain copy.

        Args
        ----
        short: A short memory in a dictionary format

            {"human": <HUMAN>, "object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "timestamp": <TIMESTAMP>}

        Returns
        -------
        epi: An episodic memory in a dictionary format

            {"human": <HUMAN>,
            "object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "timestamp": <TIMESTAMP>}
        """
        epi = deepcopy(short)

        return epi

    @staticmethod
    def short2sem(short: dict) -> dict:
        """Turn a short memory into a semantic memory.

        The person-specific keys are dropped and a generalization count of 1
        is attached.

        Args
        ----
        short: A short memory in a dictionary format

            {"human": <HUMAN>, "object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "timestamp": <TIMESTAMP>}

        Returns
        -------
        sem: A semantic memory in a dictionary format

            {"object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "num_generalized": <NUM_GENERALIZED>}
        """
        sem = deepcopy(short)

        del sem["human"]
        del sem["timestamp"]
        sem["num_generalized"] = 1

        return sem

    def find_same_memory(self, mem) -> dict:
        """Find a short memory that's almost the same as the query memory.

        The timestamp is deliberately ignored in the comparison.

        Args
        ----
        mem: A short memory in a dictionary format

            {"human": <HUMAN>, "object": <OBJECT>,
            "object_location": <OBJECT_LOCATION>, "timestamp": <TIMESTAMP>}

        Returns
        -------
        A short memory if it exists. Otherwise return None.
        """
        for entry in self.entries:
            if (
                (entry["human"] == mem["human"])
                and (entry["object"] == mem["object"])
                and (entry["object_location"] == mem["object_location"])
            ):
                return entry

        return None
class SemanticMemory(Memory):
    """Semantic memory class."""

    def __init__(
        self,
        capacity: int,
    ) -> None:
        """Init a semantic memory system.

        Args
        ----
        capacity: capacity of the memory system (i.e., number of entries)
        """
        super().__init__("semantic", capacity)

    def can_be_added(self, mem: dict) -> bool:
        """Checks if a memory can be added to the system or not.

        Args
        ----
        mem: A semantic memory in a dictionary format

            {"object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "num_generalized": <NUM_GENERALIZED>}

        Returns
        -------
        True or False
        """
        if self.capacity <= 0:
            return False

        if self._frozen:
            return False

        if self.is_full:
            # A full system can still accept a memory that merges into an
            # existing (object, object_location) entry instead of growing.
            if self.find_same_memory(mem) is None:
                return False
            else:
                return True
        else:
            return True

    def pretrain_semantic(
        self,
        semantic_knowledge: dict,
        return_remaining_space: bool = True,
        freeze: bool = True,
    ) -> int:
        """Pretrain the semantic memory system from ConceptNet.

        Args
        ----
        semantic_knowledge: from ConceptNet.
        return_remaining_space: whether or not to return the remaining space from the
            semantic memory system.
        freeze: whether or not to freeze the semantic memory system or not.

        Returns
        -------
        free_space: free space that was not used, if any, so that it can be added to
            the episodic memory system. None if ``return_remaining_space`` is False.
        """
        self.semantic_knowledge = deepcopy(semantic_knowledge)
        for obj, loc in self.semantic_knowledge.items():
            if self.is_full:
                break
            mem = {"object": obj, "object_location": loc, "num_generalized": 1}
            logging.debug(f"adding a pretrained semantic knowledge {mem}")
            self.add(mem)

        if return_remaining_space:
            free_space = self.capacity - len(self.entries)
            self.decrease_capacity(free_space)
            logging.info(
                f"The remaining space {free_space} will be returned. Now "
                f"the capacity of the semantic memory system is {self.capacity}"
            )
        else:
            free_space = None

        if freeze:
            self.freeze()
            logging.info("The semantic memory system is frozen!")

        return free_space

    def get_weakest_memory(self, entries: list = None) -> dict:
        """Get the weakest memory in the semantic memory system system.

        At the moment, this is simply done by looking up the number of generalized
        memories comparing them. In the end, an RL agent has to learn this
        by itself.

        Args
        ----
        entries: memories to search through; defaults to the whole system.

        Returns
        -------
        mem: the weakest memory in a dictionary format

            {"object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "num_generalized": <NUM_GENERALIZED>}
        """
        if entries is None:
            logging.debug("No entries were specified. We'll use the memory system.")
            entries = self.entries

        # sorted() is ascending by default.
        mem_candidate = sorted(entries, key=lambda x: x["num_generalized"])[0]
        # Break ties among equally-weak memories uniformly at random.
        mem = random.choice(
            [
                mem
                for mem in entries
                if mem_candidate["num_generalized"] == mem["num_generalized"]
            ]
        )
        logging.info(f"{mem} is the weakest memory in the entries.")

        return mem

    def get_strongest_memory(self, entries: list = None) -> dict:
        """Get the strongest memory in the semantic memory system system.

        At the moment, this is simply done by looking up the number of generalized
        memories comparing them.

        Args
        ----
        entries: memories to search through; defaults to the whole system.

        Returns
        -------
        mem: the strongest memory in a dictionary format

            {"object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "num_generalized": <NUM_GENERALIZED>}
        """
        if entries is None:
            logging.debug("No entries were specified. We'll use the memory system.")
            entries = self.entries

        # sorted() is ascending by default.
        mem_candidate = sorted(entries, key=lambda x: x["num_generalized"])[-1]
        # Break ties among equally-strong memories uniformly at random.
        mem = random.choice(
            [
                mem
                for mem in entries
                if mem_candidate["num_generalized"] == mem["num_generalized"]
            ]
        )
        logging.info(f"{mem} is the strongest memory in the entries.")

        return mem

    def find_similar_memories(self, mem) -> List:
        """Find semantic memories with the same object and object location.

        Args
        ----
        mem: A semantic memory in a dictionary format

            {"object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "num_generalized": <NUM_GENERALIZED>}

        Returns
        -------
        similar: a list of matching semantic memories (may be empty).
        """
        logging.debug("Searching for similar memories in the semantic memory system...")
        similar = []
        for entry in self.entries:
            if (entry["object"] == mem["object"]) and (
                entry["object_location"] == mem["object_location"]
            ):
                similar.append(entry)

        logging.info(f"{len(similar)} similar semantic memories found!")

        return similar

    def forget_weakest(self) -> None:
        """Forget the weakest entry in the semantic memory system.

        At the moment, this is simply done by looking up the number of generalized
        memories and comparing them.
        """
        logging.debug("forgetting the weakest memory ...")
        mem = self.get_weakest_memory()
        self.forget(mem)
        logging.info(f"{mem} is forgotten!")

    def answer_random(self) -> Tuple[str, int]:
        """Answer the question with a uniform-randomly chosen memory.

        Returns
        -------
        pred: prediction (e.g., desk)
        num_generalized
        """
        if self.is_empty:
            logging.warning("Memory is empty. I can't answer any questions!")
            pred = None
            num_generalized = None
        else:
            mem = random.choice(self.entries)
            pred = mem["object_location"]
            num_generalized = mem["num_generalized"]

        logging.info(f"pred: {pred}, num_generalized: {num_generalized}")

        return pred, num_generalized

    def answer_strongest(self, question: dict) -> Tuple[str, int]:
        """Answer the question (Find the head that matches the question, and choose the
        strongest one among them).

        Args
        ----
        question: a dict (i.e., {"human": <HUMAN>, "object": <OBJECT>})

        Returns
        -------
        pred: prediction
        num_generalized: number of generalized samples.
        """
        logging.debug("answering a question with the answer_strongest policy ...")

        if self.is_empty:
            logging.warning("Memory is empty. I can't answer any questions!")
            pred = None
            num_generalized = None
        else:
            duplicates = get_duplicate_dicts(
                {"object": question["object"]}, self.entries
            )
            if len(duplicates) == 0:
                logging.info("no relevant memories found.")
                pred = None
                num_generalized = None
            else:
                logging.info(
                    f"{len(duplicates)} relevant memories were found in the entries!"
                )
                mem = self.get_strongest_memory(duplicates)
                pred = mem["object_location"]
                num_generalized = mem["num_generalized"]

        logging.info(f"pred: {pred}, num_generalized: {num_generalized}")

        return pred, num_generalized

    @staticmethod
    def ob2sem(ob: dict) -> dict:
        """Turn an observation into a semantic memory.

        At the moment, this is simply done by removing the names from the head and the
        tail.

        Args
        ----
        ob: An observation in a dictionary format

            {"human": <HUMAN>, "object": <OBJECT>,
            "object_location": <OBJECT_LOCATION>, "current_time": <CURRENT_TIME>}

        Returns
        -------
        mem: A semantic memory in a dictionary format

            {"object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "num_generalized": <NUM_GENERALIZED>}
        """
        logging.debug(f"Turning an observation {ob} into a semantic memory ...")
        mem = deepcopy(ob)
        del mem["human"]
        del mem["current_time"]

        # 1 stands for the 1 generalized.
        mem["num_generalized"] = 1
        logging.info(f"Observation {ob} is now a semantic memory {mem}")

        return mem

    def clean_same_memories(self) -> None:
        """Find if there are duplicate memories cuz they should be summed out.

        At the moment, this is simply done by matching string values. Duplicates
        (same object and location) are merged into one entry whose
        ``num_generalized`` is the sum of the duplicates'.
        """
        logging.debug("finding if duplicate memories exist ...")

        entries = deepcopy(self.entries)
        logging.debug(
            f"There are in total of {len(entries)} semantic memories before cleaning"
        )
        for entry in entries:
            del entry["num_generalized"]

        entries = [str(mem) for mem in entries]  # to make list hashable
        uniques = set(entries)

        locs_all = [
            list_duplicates_of(entries, unique_entry) for unique_entry in uniques
        ]
        locs_all.sort(key=len)
        entries_cleaned = []

        for locs in locs_all:
            mem = self.entries[locs[0]]
            # Merge duplicates by summing their generalization counts.
            mem["num_generalized"] = sum(
                [self.entries[loc]["num_generalized"] for loc in locs]
            )
            entries_cleaned.append(mem)

        self.entries = entries_cleaned
        logging.debug(
            f"There are now in total of {len(self.entries)} semantic memories after cleaning"
        )

    def add(self, mem: dict):
        """Append a memory to the semantic memory system.

        Args
        ----
        mem: A memory in a dictionary format

            {"object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "num_generalized": <NUM_GENERALIZED>}

        Raises
        ------
        ValueError: if the memory system is frozen.
        """
        if self._frozen:
            error_msg = "The memory system is frozen!"
            logging.error(error_msg)
            raise ValueError(error_msg)

        logging.debug(f"Adding a new memory entry {mem} ...")
        self.entries.append(mem)
        logging.info(
            f"memory entry {mem} added. Now there are in total of "
            f"{len(self.entries)} memories!"
        )
        # Merge entries describing the same (object, location) pair.
        self.clean_same_memories()

        # sort ascending
        self.entries.sort(key=lambda x: x["num_generalized"])

        assert self.size <= self.capacity

    def find_same_memory(self, mem) -> dict:
        """Find a semantic memory that's almost the same as the query memory.

        ``num_generalized`` is deliberately ignored in the comparison.

        Args
        ----
        mem: A semantic memory in a dictionary format

            {"object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "num_generalized": <NUM_GENERALIZED>}

        Returns
        -------
        A semantic memory if it exists. Otherwise return None.
        """
        for entry in self.entries:
            if (entry["object"] == mem["object"]) and (
                entry["object_location"] == mem["object_location"]
            ):
                return entry

        return None

    def find_same_object_memory(self, mem) -> dict:
        """Find a semantic memory whose object is the same as the query memory.

        The match requires the same object but a *different* location.

        Args
        ----
        mem: A semantic memory in a dictionary format

            {"object": <OBJECT>, "object_location": <OBJECT_LOCATION>,
            "num_generalized": <NUM_GENERALIZED>}

        Returns
        -------
        A semantic memory if it exists. Otherwise return None.
        """
        for entry in self.entries:
            if (entry["object"] == mem["object"]) and (
                entry["object_location"] != mem["object_location"]
            ):
                return entry

        return None
import json
import logging
import os
import random
import subprocess
from copy import deepcopy
from typing import List, Tuple
import gymnasium as gym
import numpy as np
import torch
import yaml
import room_env
from .des import RoomDes
logging.basicConfig(
level=os.environ.get("LOGLEVEL", "INFO").upper(),
format="%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
def seed_everything(seed: int) -> None:
    """Seed every source of randomness for reproducibility.

    Args
    ----
    seed: seed applied to python's, numpy's, and torch's RNGs.
    """
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    # benchmark must be False for determinism: when True, cudnn auto-tunes and
    # may select different (non-deterministic) algorithms from run to run.
    torch.backends.cudnn.benchmark = False
def read_lines(fname: str) -> list:
    """Read lines from a text file.

    There is some path magic going on here. This is to account for both the production
    and development mode. Don't use this function for a general purpose.
    """
    fullpath = (
        fname
        if fname.startswith("/")
        else os.path.join(os.path.dirname(__file__), fname)
    )
    logging.debug(f"Reading {fullpath} ...")

    with open(fullpath, "r") as stream:
        return [line.strip() for line in stream.readlines()]
def read_json(fname: str) -> dict:
    """Read a json file and return its parsed contents."""
    logging.debug(f"reading json {fname} ...")
    with open(fname, "r") as stream:
        loaded = json.load(stream)
    return loaded
def write_json(content: dict, fname: str) -> None:
    """Serialize ``content`` to ``fname`` as pretty-printed JSON."""
    logging.debug(f"writing json {fname} ...")
    with open(fname, "w") as stream:
        stream.write(json.dumps(content, indent=4, sort_keys=False))
def read_yaml(fname: str) -> dict:
    """Read yaml.

    There is some path magic going on here. This is to account for both the production
    and development mode. Don't use this function for a general purpose.
    """
    if not fname.startswith("/"):
        fullpath = os.path.join(os.path.dirname(__file__), fname)
    else:
        fullpath = fname
    logging.debug(f"reading yaml {fullpath} ...")

    with open(fullpath, "r") as stream:
        loaded = yaml.safe_load(stream)
    return loaded
def write_yaml(content: dict, fname: str) -> None:
    """Dump ``content`` into ``fname`` as yaml."""
    logging.debug(f"writing yaml {fname} ...")
    with open(fname, "w") as outfile:
        yaml.dump(content, outfile, indent=2, sort_keys=False)
def read_data(data_path: str) -> dict:
    """Read train, val, test spilts.

    Args
    ----
    data_path: path to data.

    Returns
    -------
    data: {'train': list of training obs,
           'val': list of val obs,
           'test': list of test obs}
    """
    logging.debug(f"reading data from {data_path} ...")
    loaded = read_json(data_path)
    logging.info(f"Succesfully read data {data_path}")
    return loaded
def argmax(iterable):
    """Return the index of the first largest element of ``iterable``."""
    values = list(iterable)
    return values.index(max(values))
def remove_name(entity: str) -> str:
    """Remove name from the entity.

    Args
    ----
    entity: e.g., Bob's laptop

    Returns
    -------
    e.g., laptop
    """
    # The bare entity is always the last whitespace-separated token.
    *_, bare = entity.split()
    return bare
def split_name_entity(name_entity: str) -> Tuple[str, str]:
    """Separate name and entity from the given string.

    Args
    ----
    name_entity: e.g., "Bob's laptop"

    Returns
    -------
    name: e.g., Bob
    entity: e.g., laptop
    """
    logging.debug(f"spliting name and entity from {name_entity}")
    parts = name_entity.split()
    assert len(parts) == 2 and "'" in parts[0]
    owner, thing = parts
    return owner.split("'")[0], thing
def get_duplicate_dicts(search: dict, target: list) -> List:
    """Find if there are duplicate dicts.

    Args
    ----
    search: dict
    target: target list to look up.

    Returns
    -------
    duplicates: a list of dicts or None
    """
    assert isinstance(search, dict)
    logging.debug("finding if duplicate dicts exist ...")
    duplicates = []

    for candidate in target:
        assert isinstance(candidate, dict)
        # A candidate matches when it carries every searched key with the
        # exact searched value.
        if set(search).issubset(set(candidate)) and all(
            candidate[key] == val for key, val in search.items()
        ):
            duplicates.append(candidate)

    logging.info(f"{len(duplicates)} duplicates were found!")

    return duplicates
def list_duplicates_of(seq, item) -> List:
    """Return every index at which ``item`` occurs in ``seq``."""
    # https://stackoverflow.com/questions/5419204/index-of-duplicates-items-in-a-python-list
    locs = []
    pos = -1
    while True:
        try:
            pos = seq.index(item, pos + 1)
        except ValueError:
            # No further occurrence: the collected indices are complete.
            return locs
        locs.append(pos)
def make_des_config(
    commonsense_prob: float,
    num_humans: int,
    num_total_objects: int,
    maximum_num_objects_per_human: int,
    maximum_num_locations_per_object: int,
    maxiumum_days_period: int,
    des_size: str,
    last_timestep: int = 128,
) -> dict:
    """Make a des config.

    Args
    ----
    commonsense_prob: commonsense probability
    num_humans: number of humans
    num_total_objects: number of total objects
    maximum_num_objects_per_human: maximum number of objects per human
    maximum_num_locations_per_object: maximum number of locations per object
    maxiumum_days_period: maxiumum number of days period
    des_size: The size of DES (i.e., "xxs", "xs", "s", "m", "l", "dev")
    last_timestep: last time step where the DES terminates.

    Returns
    -------
    des config
    """
    # Fixed paths/seed first, then the caller-supplied knobs, in the same
    # key order the original two-step construction produced.
    return {
        "human_names_path": "./room_env/data/human-names",
        "last_timestep": last_timestep,
        "maxiumum_days_period": maxiumum_days_period,
        "save_path": f"./room_env/data/des-config-{des_size}.json",
        "seed": 42,
        "semantic_knowledge_path": "./room_env/data/semantic-knowledge.json",
        "commonsense_prob": commonsense_prob,
        "num_humans": num_humans,
        "num_total_objects": num_total_objects,
        "maximum_num_objects_per_human": maximum_num_objects_per_human,
        "maximum_num_locations_per_object": maximum_num_locations_per_object,
    }
def get_des_variables(des_size: str = "l") -> Tuple[int, int, int]:
    """Get the des variables.

    Args
    ----
    des_size: The size of DES (i.e., "xxs", "xs", "s", "m", "l", "dev")

    Returns
    -------
    capacity, num_humans, num_total_objects

    Raises
    ------
    ValueError: if ``des_size`` is not one of the known sizes.
    """
    # Each DES size maps to a memory capacity; humans and objects scale off it.
    capacities = {"dev": 16, "xxs": 2, "xs": 4, "s": 8, "m": 16, "l": 32}
    try:
        capacity = capacities[des_size]
    except KeyError:
        # The original raised a bare ValueError; include the offending value
        # so the failure is actionable.
        raise ValueError(f"Unknown des_size: {des_size!r}") from None

    num_humans = capacity * 2
    num_total_objects = capacity // 2

    return capacity, num_humans, num_total_objects
def run_des_seeds(
    seeds: list = None,
    capacity: int = 32,
    des_size: str = "l",
    allow_random_human: bool = True,
    allow_random_question: bool = True,
    question_prob: float = 0.1,
) -> dict:
    """Run the RoomEnv-v1 with multiple different seeds.

    Args
    ----
    seeds: environment seeds to run with; defaults to 0..9.
    capacity: total memory capacity, split across the memory systems.
    des_size: The size of DES (i.e., "xxs", "xs", "s", "m", "l", "dev")
    allow_random_human: passed through to the RoomEnv-v1 environment.
    allow_random_question: passed through to the RoomEnv-v1 environment.
    question_prob: passed through to the RoomEnv-v1 environment.

    Returns
    -------
    results: mean total reward over the seeds, keyed by forgetting policy
        ("episodic", "semantic", "random", "pre_sem").
    """
    # Avoid the mutable-default-argument pitfall: a shared default list would
    # persist across calls if any caller ever mutated it.
    if seeds is None:
        seeds = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

    results = {}
    how_to_forget = ["episodic", "semantic", "random", "pre_sem"]

    for forget_short in how_to_forget:
        # Each policy determines how the capacity is split between the
        # episodic and semantic systems, and whether the semantic system is
        # pretrained.
        if forget_short == "random":
            pretrain_semantic = False
            capacity_ = {
                "episodic": capacity // 2,
                "semantic": capacity // 2,
                "short": 1,
            }
        elif forget_short == "episodic":
            pretrain_semantic = False
            capacity_ = {"episodic": capacity, "semantic": 0, "short": 1}
        elif forget_short == "semantic":
            pretrain_semantic = False
            capacity_ = {"episodic": 0, "semantic": capacity, "short": 1}
        elif forget_short == "pre_sem":
            pretrain_semantic = True
            capacity_ = {
                "episodic": capacity // 2,
                "semantic": capacity // 2,
                "short": 1,
            }
        else:
            raise ValueError

        results_ = []
        for seed in seeds:
            env = gym.make(
                "RoomEnv-v1",
                des_size=des_size,
                seed=seed,
                policies={
                    "memory_management": "rl",
                    "question_answer": "episodic_semantic",
                    "encoding": "argmax",
                },
                capacity=capacity_,
                question_prob=question_prob,
                observation_params="perfect",
                allow_random_human=allow_random_human,
                allow_random_question=allow_random_question,
                pretrain_semantic=pretrain_semantic,
                check_resources=False,
                varying_rewards=False,
            )
            state, info = env.reset()
            rewards = 0
            while True:
                # Action encodes where the short-term memory goes:
                # 0 -> episodic, 1 -> semantic, 2 -> forget.
                if forget_short == "random":
                    action = random.choice([0, 1, 2])
                elif forget_short == "episodic":
                    action = 0
                elif forget_short == "semantic":
                    action = 1
                elif forget_short == "pre_sem":
                    action = 0
                else:
                    raise ValueError
                state, reward, done, truncated, info = env.step(action)
                rewards += reward
                if done:
                    break

            results_.append(rewards)
        results[forget_short] = np.mean(results_)

    return results
def run_all_des_configs(
    des_size: str,
    capacity: int,
    maximum_num_objects_per_human: int,
    maximum_num_locations_per_object: int,
    maxiumum_days_period: int,
    commonsense_prob: float,
    num_humans: int,
    num_total_objects: int,
    seeds: list,
    allow_random_human: bool,
    allow_random_question: bool,
    last_timestep: int,
    question_prob: float,
) -> dict:
    """Run the RoomEnv-v1 with different des configs, with multiple different seeds.

    Args
    ----
    des_size: The size of DES (i.e., "xxs", "xs", "s", "m", "l", "dev")
    capacity: total memory capacity of the agent.
    maximum_num_objects_per_human: maximum number of objects per human
    maximum_num_locations_per_object: maximum number of locations per object
    maxiumum_days_period: maximum number of days period. (The parameter name
        keeps the upstream misspelling for call-site compatibility.)
    commonsense_prob: commonsense probability
    num_humans: number of humans
    num_total_objects: number of total objects
    seeds: random seeds to average over.
    allow_random_human: whether to allow random humans to be observed.
    allow_random_question: whether to allow random questions to be asked.
    last_timestep: the timestep at which the DES terminates.
    question_prob: the probability of a question being asked at every observation.

    Returns
    -------
    results: summary rewards per policy plus the config that produced them, or
        None when generating the DES config failed.
    """
    des_config = make_des_config(
        commonsense_prob=commonsense_prob,
        num_humans=num_humans,
        num_total_objects=num_total_objects,
        maximum_num_objects_per_human=maximum_num_objects_per_human,
        maximum_num_locations_per_object=maximum_num_locations_per_object,
        maxiumum_days_period=maxiumum_days_period,
        des_size=des_size,
        last_timestep=last_timestep,
    )

    # Rough difficulty proxy: product of all the size-related knobs.
    complexity = (
        num_humans
        * num_total_objects
        * maximum_num_objects_per_human
        * maximum_num_locations_per_object
        * maxiumum_days_period
    )

    with open("create_des_config.yaml", "w") as stream:
        yaml.safe_dump(des_config, stream, indent=2)

    sub_out = subprocess.call(
        ["python", "create_des_config.py"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    # subprocess.call returns the child's exit status; ANY nonzero value means
    # failure, not just 1.
    if sub_out != 0:
        return None

    rewards = run_des_seeds(
        seeds=seeds,
        capacity=capacity,
        des_size=des_size,
        allow_random_human=allow_random_human,
        allow_random_question=allow_random_question,
        question_prob=question_prob,
    )

    results = {
        # pre_sem reward minus the mean of the three baseline policies.
        "mean_rewards_diff": rewards["pre_sem"]
        - rewards["random"] / 3
        - rewards["semantic"] / 3
        - rewards["episodic"] / 3,
        "mean_rewards_episodic": rewards["episodic"],
        "mean_rewards_semantic": rewards["semantic"],
        "mean_rewards_random": rewards["random"],
        "mean_rewards_pre_sem": rewards["pre_sem"],
        "complexity": complexity,
        "commonsense_prob": commonsense_prob,
        "maximum_num_locations_per_object": maximum_num_locations_per_object,
        "maximum_num_objects_per_human": maximum_num_objects_per_human,
        "num_humans": num_humans,
        "num_total_objects": num_total_objects,
        "maxiumum_days_period": maxiumum_days_period,
        "allow_random_human": allow_random_human,
        "allow_random_question": allow_random_question,
        "question_prob": question_prob,
    }

    return deepcopy(results)
def fill_des_resources(des_size: str) -> None:
    """Compute and persist resource counts for a DES config.

    First pass: run the DES without resource checks to discover every object
    location that ever appears, and write a config granting 9999 of each.
    Second pass: replay the DES while snapshotting resource levels after each
    step, then store 9999 minus the minimum remaining amount per location
    (i.e., the peak consumption) back into the config file. A final
    instantiation with check_resources=True exercises the stored values.

    Args
    ----
    des_size: The size of DES (e.g., "xxs", "xs", "s", "m", "l")
    """
    des = RoomDes(des_size=des_size, check_resources=False)
    des.run()
    observed_locations = {
        human_state["object_location"]
        for state in des.states
        for human_state in state.values()
    }
    des.config["resources"] = deepcopy(
        {location: 9999 for location in observed_locations}
    )
    write_json(des.config, f"./room_env/data/des-config-{des_size}.json")

    snapshots = []
    des = RoomDes(des_size=des_size, check_resources=True)
    snapshots.append(deepcopy(des.resources))
    while des.until > 0:
        des.step()
        des.until -= 1
        snapshots.append(deepcopy(des.resources))

    object_locations = deepcopy(list(des.resources.keys()))
    peak_usage = {
        location: 9999 - min(snapshot[location] for snapshot in snapshots)
        for location in object_locations
    }
    des.config["resources"] = deepcopy(peak_usage)
    write_json(des.config, f"./room_env/data/des-config-{des_size}.json")

    des = RoomDes(des_size=des_size, check_resources=True)
def get_handcrafted(
    env: str = "RoomEnv-v1",
    des_size: str = "l",
    seeds: list = None,
    question_prob: float = 0.1,
    observation_params: str = "perfect",
    policies: dict = None,
    capacities: list = None,
    allow_random_human: bool = False,
    allow_random_question: bool = True,
    varying_rewards: bool = False,
    check_resources: bool = True,
) -> dict:
    """Get the env results with handcrafted policies.

    At the moment only {"memory_management": "rl"} is supported.

    Args
    ----
    env: gym environment id, e.g., "RoomEnv-v1"
    des_size: The size of DES (i.e., "xxs", "xs", "s", "m", "l", "dev")
    seeds: random seeds to average over. Defaults to list(range(10)).
    question_prob: the probability of a question being asked at every observation.
    observation_params: At the moment this is only "perfect".
    policies: defaults to {"memory_management": "rl",
        "question_answer": "episodic_semantic", "encoding": "argmax"}.
    capacities: total memory capacities to sweep. Defaults to
        [2, 4, 8, 16, 32, 64].
    allow_random_human: whether to allow random humans to be observed.
    allow_random_question: whether to allow random questions to be asked.
    varying_rewards: If true, then the rewards are scaled in every episode so
        that total_episode_rewards is 128.
    check_resources: whether to check the resources in the DES.

    Returns
    -------
    handcrafted_results: {capacity: {policy: {"mean": float, "std": float}}}
    """
    # Avoid mutable default arguments; None selects the documented defaults.
    if seeds is None:
        seeds = list(range(10))
    if policies is None:
        policies = {
            "memory_management": "rl",
            "question_answer": "episodic_semantic",
            "encoding": "argmax",
        }
    if capacities is None:
        capacities = [2, 4, 8, 16, 32, 64]

    how_to_forget = ["episodic", "semantic", "random", "pre_sem"]
    env_ = env
    handcrafted_results = {}

    for capacity in capacities:
        handcrafted_results[capacity] = {}

        for forget_short in how_to_forget:
            # Derive the per-policy memory-capacity split and pretraining flag.
            if forget_short == "random":
                pretrain_semantic = False
                capacity_ = {
                    "episodic": capacity // 2,
                    "semantic": capacity // 2,
                    "short": 1,
                }
            elif forget_short == "episodic":
                pretrain_semantic = False
                capacity_ = {"episodic": capacity, "semantic": 0, "short": 1}
            elif forget_short == "semantic":
                pretrain_semantic = False
                capacity_ = {"episodic": 0, "semantic": capacity, "short": 1}
            elif forget_short == "pre_sem":
                pretrain_semantic = True
                capacity_ = {
                    "episodic": capacity // 2,
                    "semantic": capacity // 2,
                    "short": 1,
                }
            else:
                raise ValueError(
                    f"unknown memory-management policy: {forget_short}"
                )

            results = []
            for seed in seeds:
                env = gym.make(
                    env_,
                    des_size=des_size,
                    seed=seed,
                    policies=policies,
                    capacity=capacity_,
                    question_prob=question_prob,
                    observation_params=observation_params,
                    allow_random_human=allow_random_human,
                    allow_random_question=allow_random_question,
                    pretrain_semantic=pretrain_semantic,
                    check_resources=check_resources,
                    varying_rewards=varying_rewards,
                )
                state, info = env.reset()
                rewards = 0
                while True:
                    # 0 -> episodic, 1 -> semantic, 2 -> forget.
                    if forget_short == "random":
                        action = random.choice([0, 1, 2])
                    elif forget_short == "episodic":
                        action = 0
                    elif forget_short == "semantic":
                        action = 1
                    elif forget_short == "pre_sem":
                        action = 0
                    else:
                        raise ValueError(
                            f"unknown memory-management policy: {forget_short}"
                        )
                    state, reward, done, truncated, info = env.step(action)
                    rewards += reward
                    if done:
                        break
                results.append(rewards)

            mean_ = np.mean(results).round(3).item()
            std_ = np.std(results).round(3).item()
            handcrafted_results[capacity][forget_short] = {
                "mean": mean_,
                "std": std_,
            }

    return handcrafted_results
import json
import os
from copy import deepcopy
from pprint import pprint
def read_json(fname: str) -> dict:
    """Load a JSON file addressed relative to this module's directory.

    There is some path magic going on here. This is to account for both the
    production and development mode. Don't use this function for a general
    purpose.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path, "r") as handle:
        return json.load(handle)
class RoomDes:
    """RoomDes Class (a tiny discrete-event simulator of the room).

    This class is very simple at the moment. When it's initialized, it places
    N_{humans} in the room. They periodically move to other locations. They also
    periodically change the location of their objects they are holding. At the
    moment, everything is deterministic.
    """

    def __init__(self, des_size: str = "l", check_resources: bool = True) -> None:
        """Instantiate the class.

        Args
        ----
        des_size: configuration for the RoomDes simulation. It should be either
            size or dict. size can be "xxs (extra extra small)",
            "xs (extra small)", "s (small)", "m (medium)", or "l (large)".
            A dict config should look like:
            {"components": <COMPONENTS>, "resources": <RESOURCES>,
            "last_timestep": <LAST_TIMESTEP>,
            "semantic_knowledge": <SEMANTIC_KNOWLEDGE>, "complexity", <COMPLEXITY>}
            <RESOURCES> should look like this:
            {'desk': 2, 'A': 10000, 'lap': 10000}
            <LAST_TIMESTEP> is a number where the DES terminates.
            <SEMANTIC_KNOWLEDGE> is a dictionary of semantic knowledge.
            <COMPLEXITY> is defined as num_humans * num_total_objects
            * maximum_num_objects_per_human * maximum_num_locations_per_object
        check_resources: whether to check if the resources are depleted or not.
        """
        if isinstance(des_size, str):
            assert des_size.lower() in [
                "dev",
                "xxs",
                "xs",
                "s",
                "m",
                "l",
            ]
            self.config = read_json(f"./data/des-config-{des_size.lower()}-v1.json")
        else:
            # A dict is taken as a ready-made config (e.g., custom experiments).
            self.config = des_size
        self.check_resources = check_resources
        self._initialize()

    def _initialize(self) -> None:
        """Initialize (or reset) the simulator from the stored config."""
        self.components = deepcopy(self.config["components"])
        self.resources = deepcopy(self.config["resources"])
        self.semantic_knowledge = deepcopy(self.config["semantic_knowledge"])

        # Collect the distinct humans, objects, and locations mentioned in the
        # components. components maps human -> list of (object, location) pairs.
        self.humans = []
        self.objects = []
        self.object_locations = []
        for human, obj_locs in self.components.items():
            self.humans.append(human)
            for obj, loc in obj_locs:
                self.objects.append(obj)
                self.object_locations.append(loc)
        self.humans = sorted(list(set(self.humans)))
        self.objects = sorted(list(set(self.objects)))
        self.object_locations = sorted(list(set(self.object_locations)))

        # Number of remaining steps until the DES terminates.
        self.until = deepcopy(self.config["last_timestep"])

        # Each human starts at the first (object, location) entry of its list.
        self.states = []
        self.state = {}
        for human in self.components:
            self.state[human] = {}
            (
                self.state[human]["object"],
                self.state[human]["object_location"],
            ) = self.components[human][0]
            if self.check_resources:
                # Occupying a location consumes one unit of its resource.
                self.resources[self.state[human]["object_location"]] -= 1
            self.state[human]["current_time"] = 0
        self.states.append(deepcopy(self.state))

        if self.check_resources:
            for key, val in self.resources.items():
                assert val >= 0, f"{key}: {val}"

        self.events = []
        self.current_time = 0

    def step(self) -> None:
        """Proceed time by one.

        Each human deterministically cycles through its (object, location)
        list, indexed by current_time modulo the list length. Resource counts
        are released for the old location and consumed for the new one.
        """
        previous_state = deepcopy(self.state)
        previous_resources = deepcopy(self.resources)
        self.current_time += 1

        for human in self.state:
            object_location_idx = self.current_time % len(self.components[human])
            self.state[human]["current_time"] = self.current_time
            if self.check_resources:
                # Give back the resource held at the previous location.
                self.resources[self.state[human]["object_location"]] += 1
            (
                self.state[human]["object"],
                self.state[human]["object_location"],
            ) = self.components[human][object_location_idx]
            if self.check_resources:
                self.resources[self.state[human]["object_location"]] -= 1

        if self.check_resources:
            for key, val in self.resources.items():
                assert val >= 0, f"{key}: {val}"

        current_state = deepcopy(self.state)
        current_resources = deepcopy(self.resources)
        self.event = self.check_event(
            previous_state, previous_resources, current_state, current_resources
        )
        self.events.append(deepcopy(self.event))
        self.states.append(deepcopy(self.state))

    def check_event(
        self,
        previous_state: dict,
        previous_resources: dict,
        current_state: dict,
        current_resources: dict,
    ) -> dict:
        """Check if any events have occurred between the two consecutive states.

        Args
        ----
        previous_state: per-human state at time t-1
        previous_resources: resource counts at time t-1
        current_state: per-human state at time t
        current_resources: resource counts at time t

        Returns
        -------
        event: {"state_changes": {human: {...}}, "resource_changes": {resource: delta}}
        """
        assert len(previous_state) == len(current_state)
        assert len(previous_resources) == len(current_resources)

        state_changes = {}
        resource_changes = {}
        humans = list(previous_state)

        for human in humans:
            previous_object = previous_state[human]["object"]
            previous_object_location = previous_state[human]["object_location"]
            previous_time = previous_state[human]["current_time"]
            current_object = current_state[human]["object"]
            current_object_location = current_state[human]["object_location"]
            current_time = current_state[human]["current_time"]
            assert current_time == previous_time + 1

            state_changes[human] = {}
            if previous_object != current_object:
                state_changes[human]["object"] = {
                    "previous": previous_object,
                    "current": current_object,
                }
            if previous_object_location != current_object_location:
                state_changes[human]["object_location"] = {
                    "previous": previous_object_location,
                    "current": current_object_location,
                }
            # Only keep humans for which something actually changed.
            if len(state_changes[human]) == 0:
                del state_changes[human]
            else:
                state_changes[human]["current_time"] = current_time

        for resource in previous_resources:
            previous_amount = previous_resources[resource]
            current_amount = current_resources[resource]
            if previous_amount != current_amount:
                resource_changes[resource] = current_amount - previous_amount

        return {"state_changes": state_changes, "resource_changes": resource_changes}

    def run(self, debug: bool = False) -> None:
        """Run until the RoomDes terminates (self.until reaches 0).

        Args
        ----
        debug: if True, pretty-print each step's event.
        """
        while self.until > 0:
            self.step()
            if debug:
                pprint(self.event)
            self.until -= 1
import random
from .memory import ShortMemory
def encode_observation(memory_systems: dict, policy: str, obs: dict) -> None:
    """Non-RL policy of encoding an observation into a short-term memory.

    Args
    ----
    memory_systems: {"episodic": EpisodicMemory, "semantic": SemanticMemory,
        "short": ShortMemory}
    policy: "argmax" or "neural" (only "argmax" is implemented)
    obs: observation = {"human": <human>,
        "object": <obj>,
        "object_location": <obj_loc>}
    """
    # Guard clause: anything but "argmax" is not implemented yet.
    if policy.lower() != "argmax":
        raise NotImplementedError
    memory_systems["short"].add(ShortMemory.ob2short(obs))
def manage_memory(memory_systems: dict, policy: str) -> None:
    """Non-RL memory management policy.

    Moves (or drops) the oldest short-term memory according to the chosen
    policy, then always forgets it from the short-term system.

    Args
    ----
    memory_systems: {"episodic": EpisodicMemory, "semantic": SemanticMemory,
        "short": ShortMemory}
    policy: "episodic", "semantic", "forget", "random", or "neural"
    """
    policy_ = policy.lower()
    assert policy_ in [
        "episodic",
        "semantic",
        "forget",
        "random",
        "neural",
    ]

    def _move_to_episodic() -> None:
        # Evict the oldest episodic entry if needed, then store the oldest
        # short-term memory as an episodic memory.
        if memory_systems["episodic"].is_full:
            memory_systems["episodic"].forget_oldest()
        oldest_short = memory_systems["short"].get_oldest_memory()
        memory_systems["episodic"].add(ShortMemory.short2epi(oldest_short))

    def _move_to_semantic() -> None:
        # Evict the weakest semantic entry if needed, then store the oldest
        # short-term memory as a semantic memory.
        if memory_systems["semantic"].is_full:
            memory_systems["semantic"].forget_weakest()
        oldest_short = memory_systems["short"].get_oldest_memory()
        memory_systems["semantic"].add(ShortMemory.short2sem(oldest_short))

    if policy_ == "episodic":
        if memory_systems["episodic"].capacity != 0:
            _move_to_episodic()
    elif policy_ == "semantic":
        if memory_systems["semantic"].capacity != 0:
            _move_to_semantic()
    elif policy_ == "forget":
        pass
    elif policy_ == "random":
        # NOTE(review): unlike the named policies, the random branch does not
        # guard on a zero capacity — preserved as-is from the original.
        chosen = random.choice([0, 1, 2])
        if chosen == 0:
            _move_to_episodic()
        elif chosen == 1:
            _move_to_semantic()
        else:
            pass
    elif policy_ == "neural":
        raise NotImplementedError
    else:
        raise ValueError

    # The short-term slot is always cleared, whatever the policy did.
    memory_systems["short"].forget_oldest()
def answer_question(memory_systems: dict, policy: str, question: dict) -> str:
    """Non-RL question answering policy.

    Both memory systems are always queried; the policy decides which
    prediction (or fallback order) is used.

    Args
    ----
    memory_systems: {"episodic": EpisodicMemory, "semantic": SemanticMemory,
        "short": ShortMemory}
    policy: "episodic_semantic", "semantic_episodic", "episodic", "semantic",
        "random", or "neural"
    question: question = {"human": <human>, "object": <obj>}

    Returns
    -------
    pred: prediction (an object location, or None if nothing matched)
    """
    policy_ = policy.lower()
    assert policy_ in [
        "episodic_semantic",
        "semantic_episodic",
        "episodic",
        "semantic",
        "random",
        "neural",
    ]

    pred_epi, _ = memory_systems["episodic"].answer_latest(question)
    pred_sem, _ = memory_systems["semantic"].answer_strongest(question)

    if policy_ == "episodic_semantic":
        # Prefer episodic; fall back to semantic.
        return pred_sem if pred_epi is None else pred_epi
    if policy_ == "semantic_episodic":
        # Prefer semantic; fall back to episodic.
        return pred_epi if pred_sem is None else pred_sem
    if policy_ == "episodic":
        return pred_epi
    if policy_ == "semantic":
        return pred_sem
    if policy_ == "random":
        return random.choice([pred_epi, pred_sem])
    if policy_ == "neural":
        raise NotImplementedError
    raise ValueError
import logging
import os
import random
from copy import deepcopy
from typing import Tuple
import gymnasium as gym
from ..des import RoomDes
from ..memory import EpisodicMemory, SemanticMemory, ShortMemory
from ..policy import answer_question, encode_observation, manage_memory
from ..utils import seed_everything
# Configure root logging once at import time; the level comes from the
# LOGLEVEL environment variable (default INFO).
logging.basicConfig(
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    format="%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
class RoomEnv1(gym.Env):
    """The Room environment version 1.

    This env includes three state-action spaces. You have to choose which one of
    the three will be RL trained.

    Memory management.
        State: episodic, semantic, and short-term memory systems at time t
        Action: (0) Move the oldest short-term memory to the episodic,
            (1) to the semantic, or (2) forget it

    Question-answer
        State: episodic and semantic memory systems at time t
        Action: (0) Select the episodic memory system to answer the question, or
            (1) the semantic

    Encoding an observation to a short-term memory. The state space is
    (i) triple-based, (ii) text-based, or (iii) image-based.
        Triple
            State: [(head_i, relation_i, tail_i) | i is from 1 to N]
            Action: Choose one of the N triples (actions) to be encoded as
                a short-term memory.
        Text
            State: [token_1, token_2, ..., token_N]
            Action: This is actually now N^3, where the first, second and third
                are to choose head, relation, and tail, respectively.
        Image
            State: An image with objects
            Action: Not sure yet ...
    """

    metadata = {"render.modes": ["console"]}

    def __init__(
        self,
        des_size: str = "l",
        seed: int = 42,
        policies: dict = {
            "memory_management": "RL",
            "question_answer": "episodic_semantic",
            "encoding": "argmax",
        },
        capacity: dict = {"episodic": 16, "semantic": 16, "short": 1},
        question_prob: float = 1.0,
        observation_params: str = "perfect",
        allow_random_human: bool = False,
        allow_random_question: bool = False,
        total_episode_rewards: int = 128,
        pretrain_semantic: bool = False,
        check_resources: bool = True,
        varying_rewards: bool = False,
    ) -> None:
        """
        Args
        ----
        des_size: "xxs", "xs", "s", "m", or "l".
        seed: random seed number
        policies:
            memory_management:
                "RL": Reinforcement learning to learn the policy.
                "episodic": Always take action 1: move to the episodic.
                "semantic": Always take action 2: move to the semantic.
                "forget": Always take action 3: forget the oldest short-term memory.
                "random": Take one of the three actions uniform-randomly.
                "neural": Neural network policy
            question_answer:
                "RL": Reinforcement learning to learn the policy.
                "episodic_semantic": First look up the episodic and then the semantic.
                "semantic_episodic": First look up the semantic and then the episodic.
                "episodic": Only look up the episodic.
                "semantic": Only look up the semantic.
                "random": Take one of the two actions uniform-randomly.
                "neural": Neural network policy
            encoding:
                "RL": Reinforcement learning to learn the policy.
                "argmax": Take the triple with the highest score.
                "neural": Neural network policy
        capacity: memory capacity of the agent.
            e.g., {"episodic": 1, "semantic": 1}
        question_prob: The probability of a question being asked at every observation.
        observation_params: At the moment this is only "perfect".
        allow_random_human: whether or not to generate a random human sequence.
        allow_random_question: whether or not to generate a random question sequence.
        total_episode_rewards: total episode rewards
        pretrain_semantic: whether to prepopulate the semantic memory with ConceptNet
            or not
        check_resources: whether to check the resources in the DES.
        varying_rewards: If true, then the rewards are scaled in every episode so
            that total_episode_rewards is total_episode_rewards.
        """
        self.seed = seed
        seed_everything(self.seed)
        self.policies = policies
        # Exactly one of the three policies must be RL-trained per env instance.
        assert len([pol for pol in self.policies.values() if pol.lower() == "rl"]) == 1
        self.capacity = capacity
        self.question_prob = question_prob
        self.observation_params = observation_params
        self.allow_random_human = allow_random_human
        self.allow_random_question = allow_random_question
        self.total_episode_rewards = total_episode_rewards
        self.pretrain_semantic = pretrain_semantic
        self.check_resources = check_resources
        self.varying_rewards = varying_rewards

        # Our state space is quite complex. Here we just make a dummy observation space.
        # to bypass the sanity check.
        self.observation_space = gym.spaces.Discrete(1)

        if self.policies["memory_management"].lower() == "rl":
            # 0 for episodic, 1 for semantic, and 2 to forget
            self.action_space = gym.spaces.Discrete(3)
        if self.policies["question_answer"].lower() == "rl":
            # 0 for episodic and 1 for semantic
            self.action_space = gym.spaces.Discrete(2)
        if self.policies["encoding"].lower() == "rl":
            raise NotImplementedError

        self.des_size = des_size
        self.des = RoomDes(
            des_size=self.des_size,
            check_resources=self.check_resources,
        )
        assert 0 < self.question_prob <= 1
        self.init_memory_systems()

    def init_memory_systems(self) -> None:
        """Initialize the agent's memory systems."""
        self.memory_systems = {
            "episodic": EpisodicMemory(capacity=self.capacity["episodic"]),
            "semantic": SemanticMemory(capacity=self.capacity["semantic"]),
            "short": ShortMemory(capacity=self.capacity["short"]),
        }
        if self.pretrain_semantic:
            assert self.capacity["semantic"] > 0
            _ = self.memory_systems["semantic"].pretrain_semantic(
                self.des.semantic_knowledge,
                return_remaining_space=False,
                freeze=False,
            )

    def generate_sequences(self) -> None:
        """Generate human and question sequences in advance.

        Runs the DES once to completion to derive a question sequence that is
        always answerable from past observations, then re-initializes the DES.
        """
        if self.observation_params.lower() == "perfect":
            if self.allow_random_human:
                self.human_sequence = random.choices(
                    list(self.des.humans), k=self.des.until + 1
                )
            else:
                # Deterministic round-robin over the humans, truncated to
                # until + 1 observations.
                self.human_sequence = (
                    self.des.humans * (self.des.until // len(self.des.humans) + 1)
                )[: self.des.until + 1]
        else:
            raise NotImplementedError

        if self.allow_random_question:
            # Any previously-observed human may be asked about.
            self.question_sequence = [
                random.choice(self.human_sequence[: i + 1])
                for i in range(len(self.human_sequence))
            ]
        else:
            self.question_sequence = [self.human_sequence[0]]
            self.des.run()
            assert (
                len(self.des.states)
                == len(self.des.events) + 1
                == len(self.human_sequence)
            )
            # Only ask about humans whose state has not changed since they were
            # last observed, so the question is answerable from memory.
            for i in range(len(self.human_sequence) - 1):
                start = max(i + 2 - len(self.des.humans), 0)
                end = i + 2
                humans_observed = self.human_sequence[start:end]
                current_state = self.des.states[end - 1]
                humans_not_changed = []
                for j, human in enumerate(humans_observed):
                    observed_state = self.des.states[start + j]
                    is_changed = False
                    for to_check in ["object", "object_location"]:
                        if (
                            current_state[human][to_check]
                            != observed_state[human][to_check]
                        ):
                            is_changed = True
                    if not is_changed:
                        humans_not_changed.append(human)
                self.question_sequence.append(random.choice(humans_not_changed))
            # Reset the DES so the episode replays it from scratch.
            self.des._initialize()

        # Thin the questions by question_prob; None means "no question asked".
        effective_question_sequence = []
        for i, question in enumerate(self.question_sequence[:-1]):
            if random.random() < self.question_prob:
                effective_question_sequence.append(question)
            else:
                effective_question_sequence.append(None)
        # The last observation shouldn't have a question
        effective_question_sequence.append(None)
        self.question_sequence = effective_question_sequence
        assert len(self.human_sequence) == len(self.question_sequence)

        self.num_questions = sum(
            [True for question in self.question_sequence if question is not None]
        )
        if self.varying_rewards:
            # Scale per-question rewards so a perfect episode totals
            # total_episode_rewards.
            self.CORRECT = self.total_episode_rewards / self.num_questions
            self.WRONG = -self.CORRECT
        else:
            self.CORRECT = 1
            self.WRONG = -1

    @staticmethod
    def extract_memory_entires(memory_systems: dict) -> dict:
        """Extract the entries from the Memory objects.

        (Method name keeps the upstream misspelling of "entries".)

        Args
        ----
        memory_systems: {"episodic": EpisodicMemory, "semantic": SemanticMemory,
            "short": ShortMemory}

        Returns
        -------
        memory_systems_: memory_systems only with entries.
        """
        memory_systems_ = {}
        for key, value in memory_systems.items():
            memory_systems_[key] = deepcopy(value.entries)

        return memory_systems_

    def generate_oqa(
        self, increment_des: bool = False
    ) -> Tuple[dict, dict, dict, bool]:
        """Generate an observation, question, and answer.

        Args
        ----
        increment_des: whether or not to take a step in the DES.

        Returns
        -------
        observation = {
            "human": <human>,
            "object": <obj>,
            "object_location": <obj_loc>,
        }
        question = {"human": <human>, "object": <obj>} or None
        answer = <obj_loc> or None
        is_last: True, if it's the last observation in the queue, otherwise False
        """
        human_o = self.human_sequence.pop(0)
        human_q = self.question_sequence.pop(0)

        # Both sequences are consumed in lockstep, so they run out together.
        is_last_o = len(self.human_sequence) == 0
        is_last_q = len(self.question_sequence) == 0
        assert is_last_o == is_last_q
        is_last = is_last_o

        if increment_des:
            self.des.step()

        obj_o = self.des.state[human_o]["object"]
        obj_loc_o = self.des.state[human_o]["object_location"]
        observation = deepcopy(
            {
                "human": human_o,
                "object": obj_o,
                "object_location": obj_loc_o,
                "current_time": self.des.current_time,
            }
        )

        if human_q is not None:
            obj_q = self.des.state[human_q]["object"]
            obj_loc_q = self.des.state[human_q]["object_location"]
            question = deepcopy({"human": human_q, "object": obj_q})
            answer = deepcopy(obj_loc_q)
        else:
            question = None
            answer = None

        return observation, question, answer, is_last

    def reset(self) -> Tuple[dict, dict]:
        """Reset the environment.

        Returns
        -------
        (state, info): the shape of state depends on which policy is "rl".
        """
        self.des._initialize()
        self.generate_sequences()
        self.init_memory_systems()
        info = {}
        self.obs, self.question, self.answer, self.is_last = self.generate_oqa(
            increment_des=False
        )

        if self.policies["encoding"].lower() == "rl":
            return deepcopy(self.obs), info

        if self.policies["memory_management"].lower() == "rl":
            encode_observation(self.memory_systems, self.policies["encoding"], self.obs)
            return deepcopy(self.extract_memory_entires(self.memory_systems)), info

        if self.policies["question_answer"].lower() == "rl":
            encode_observation(self.memory_systems, self.policies["encoding"], self.obs)
            manage_memory(self.memory_systems, self.policies["memory_management"])
            # Fast-forward through observations until the first actual question.
            while True:
                if (self.question is None) and (self.answer is None):
                    (
                        self.obs,
                        self.question,
                        self.answer,
                        self.is_last,
                    ) = self.generate_oqa(increment_des=True)
                    encode_observation(
                        self.memory_systems, self.policies["encoding"], self.obs
                    )
                    manage_memory(
                        self.memory_systems, self.policies["memory_management"]
                    )
                else:
                    return {
                        "memory_systems": deepcopy(
                            self.extract_memory_entires(self.memory_systems)
                        ),
                        "question": deepcopy(self.question),
                    }, info

        raise ValueError

    def step(self, action: int) -> Tuple[Tuple, int, bool, bool, dict]:
        """An agent takes an action.

        Args
        ----
        action: This depends on the state (i.e., on which policy is "rl").

        Returns
        -------
        state, reward, done, truncated, info
        """
        info = {}
        truncated = False
        if self.policies["encoding"].lower() == "rl":
            # This is a dummy code
            self.obs = self.obs[action]
            encode_observation(self.memory_systems, self.policies["encoding"], self.obs)
            manage_memory(self.memory_systems, self.policies["memory_management"])

            if (self.question is None) and (self.answer is None):
                reward = 0
            else:
                pred = answer_question(
                    self.memory_systems, self.policies["question_answer"], self.question
                )
                if str(pred).lower() == self.answer:
                    reward = self.CORRECT
                else:
                    reward = self.WRONG

            self.obs, self.question, self.answer, self.is_last = self.generate_oqa(
                increment_des=True
            )
            state = deepcopy(self.obs)

            if self.is_last:
                done = True
            else:
                done = False

            return state, reward, done, truncated, info

        if self.policies["memory_management"].lower() == "rl":
            # The RL action decides where the oldest short-term memory goes.
            if action == 0:
                manage_memory(self.memory_systems, "episodic")
            elif action == 1:
                manage_memory(self.memory_systems, "semantic")
            elif action == 2:
                manage_memory(self.memory_systems, "forget")
            else:
                raise ValueError

            if (self.question is None) and (self.answer is None):
                reward = 0
            else:
                pred = answer_question(
                    self.memory_systems, self.policies["question_answer"], self.question
                )
                if str(pred).lower() == self.answer:
                    reward = self.CORRECT
                else:
                    reward = self.WRONG

            self.obs, self.question, self.answer, self.is_last = self.generate_oqa(
                increment_des=True
            )
            encode_observation(self.memory_systems, self.policies["encoding"], self.obs)
            state = deepcopy(self.extract_memory_entires(self.memory_systems))

            if self.is_last:
                done = True
            else:
                done = False

            return state, reward, done, truncated, info

        if self.policies["question_answer"].lower() == "rl":
            # The RL action decides which memory system answers the question.
            if action == 0:
                pred = answer_question(self.memory_systems, "episodic", self.question)
            elif action == 1:
                pred = answer_question(self.memory_systems, "semantic", self.question)
            else:
                raise ValueError
            if str(pred).lower() == self.answer:
                reward = self.CORRECT
            else:
                reward = self.WRONG

            # Fast-forward through observations until the next question (or the
            # end of the episode).
            while True:
                (
                    self.obs,
                    self.question,
                    self.answer,
                    self.is_last,
                ) = self.generate_oqa(increment_des=True)
                encode_observation(
                    self.memory_systems, self.policies["encoding"], self.obs
                )
                manage_memory(self.memory_systems, self.policies["memory_management"])

                if self.is_last:
                    state = None
                    done = True
                    return state, reward, done, truncated, info
                else:
                    done = False

                if (self.question is not None) and (self.answer is not None):
                    state = {
                        "memory_systems": deepcopy(
                            self.extract_memory_entires(self.memory_systems)
                        ),
                        "question": deepcopy(self.question),
                    }
                    return state, reward, done, truncated, info

    def render(self, mode="console") -> None:
        # Only console rendering is supported, and it is a no-op.
        if mode != "console":
            raise NotImplementedError()
        else:
            pass
import json
import logging
import os
import random
from copy import deepcopy
from itertools import cycle
from typing import Tuple
import gymnasium as gym
from ..utils import read_lines, remove_name, split_name_entity
# Reward given for answering a question correctly / incorrectly.
CORRECT = 1
WRONG = 0

# Configure root logging once at import time; the level comes from the
# LOGLEVEL environment variable (default INFO).
logging.basicConfig(
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    format="%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
def read_json(fname: str) -> dict:
    """Load a JSON file addressed relative to this module's parent package.

    There is some path magic going on here. This is to account for both the
    production and development mode. Don't use this function for a general
    purpose.
    """
    path = os.path.join(os.path.dirname(__file__), "../", fname)
    with open(path, "r") as handle:
        return json.load(handle)
class RoomEnv0(gym.Env):
    """The Room environment version 0.
    In this big room, N_{agents} move around and observe N_{humans} placing objects.
    Every time the agents move around (i.e., take a step), they observe one human_{i}
    placing an object somewhere. Each agent can only observe one human at a time.
    Every time the agents takes a step, the environment also asks them where an object
    is. +1 reward is given when it gets right and 0 when it gets wrong.
    This environment is challenging in two ways:
    (1) An agent can't observe the entire room. It can only observe one human at a time.
    This means that the environment is only partially observable. This constraint means
    that it's more beneficial if more than one agent can collaborate. This constraint
    also means that the agents should have a memory system to remember the past
    observations.
    (2) The room environment changes every time. The humans can switch their locations,
    change their objects, and place them at different locations. These changes are
    not completely random. A decent portion of them come from common-sense knowledge.
    This means that an agent with both episodic and semantic memory systems will perform
    better than an agent with only one memory system.
    """
    metadata = {"render.modes": ["console"]}
    def __init__(
        self,
        room_size: str = "small",
        weighting_mode: str = "highest",
        # NOTE(review): mutable default arguments — these dicts are shared
        # across all calls; a caller mutating them affects later instances.
        probs: dict = {
            "commonsense": 0.7,
            "new_location": 0.1,
            "new_object": 0.1,
            "switch_person": 0.5,
        },
        limits: dict = {
            "heads": None,
            "tails": None,
            "names": None,
            "allow_spaces": False,
        },
        max_step: int = 1000,
        disjoint_entities: bool = True,
        num_agents: int = 1,
    ) -> None:
        """Initialize the environment.
        Args
        ----
        room_size: small or big
        weighting_mode: Either "weighted" or "highest"
        probs: the probabilities that govern the room environment changes.
        limits: Limitation on the triples.
        max_step: maximum step an agent can take. The environment will terminate when
            the number reaches this value.
        disjoint_entities: Assure that the heads and the tails don't overlap.
        num_agents: number of agents in the room.
        """
        super().__init__()
        self.room_size = room_size
        if self.room_size.lower() == "small":
            semantic_knowledge_path = "./data/semantic-knowledge-small.json"
            names_path = "./data/top-human-names-small"
        elif self.room_size.lower() in ["large", "big"]:
            semantic_knowledge_path = "./data/semantic-knowledge.json"
            names_path = "./data/top-human-names"
        # NOTE(review): no else branch — an unrecognized room_size leaves
        # semantic_knowledge_path/names_path undefined and raises NameError
        # when load_semantic_knowledge is called below.
        logging.debug("Creating an Observation-Question-Answer generator object ...")
        self.limits = limits
        # NOTE(review): with the default limits dict this condition is always
        # true ("allow_spaces": False is among the values), so the warning
        # below fires even when no numeric limits are set.
        if set(list(self.limits.values())) != {None}:
            logging.warning(f"The obserations will be limited by {self.limits}")
        (
            self.semantic_knowledge,
            self.heads,
            self.relations,
            self.tails,
        ) = self.load_semantic_knowledge(
            semantic_knowledge_path,
            limit_heads=self.limits["heads"],
            limit_tails=self.limits["tails"],
            allow_spaces=self.limits["allow_spaces"],
            disjoint_entities=disjoint_entities,
        )
        assert len(self.relations) == 1, "At the moment there is only one relation."
        self.names = self.read_names(
            names_path, self.limits["names"], self.limits["allow_spaces"]
        )
        # Each human gets exactly one (distinct) head object in reset().
        assert len(self.names) <= len(self.heads)
        if disjoint_entities:
            # Disjointness check: a set union has the same size as the sum of
            # the individual lists only if no element appears twice.
            lhs = len(set(self.relations + self.names + self.heads + self.tails))
            rhs = (
                len(self.relations)
                + len(self.names)
                + len(self.heads)
                + len(self.tails)
            )
            assert lhs == rhs
        self.weighting_mode = weighting_mode
        self.probs = probs
        self.max_step = max_step
        self.num_agents = num_agents
        # Our state / action space is quite complex. Here we just make a dummy
        # observation space to bypass the sanity check.
        self.observation_space = gym.spaces.Discrete(1)
        self.action_space = gym.spaces.Discrete(1)
    def reset(self) -> Tuple:
        """Reset the environment.
        This will place N_{humans} humans in the room. Each human only has one object,
        which will be placed by the human in a random location.
        Returns ((observations, question), info).
        """
        # NOTE(review): gymnasium's Env.reset normally accepts seed/options
        # keyword arguments; this signature does not — confirm compatibility
        # with the gymnasium version in use.
        self.step_counter = 0
        random.shuffle(self.names)
        random.shuffle(self.heads)
        self.room = []
        for name, head in zip(self.names, self.heads):
            relation = self.relations[0]  # At the moment there is only one relation.
            tail = self.generate_tail(head, relation)
            if tail is not None:
                self.room.append([f"{name}'s {head}", relation, f"{name}'s {tail}"])
        # One independent, endlessly cycling visiting order per agent.
        navigate = [[i for i in range(len(self.room))] for _ in range(self.num_agents)]
        for navigate_ in navigate:
            random.shuffle(navigate_)
        self.navigate = [cycle(navigate_) for navigate_ in navigate]
        observations = self.generate_observations()
        question, self.answer = self.generate_qa()
        info = {}
        return (observations, question), info
    def generate_observations(self) -> list:
        """Generate a random observation per agent.
        Returns
        -------
        observations: dict keyed by agent index, e.g.,
            {0: ["Tae's laptop", "AtLocation", "Tae's desk", 10]}
            The last element in the list accounts for the timestamp.
        """
        # Each agent observes the next human on its own cyclic route.
        observations = {
            i: deepcopy([*self.room[next(navigate_)], self.step_counter])
            for i, navigate_ in enumerate(self.navigate)
        }
        return observations
    def generate_qa(self) -> Tuple[list, str]:
        """Generate a question and the answer.
        Returns
        -------
        question: e.g., ["Tae's laptop", "AtLocation"]
        answer: e.g., "Tae's desk"
        """
        random_choice = random.choice(self.room)
        question = random_choice[:2]
        # The answer is the tail with the human name stripped off.
        answer = remove_name(random_choice[-1])
        return question, answer
    def generate_tail(self, head: str, relation: str) -> str:
        """This simulates humans placing their objects in their desired locations.
        Note that "head" shouldn't include a human name.
        Returns None when the head has no known tails for the relation.
        """
        if random.random() < self.probs["commonsense"]:
            logging.debug(f"Generating a common location for {head} ...")
            tails = self.semantic_knowledge[head][relation]
            if len(tails) == 0:
                return None
            if self.weighting_mode == "weighted":
                # Sample a tail proportionally to its weight.
                tail = random.choices(
                    [tail["tail"] for tail in tails],
                    weights=[tail["weight"] for tail in tails],
                    k=1,
                )[0]
            elif self.weighting_mode == "highest":
                # Deterministically pick the highest-weight tail.
                tail = sorted(
                    self.semantic_knowledge[head][relation], key=lambda x: x["weight"]
                )[-1]["tail"]
            else:
                raise ValueError
        else:
            logging.debug(f"Generating a NON common location for {head} ...")
            while True:
                tail = random.choice(self.tails)
                # NOTE(review): semantic_knowledge[head][relation] is a list of
                # dicts ({"tail": ..., "weight": ...}), so a plain string is
                # never "in" it; this check effectively always passes.
                if tail not in self.semantic_knowledge[head][relation]:
                    break
        return tail
    def renew(self) -> None:
        """Renew the room.
        This is done every time when the agent takes a step. This is done to simulate
        that the room changes. People move. They place their objects in different
        locations. They also change their objects. All of these are done in a random
        manner. The randomness can be adjusted from the argument `probs`.
        With the chance of probs["new_location"], an object will be placed at a new
        location.
        When the object is placed at a new location, with the chance of
        probs["commonsense"], an object will be placed at a commonsense-knowledge spot.
        With the chance of probs["new_object"], the person with the object will have
        a new random object.
        With the chance of probs["switch_person"], two persons switch their spots.
        """
        room = []
        for head, relation, tail in self.room:
            name1, head = split_name_entity(head)
            name2, tail = split_name_entity(tail)
            assert name1 == name2, "we don't do name mixing at this moment."
            if random.random() < self.probs["new_object"]:
                # Swap the human's object for a different random one and place it.
                while True:
                    new_head = random.choice(self.heads)
                    if new_head != head:
                        head = new_head
                        # NOTE(review): generate_tail may return None here,
                        # which the f-string below renders as "...'s None".
                        tail = self.generate_tail(head, relation)
                        break
            else:
                if random.random() < self.probs["new_location"]:
                    # Keep the object but move it to a different location.
                    while True:
                        new_tail = self.generate_tail(head, relation)
                        if new_tail != tail:
                            tail = new_tail
                            break
            room.append(
                [
                    f"{name1}'s {deepcopy(head)}",
                    deepcopy(relation),
                    f"{name2}'s {deepcopy(tail)}",
                ],
            )
        if random.random() < self.probs["switch_person"]:
            # Pick two distinct indices and swap their triples.
            i, j = random.sample(
                range(
                    0,
                    len(
                        self.room,
                    ),
                ),
                2,
            )
            room[i], room[j] = room[j], room[i]
        self.room = room
    def step(self, action: str) -> Tuple[Tuple, int, bool, bool, dict]:
        """An agent takes an action.
        Args
        ----
        action: This is the agent's answer to the previous question.
        Returns ((observations, question), reward, done, truncated, info).
        """
        # Case-insensitive comparison against the stored answer.
        if str(action).lower() == self.answer.lower():
            logging.info(
                f"The prediction ({action}) matches the answer ({self.answer})!"
            )
            reward = CORRECT
        else:
            logging.info(
                f"The prediction ({action}) does NOT match the answer ({self.answer})!"
            )
            reward = WRONG
        self.step_counter += 1
        # Things will change in the room!
        self.renew()
        observations = self.generate_observations()
        question, self.answer = self.generate_qa()
        info = {}
        if self.step_counter >= self.max_step:
            done = True
        else:
            done = False
        truncated = False
        return (observations, question), reward, done, truncated, info
    def render(self, mode="console") -> None:
        # Console rendering is a no-op; any other mode is unsupported.
        if mode != "console":
            raise NotImplementedError()
        else:
            pass
    def close(self):
        # Nothing to release.
        pass
    @staticmethod
    def read_names(
        path: str = "./data/top-human-names",
        limit_names: int = None,
        allow_spaces: bool = False,
    ) -> list:
        """Read 20 most common names.
        Args
        ----
        path: The path to the top 20 human name list.
        limit_names: Limit the number of names
        allow_spaces: Whether to include words that have spaces
            (e.g., corner of two streets)
        Returns
        -------
        names: human names (e.g., James)
        """
        names = read_lines(path)
        if not allow_spaces:
            # Multi-word names are stored with underscores; keep single words only.
            names = [name for name in names if len(name.split("_")) == 1]
        if limit_names:
            logging.warning(f"The number of names will be limited to {limit_names}")
            # Keep the shortest names when limiting.
            names = sorted(names, key=len)
            names = names[:limit_names]
        logging.info(f"Reading {path} complete! There are {len(names)} names in total")
        return names
    @staticmethod
    def load_semantic_knowledge(
        path: str,
        limit_heads: int = None,
        limit_tails: int = None,
        allow_spaces: bool = False,
        disjoint_entities: bool = True,
    ) -> Tuple[list, list, list, list]:
        """Load saved semantic knowledge.
        Args
        ----
        path: the path to the pretrained semantic memory.
        limit_heads: Limit the number of heads (e.g., 10)
        limit_tails: Limit the number of tails per heads (e.g., 1)
        allow_spaces: Whether to include words that have spaces
            (e.g., corner of two streets)
        disjoint_entities: Whether to force that there are no common elements between
            entities.
        Returns
        -------
        semantic_knowledge
        heads
        relations
        tails
        """
        logging.debug(f"loading the semantic knowledge from {path}...")
        semantic_knowledge = read_json(path)
        heads = sorted(list(set(semantic_knowledge.keys())))
        if disjoint_entities:
            logging.warning("Tails that are heads will be removed.")
            # Drop every tail entry whose surface form is also a head.
            semantic_knowledge = {
                key: {
                    key_: [tail for tail in val_ if tail["tail"] not in heads]
                    for key_, val_ in val.items()
                }
                for key, val in semantic_knowledge.items()
            }
        # Remove relations whose tail list became empty (or only falsy tails).
        semantic_knowledge = {
            key: {
                key_: val_
                for key_, val_ in val.items()
                if len([tail for tail in val_ if tail["tail"]]) > 0
            }
            for key, val in semantic_knowledge.items()
        }
        # Remove heads that lost all their relations.
        semantic_knowledge = {
            key: val for key, val in semantic_knowledge.items() if len(val) > 0
        }
        logging.info("empty entities are removed")
        # sort the semantic knowledge by its highest weight to be sure.
        semantic_knowledge = {
            key: {
                key_: sorted(val_, key=lambda x: -x["weight"])
                for key_, val_ in val.items()
            }
            for key, val in semantic_knowledge.items()
        }
        if not allow_spaces:
            # Keep only single-word heads and single-word tails
            # (multi-word entries are stored with underscores).
            semantic_knowledge = {
                key: {
                    key_: [v for v in val_ if len(v["tail"].split("_")) == 1]
                    for key_, val_ in val.items()
                }
                for key, val in semantic_knowledge.items()
                if (len(key.split("_"))) == 1
            }
        if limit_heads:
            logging.warning(f"Limiting the number of heads to {limit_heads} ...")
            semantic_knowledge = {
                key: val
                for idx, (key, val) in enumerate(semantic_knowledge.items())
                if idx < limit_heads
            }
        if limit_tails:
            logging.warning(
                f"Limiting the number of tails per head to {limit_tails} ..."
            )
            # Lists were sorted by descending weight above, so the slice keeps
            # the highest-weight tails.
            semantic_knowledge = {
                key: {key_: val_[:limit_tails] for key_, val_ in val.items()}
                for key, val in semantic_knowledge.items()
            }
        # Recompute the flat entity lists from the filtered knowledge.
        heads = sorted(list(set(semantic_knowledge.keys())))
        tails = sorted(
            list(
                set(
                    [
                        foo["tail"]
                        for key, val in semantic_knowledge.items()
                        for key_, val_ in val.items()
                        for foo in val_
                    ]
                )
            )
        )
        relations = sorted(
            list(
                set(
                    [
                        key_
                        for key, val in semantic_knowledge.items()
                        for key_, val_ in val.items()
                    ]
                )
            )
        )
        logging.info(f"semantic knowledge successfully loaded from {path}!")
        return semantic_knowledge, heads, relations, tails
# MQTT connection return codes mapped to human-readable messages
# (0 means success, hence None).
MQTT_ERROR_MESSAGES = {
    0: None,
    1: "Bad protocol",
    2: "Bad client id",
    3: "Server unavailable",
    4: "Bad username or password",
    5: "Not authorised",
}
# Error codes reported by the Roomba, mapped to short descriptions.
# Some codes intentionally share the same text (e.g. 8 and 11).
ROOMBA_ERROR_MESSAGES = {
    0: "None",
    1: "Left wheel off floor",
    2: "Main brushes stuck",
    3: "Right wheel off floor",
    4: "Left wheel stuck",
    5: "Right wheel stuck",
    6: "Stuck near a cliff",
    7: "Left wheel error",
    8: "Bin error",
    9: "Bumper stuck",
    10: "Right wheel error",
    11: "Bin error",
    12: "Cliff sensor issue",
    13: "Both wheels off floor",
    14: "Bin missing",
    15: "Reboot required",
    16: "Bumped unexpectedly",
    17: "Path blocked",
    18: "Docking issue",
    19: "Undocking issue",
    20: "Docking issue",
    21: "Navigation problem",
    22: "Navigation problem",
    23: "Battery issue",
    24: "Navigation problem",
    25: "Reboot required",
    26: "Vacuum problem",
    27: "Vacuum problem",
    29: "Software update needed",
    30: "Vacuum problem",
    31: "Reboot required",
    32: "Smart map problem",
    33: "Path blocked",
    34: "Reboot required",
    35: "Unrecognised cleaning pad",
    36: "Bin full",
    37: "Tank needed refilling",
    38: "Vacuum problem",
    39: "Reboot required",
    40: "Navigation problem",
    41: "Timed out",
    42: "Localization problem",
    43: "Navigation problem",
    44: "Pump issue",
    45: "Lid open",
    46: "Low battery",
    47: "Reboot required",
    48: "Path blocked",
    52: "Pad required attention",
    53: "Software update required",
    65: "Hardware problem detected",
    66: "Low memory",
    68: "Hardware problem detected",
    73: "Pad type changed",
    74: "Max area reached",
    75: "Navigation problem",
    76: "Hardware problem detected",
    88: "Back-up refused",
    89: "Mission runtime too long",
    101: "Battery isn't connected",
    102: "Charging error",
    103: "Charging error",
    104: "No charge current",
    105: "Charging current too low",
    106: "Battery too warm",
    107: "Battery temperature incorrect",
    108: "Battery communication failure",
    109: "Battery error",
    110: "Battery cell imbalance",
    111: "Battery communication failure",
    112: "Invalid charging load",
    114: "Internal battery failure",
    115: "Cell failure during charging",
    116: "Charging error of Home Base",
    118: "Battery communication failure",
    119: "Charging timeout",
    120: "Battery not initialized",
    122: "Charging system error",
    123: "Battery not initialized",
}
# "Not ready" codes mapped to short reasons why a mission cannot start.
ROOMBA_READY_MESSAGES = {
    0: 'N/A',
    2: 'Uneven Ground',
    15: 'Low Battery',
    39: 'Pending',
    48: 'Path Blocked'
}
# Mission phase identifiers reported by the robot, mapped to display names.
ROOMBA_STATES = {
    "charge": "Charging",
    "new": "New Mission",
    "run": "Running",
    "resume": "Running",
    "hmMidMsn": "Recharging",
    "recharge": "Recharging",
    "stuck": "Stuck",
    "hmUsrDock": "User Docking",
    "dock": "Docking",
    "dockend": "Docking - End Mission",
    "cancelled": "Cancelled",
    "completed": "Mission Completed",
    "stop": "Stopped",
    "pause": "Paused",
    "hmPostMsn": "End Mission",
    "evac": "Emptying Bin",
    "chargingerror": "Base Unplugged",
    "": None,
}
import io
import os
from pathlib import Path
from typing import Union, Type, TypeVar
import dotenv
from environment_files import find_environment
from liblog import get_logger
import root_dir.data_dir as data_dir
# Module-level logger (project-provided helper).
logger = get_logger()
# Type hint help: DD is a generic placeholder for DataDir subclasses, used so
# DataRoot.get_data_dir can return the concrete class requested by the caller.
DD = TypeVar("DD", bound=data_dir.DataDir)
class DataRoot:
    """Wrapper around the root data directory (a pathlib.Path).

    Attribute access not found on DataRoot is delegated to the wrapped Path,
    so a DataRoot instance can mostly be used like a Path object.
    """
    logger = get_logger(name_suffix=".DataRoot")
    def __init__(
        self,
        data_root: Union[str, Path],
        create_if_needed: bool = False,
        create_parents_if_needed: bool = False,
    ):
        """Wrap `data_root`, optionally creating the directory.

        Args:
            data_root: path of the data root directory.
            create_if_needed: create the directory if it does not exist.
            create_parents_if_needed: also create missing parent directories.

        Raises:
            FileNotFoundError: the directory does not exist and
                create_if_needed is False.
        """
        self.logger.info("Using provided data_root:")
        self.logger.info(str(data_root))
        data_root = Path(data_root).absolute()
        try:
            # NOTE(review): assert-based control flow is stripped under
            # `python -O`; an explicit `if not data_root.exists():` would be
            # more robust here.
            assert data_root.exists()
        except AssertionError as e:
            if create_if_needed:
                self.logger.info("Creating data root directory:")
                self.logger.info(str(data_root))
                data_root.mkdir(exist_ok=True, parents=create_parents_if_needed)
            else:
                raise FileNotFoundError() from e
        self.data_root = data_root
    def __getattr__(self, item):
        # Delegate any attribute not found on DataRoot to the wrapped Path.
        return getattr(self.data_root, item)
    @classmethod
    def from_env_var(
        cls,
        env_var_name: str = "DATA_ROOT",
        env_var_prefix: str = None,
        capitalize_env_var: bool = True,
        override_env_var: bool = False,
        set_env_var: bool = True,
        explicit_env_file_path: Union[str, Path] = None,
        env_file_name: str = ".env",
        find_env_file_from_main: bool = False,
        find_env_file_from_cwd: bool = False,
        stack_height_above_caller: int = 0,
        stop_env_file_search_at: Union[str, Path] = "root",
        create_if_needed: bool = False,
        create_parents_if_needed: bool = False,
    ):
        """Build a DataRoot from an environment variable or a .env file.

        Resolution order:
        1. If `override_env_var` is False and the (prefixed/capitalized)
           variable is already in os.environ, use that value directly.
        2. Otherwise locate an env file — via `explicit_env_file_path`, or by
           searching with `find_environment` (semantics of from_main/from_cwd/
           from_caller come from the external environment_files package —
           TODO confirm against its docs) — and read the variable from it.

        If `set_env_var` is True the resolved value is written back into
        os.environ.

        Raises:
            KeyError: the variable is missing from the located env file.
        """
        env_var_name = cls.setup_env_var_name(
            env_var_name, env_var_prefix, capitalize_env_var
        )
        if not override_env_var:
            try:
                # Fast path: the variable is already set in the environment.
                data_root = os.environ[env_var_name]
                return cls(
                    data_root,
                    create_if_needed=create_if_needed,
                    create_parents_if_needed=create_parents_if_needed,
                )
            except KeyError:
                pass
        if explicit_env_file_path is not None:
            explicit_env_file_path = Path(explicit_env_file_path).absolute()
            if explicit_env_file_path.is_file():
                # A file path was given: split it into directory + file name.
                env_file_name = explicit_env_file_path.name
                explicit_env_file_path = explicit_env_file_path.parent
            else:
                assert explicit_env_file_path.is_dir()
        try:
            env_file = find_environment(
                file_name=env_file_name,
                from_main=find_env_file_from_main,
                from_cwd=find_env_file_from_cwd,
                from_path=explicit_env_file_path,
                stop_at=stop_env_file_search_at,
                raise_error=False,
            )
        except ValueError:
            # Fall back to searching upward from the caller's frame.
            logger.error("default run mode: from_caller")
            env_file = find_environment(
                file_name=env_file_name,
                from_caller=True,
                stop_at=stop_env_file_search_at,
                # +1 accounts for this classmethod's own stack frame.
                stack_height_above_caller=stack_height_above_caller + 1,
            )
        env_values = dotenv.dotenv_values(env_file)
        data_root = env_values[env_var_name]
        if set_env_var:
            os.environ[env_var_name] = data_root
        return cls(
            data_root,
            create_if_needed=create_if_needed,
            create_parents_if_needed=create_parents_if_needed,
        )
    @classmethod
    def setup_env_file(
        cls,
        env_file_path: Union[str, Path],
        data_root: Union[str, Path],
        env_file_name: str = ".env",
        env_var_name: str = "DATA_ROOT",
        env_var_prefix: str = None,
        capitalize_env_var: bool = True,
    ):
        """Append `env_var_name=data_root` to an env file if not already set.

        If the variable is already present it must resolve to the same path,
        otherwise an AssertionError is raised.
        """
        env_file_path = Path(env_file_path).absolute()
        if env_file_path.is_dir():
            env_file_path = env_file_path / env_file_name
        env_var_name = cls.setup_env_var_name(
            env_var_name, env_var_prefix, capitalize_env_var
        )
        data_root = Path(data_root)
        env_values = dotenv.dotenv_values(env_file_path)
        if env_var_name in env_values:
            assert (
                Path(env_values[env_var_name]).resolve() == data_root.resolve()
            ), f"Variable {env_var_name} is already set to a different value in env file {str(env_file_path)}"
            cls.logger.info(
                f"Variable {env_var_name} is already set in env file {str(env_file_path)}"
            )
        else:
            with open(env_file_path, "rb+") as env_file:
                # Peek at the last byte so the appended entry starts on its
                # own line.
                # NOTE(review): seek(-1, SEEK_END) appears to fail on an empty
                # file (negative position) — confirm and guard if needed.
                env_file.seek(-1, io.SEEK_END)
                last_char = env_file.read(1)
                if len(last_char) == 0 or last_char == b"\n":
                    env_file.write(
                        f"{env_var_name}={str(data_root.absolute())}".encode()
                    )
                else:
                    env_file.write(
                        f"\n{env_var_name}={str(data_root.absolute())}".encode()
                    )
    @staticmethod
    def setup_env_var_name(env_var_name, env_var_prefix, capitalize_env_var):
        """Return the effective variable name: optional prefix + optional upper-casing."""
        if env_var_prefix is not None:
            env_var_name = f"{env_var_prefix}_{env_var_name}"
        if capitalize_env_var:
            env_var_name = env_var_name.upper()
        return env_var_name
    def get_data_dir(
        self,
        relpath: Path,
        data_dir_cls: Type[DD] = data_dir.DataDir,
        **kwargs,
    ) -> DD:
        """Return a `data_dir_cls` instance rooted at this DataRoot under `relpath`."""
        return data_dir_cls(data_relpath=relpath, data_root=self, **kwargs)
import tldextract
import validators
import sys
import argparse
from typing import Any, Iterator, Optional
import logging
# Use getLogger() so the module logger is registered in the logging hierarchy;
# a directly-instantiated Logger is not managed by the logging manager and is
# not affected by basicConfig/dictConfig configuration.
logger = logging.getLogger(__name__)
def parse_args():
    """Build the CLI parser, parse argv, and normalize the depth bounds."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "domain",
        nargs="*",
        help="domain or file with domains to process. "
        "If none then stdin will be use",
    )
    parser.add_argument(
        "-d", "--depth",
        type=int,
        default=1,
        help="Depth of retrieved subdomains. 1 returns the root domain."
        " Default = 1"
    )
    parser.add_argument(
        "-D", "--max-depth",
        type=int,
        default=1,
        help="Max depth of retrieved subdomains. Default = 1"
    )
    parser.add_argument(
        "-u", "--unique",
        action="store_true",
        help="Avoid retrieving duplicate domains"
    )
    parsed = parser.parse_args()
    # Depth is at least 1, and max_depth is never smaller than depth.
    parsed.depth = max(parsed.depth, 1)
    parsed.max_depth = max(parsed.max_depth, parsed.depth)
    return parsed
def main():
    """Entry point: print the (sub)domains derived from every input target."""
    args = parse_args()
    try:
        seen = set()
        for domain in read_text_targets(args.domain):
            roots = get_root_domains(
                domain,
                min_depth=args.depth - 1,
                max_depth=args.max_depth - 1,
            )
            for root_domain in roots:
                # With --unique, skip domains that were already printed.
                if args.unique and root_domain in seen:
                    continue
                print(root_domain)
                seen.add(root_domain)
    except KeyboardInterrupt:
        pass
def get_root_domains(domain, min_depth=0, max_depth=0):
    """Return the registrable root of *domain* and progressively deeper
    subdomains, from depth ``min_depth`` up to ``max_depth``.

    Depth 0 is the root domain itself (domain + public suffix); depth k keeps
    the k right-most subdomain labels.
    """
    parts = tldextract.extract(domain)
    base = "{}.{}".format(parts.domain, parts.suffix)
    labels = parts.subdomain.split(".") if parts.subdomain else []
    results = []
    start = min_depth
    if start == 0:
        results.append(base)
        start = 1
    for level in range(start, max_depth + 1):
        # Stop once we would need more labels than the domain has.
        if level > len(labels):
            break
        results.append("{}.{}".format(".".join(labels[-level:]), base))
    return results
def read_text_targets(targets: Any) -> Iterator[str]:
    """Yield cleaned target lines: expand `targets` (strings, files or stdin)
    via read_targets, then filter/strip them via read_text_lines."""
    yield from read_text_lines(read_targets(targets))
def read_targets(targets: Optional[Any]) -> Iterator[str]:
    """Process the program input so that an array of strings or the lines of
    a file can be read in a standard way. In case nothing is provided, input
    will be taken from stdin.

    Each element that names a readable file is expanded into that file's
    lines; any other element is yielded as-is.
    """
    if not targets:
        yield from sys.stdin
        # Bug fix: return here. Previously execution fell through into the
        # loop below, which raised TypeError when `targets` was None.
        return
    for target in targets:
        try:
            with open(target) as fi:
                yield from fi
        except FileNotFoundError:
            yield target
def read_text_lines(fd: Iterator[str]) -> Iterator[str]:
    """To read lines from a file and skip empty lines or those commented
    (starting by #). Lines that are not valid domains are skipped with a
    warning.
    """
    for line in fd:
        line = line.strip()
        if line == "":
            continue
        if line.startswith("#"):
            continue
        if validators.domain(line) is not True:
            # Bug fix: Logger.warn is a deprecated alias; use warning().
            logger.warning("Invalid domain {}".format(line))
            continue
        yield line
from . import _librootnumpy
# Public API of this module.
__all__ = [
    'random_sample',
]
def random_sample(obj, n_samples, seed=None):
    """Create a random array by sampling a ROOT function or histogram.
    Parameters
    ----------
    obj : TH[1|2|3] or TF[1|2|3]
        The ROOT function or histogram to sample.
    n_samples : positive int
        The number of random samples to generate.
    seed : None, positive int or 0, optional (default=None)
        The random seed, set via ROOT.gRandom.SetSeed(seed):
        http://root.cern.ch/root/html/TRandom3.html#TRandom3:SetSeed
        If 0, the seed will be random. If None (the default), ROOT.gRandom will
        not be touched and the current seed will be used.
    Returns
    -------
    array : a numpy array
        A numpy array with a shape corresponding to the dimensionality of the
        function or histogram. A flat array is returned when sampling TF1 or
        TH1. An array with shape [n_samples, n_dimensions] is returned when
        sampling TF2, TF3, TH2, or TH3.
    Examples
    --------
    >>> from root_numpy import random_sample
    >>> from ROOT import TF1, TF2, TF3
    >>> random_sample(TF1("f1", "TMath::DiLog(x)"), 10000, seed=1)
    array([ 0.68307934,  0.9988919 ,  0.87198158, ...,  0.50331049,
            0.53895257,  0.57576984])
    >>> random_sample(TF2("f2", "sin(x)*sin(y)/(x*y)"), 10000, seed=1)
    array([[ 0.93425084,  0.39990616],
           [ 0.00819315,  0.73108525],
           [ 0.00307176,  0.00427081],
           ...,
           [ 0.66931215,  0.0421913 ],
           [ 0.06469985,  0.10253632],
           [ 0.31059832,  0.75892702]])
    >>> random_sample(TF3("f3", "sin(x)*sin(y)*sin(z)/(x*y*z)"), 10000, seed=1)
    array([[ 0.03323949,  0.95734415,  0.39775191],
           [ 0.07093748,  0.01007775,  0.03330135],
           [ 0.80786963,  0.13641129,  0.14655269],
           ...,
           [ 0.96223632,  0.43916482,  0.05542078],
           [ 0.06631163,  0.0015063 ,  0.46550416],
           [ 0.88154752,  0.24332142,  0.66746564]])
    """
    import ROOT
    if n_samples <= 0:
        raise ValueError("n_samples must be greater than 0")
    if seed is not None:
        if seed < 0:
            raise ValueError("seed must be positive or 0")
        # Seeding is global: it affects ROOT.gRandom for all subsequent calls.
        ROOT.gRandom.SetSeed(seed)
    # functions
    # The most specific classes are tested first; only if obj is none of the
    # higher-dimensional variants does it fall through to the 1-D sampler.
    if isinstance(obj, ROOT.TF1):
        if isinstance(obj, ROOT.TF3):
            return _librootnumpy.sample_f3(
                ROOT.AsCObject(obj), n_samples)
        elif isinstance(obj, ROOT.TF2):
            return _librootnumpy.sample_f2(
                ROOT.AsCObject(obj), n_samples)
        return _librootnumpy.sample_f1(ROOT.AsCObject(obj), n_samples)
    # histograms
    elif isinstance(obj, ROOT.TH1):
        if isinstance(obj, ROOT.TH3):
            return _librootnumpy.sample_h3(
                ROOT.AsCObject(obj), n_samples)
        elif isinstance(obj, ROOT.TH2):
            return _librootnumpy.sample_h2(
                ROOT.AsCObject(obj), n_samples)
        return _librootnumpy.sample_h1(ROOT.AsCObject(obj), n_samples)
    raise TypeError(
        "obj must be a ROOT function or histogram")
import numpy as np
from . import _librootnumpy
# Public API of this module.
__all__ = [
    'fill_hist',
    'fill_profile',
    'hist2array',
    'array2hist',
]
# Map ROOT type codes (as used by the TArray{C,S,I,L,F,D} classes) to NumPy
# dtype strings.
DTYPE_ROOT2NUMPY = dict(C='i1', S='i2', I='i4', L='i8', F='f4', D='f8')
# Lookup table: dimensionality -> {type code -> _librootnumpy converter
# function} (e.g. ARRAY_NUMPY2ROOT[2]['F'] is _librootnumpy.h2f_array).
ARRAY_NUMPY2ROOT = dict(
    [(ndim, dict([
        (hist_type,
         getattr(_librootnumpy, 'h{0}{1}_array'.format(
             ndim, hist_type.lower())))
        for hist_type in 'DFISC']))
     for ndim in (1, 2, 3)])
def fill_hist(hist, array, weights=None, return_indices=False):
    """Fill a ROOT histogram with a NumPy array.

    Parameters
    ----------
    hist : ROOT TH1, TH2, or TH3
        The histogram to fill; its dimensionality determines the expected
        shape of ``array``.
    array : numpy array
        The values to fill the histogram with: a flat array for a 1D
        histogram, otherwise shape [n_samples, n_dimensions] where the number
        of columns matches the histogram's dimensionality.
    weights : numpy array, optional
        A flat array with one weight per sample in ``array``.
    return_indices : bool, optional (default=False)
        If True, return the bin index filled for each sample.

    Returns
    -------
    indices : numpy array or None
        The filled bin indices when ``return_indices`` is True, else None.
    """
    import ROOT
    values = np.asarray(array, dtype=np.double)
    if weights is not None:
        weights = np.asarray(weights, dtype=np.double)
        if weights.shape[0] != values.shape[0]:
            raise ValueError("array and weights must have the same length")
        if weights.ndim != 1:
            raise ValueError("weight must be 1-dimensional")
    # Dispatch on the histogram class, most specific first.
    if isinstance(hist, ROOT.TH3):
        expected_ndim, ncols, fill = 2, 3, _librootnumpy.fill_h3
    elif isinstance(hist, ROOT.TH2):
        expected_ndim, ncols, fill = 2, 2, _librootnumpy.fill_h2
    elif isinstance(hist, ROOT.TH1):
        expected_ndim, ncols, fill = 1, None, _librootnumpy.fill_h1
    else:
        raise TypeError(
            "hist must be an instance of ROOT.TH1, ROOT.TH2, or ROOT.TH3")
    if values.ndim != expected_ndim:
        raise ValueError(
            "array must be {0}-dimensional".format(expected_ndim))
    if ncols is not None and values.shape[1] != ncols:
        raise ValueError(
            "length of the second dimension must equal "
            "the dimension of the histogram")
    return fill(ROOT.AsCObject(hist), values, weights, return_indices)
def fill_profile(profile, array, weights=None, return_indices=False):
    """Fill a ROOT profile with a NumPy array.

    Parameters
    ----------
    profile : ROOT TProfile, TProfile2D, or TProfile3D
        The profile to fill.
    array : numpy array of shape [n_samples, n_dimensions + 1]
        The sample values; there must be one more column than the
        dimensionality of the profile.
    weights : numpy array, optional
        A flat array with one weight per sample in ``array``.
    return_indices : bool, optional (default=False)
        If True, return the bin index filled for each sample.

    Returns
    -------
    indices : numpy array or None
        The filled bin indices when ``return_indices`` is True, else None.
    """
    import ROOT
    values = np.asarray(array, dtype=np.double)
    if values.ndim != 2:
        raise ValueError("array must be 2-dimensional")
    if values.shape[1] != profile.GetDimension() + 1:
        raise ValueError(
            "there must be one more column than the "
            "dimensionality of the profile")
    if weights is not None:
        weights = np.asarray(weights, dtype=np.double)
        if weights.shape[0] != values.shape[0]:
            raise ValueError("array and weights must have the same length")
        if weights.ndim != 1:
            raise ValueError("weight must be 1-dimensional")
    # Dispatch on the profile class; the most specific classes are tested
    # first, mirroring the TH3/TH2/TH1 ordering used by fill_hist.
    for profile_cls, fill in (
        (ROOT.TProfile3D, _librootnumpy.fill_p3),
        (ROOT.TProfile2D, _librootnumpy.fill_p2),
        (ROOT.TProfile, _librootnumpy.fill_p1),
    ):
        if isinstance(profile, profile_cls):
            return fill(ROOT.AsCObject(profile), values, weights,
                        return_indices)
    raise TypeError(
        "profile must be an instance of "
        "ROOT.TProfile, ROOT.TProfile2D, or ROOT.TProfile3D")
def hist2array(hist, include_overflow=False, copy=True, return_edges=False):
    """Convert a ROOT histogram into a NumPy array
    Parameters
    ----------
    hist : ROOT TH1, TH2, TH3, THn, or THnSparse
        The ROOT histogram to convert into an array
    include_overflow : bool, optional (default=False)
        If True, the over- and underflow bins will be included in the
        output numpy array. These bins are excluded by default.
    copy : bool, optional (default=True)
        If True (the default) then copy the underlying array, otherwise the
        NumPy array will view (and not own) the same memory as the ROOT
        histogram's array.
    return_edges : bool, optional (default=False)
        If True, also return the bin edges along each axis.
    Returns
    -------
    array : numpy array
        A NumPy array containing the histogram bin values
    edges : list of numpy arrays
        A list of numpy arrays where each array contains the bin edges along
        the corresponding axis of ``hist``. Overflow and underflow bins are not
        included.
    Raises
    ------
    TypeError
        If hist is not a ROOT histogram.
    See Also
    --------
    array2hist
    """
    import ROOT
    # Determine dimensionality and shape.
    # Shapes include +2 bins per axis for the under- and overflow bins.
    simple_hist = True
    if isinstance(hist, ROOT.TH3):
        shape = (hist.GetNbinsZ() + 2,
                 hist.GetNbinsY() + 2,
                 hist.GetNbinsX() + 2)
    elif isinstance(hist, ROOT.TH2):
        shape = (hist.GetNbinsY() + 2, hist.GetNbinsX() + 2)
    elif isinstance(hist, ROOT.TH1):
        shape = (hist.GetNbinsX() + 2,)
    elif isinstance(hist, ROOT.THnBase):
        shape = tuple([hist.GetAxis(i).GetNbins() + 2
                       for i in range(hist.GetNdimensions())])
        simple_hist = False
    else:
        raise TypeError(
            "hist must be an instance of ROOT.TH1, "
            "ROOT.TH2, ROOT.TH3, or ROOT.THnBase")
    # Determine the corresponding numpy dtype from the histogram's bin type
    # (for/else: the else branch runs only if no type code matched).
    if simple_hist:
        for hist_type in 'DFISC':
            if isinstance(hist, getattr(ROOT, 'TArray{0}'.format(hist_type))):
                break
        else:
            raise AssertionError(
                "hist is somehow an instance of TH[1|2|3] "
                "but not TArray[D|F|I|S|C]")
    else:  # THn, THnSparse
        if isinstance(hist, ROOT.THnSparse):
            cls_string = 'THnSparse{0}'
        else:
            cls_string = 'THn{0}'
        for hist_type in 'CSILFD':
            if isinstance(hist, getattr(ROOT, cls_string.format(hist_type))):
                break
        else:
            raise AssertionError(
                "unsupported THn or THnSparse bin type")
    if simple_hist:
        # Constuct a NumPy array viewing the underlying histogram array
        if hist_type == 'C':
            # 'C' (int8) buffers go through a dedicated helper.
            array_func = getattr(_librootnumpy,
                                 'array_h{0}c'.format(len(shape)))
            array = array_func(ROOT.AsCObject(hist))
            array.shape = shape
        else:
            # Zero-copy view over the histogram's internal TArray buffer.
            dtype = np.dtype(DTYPE_ROOT2NUMPY[hist_type])
            array = np.ndarray(shape=shape, dtype=dtype,
                               buffer=hist.GetArray())
    else:  # THn THnSparse
        dtype = np.dtype(DTYPE_ROOT2NUMPY[hist_type])
        if isinstance(hist, ROOT.THnSparse):
            array = _librootnumpy.thnsparse2array(ROOT.AsCObject(hist),
                                                  shape, dtype)
        else:
            array = _librootnumpy.thn2array(ROOT.AsCObject(hist),
                                            shape, dtype)
    if return_edges:
        if simple_hist:
            ndims = hist.GetDimension()
            axis_getters = ['GetXaxis', 'GetYaxis', 'GetZaxis'][:ndims]
        else:
            ndims = hist.GetNdimensions()
            axis_getters = ['GetAxis'] * ndims
        edges = []
        for idim, axis_getter in zip(range(ndims), axis_getters):
            # GetXaxis expects 0 parameters while we need the axis in GetAxis
            ax = getattr(hist, axis_getter)(*(() if simple_hist else (idim,)))
            # `edges` is Nbins + 1 in order to have the last bin's upper edge as well
            edges.append(np.empty(ax.GetNbins() + 1, dtype=np.double))
            # load the lower edges into `edges`
            ax.GetLowEdge(edges[-1])
            # Get the upper edge of the last bin
            edges[-1][-1] = ax.GetBinUpEdge(ax.GetNbins())
        if not include_overflow:
            # Remove overflow and underflow bins
            array = array[tuple([slice(1, -1) for idim in range(array.ndim)])]
        if simple_hist:
            # Preserve x, y, z -> axis 0, 1, 2 order
            array = np.transpose(array)
        if copy:
            array = np.copy(array)
        if return_edges:
            return array, edges
        return array
def array2hist(array, hist, errors=None):
    """Convert a NumPy array into a ROOT histogram

    Parameters
    ----------
    array : numpy array
        A 1, 2, or 3-d numpy array that will set the bin contents of the
        ROOT histogram.
    hist : ROOT TH1, TH2, or TH3
        A ROOT histogram.
    errors : numpy array, optional (default=None)
        A numpy array (or array-like) of errors with the same shape as the
        bin contents array. If not given, no errors are set.

    Returns
    -------
    hist : ROOT TH1, TH2, or TH3
        The ROOT histogram with bin contents set from the array.

    Raises
    ------
    TypeError
        If hist is not a ROOT histogram.
    ValueError
        If the array and histogram are not compatible in terms of
        dimensionality or number of bins along any axis, or if the errors
        array does not match the contents array.

    Notes
    -----
    The NumPy array is copied into the histogram's internal array. If the input
    NumPy array is not of the same data type as the histogram bin contents
    (i.e. TH1D vs TH1F, etc.) and/or the input array does not contain overflow
    bins along any of the axes, an additional copy is made into a temporary
    array with all values converted into the matching data type and with
    overflow bins included. Avoid this second copy by ensuring that the NumPy
    array data type matches the histogram data type and that overflow bins are
    included.

    See Also
    --------
    hist2array

    Examples
    --------
    >>> from root_numpy import array2hist, hist2array
    >>> import numpy as np
    >>> from rootpy.plotting import Hist2D
    >>> hist = Hist2D(5, 0, 1, 3, 0, 1, type='F')
    >>> array = np.random.randint(0, 10, size=(7, 5))
    >>> array
    array([[6, 7, 8, 3, 4],
           [8, 9, 7, 6, 2],
           [2, 3, 4, 5, 2],
           [7, 6, 5, 7, 3],
           [2, 0, 5, 6, 8],
           [0, 0, 6, 5, 2],
           [2, 2, 1, 5, 4]])
    >>> _ = array2hist(array, hist)
    >>> # output dtype matches the histogram type (D, F, I, S, C)
    >>> # and overflow is excluded by default
    >>> hist2array(hist)
    array([[ 9.,  7.,  6.],
           [ 3.,  4.,  5.],
           [ 6.,  5.,  7.],
           [ 0.,  5.,  6.],
           [ 0.,  6.,  5.]], dtype=float32)
    >>> hist2array(hist, include_overflow=True)
    array([[ 6.,  7.,  8.,  3.,  4.],
           [ 8.,  9.,  7.,  6.,  2.],
           [ 2.,  3.,  4.,  5.,  2.],
           [ 7.,  6.,  5.,  7.,  3.],
           [ 2.,  0.,  5.,  6.,  8.],
           [ 0.,  0.,  6.,  5.,  2.],
           [ 2.,  2.,  1.,  5.,  4.]], dtype=float32)
    >>> array2 = hist2array(hist, include_overflow=True, copy=False)
    >>> hist[2, 2] = -10
    >>> # array2 views the same memory as hist because copy=False
    >>> array2
    array([[  6.,   7.,   8.,   3.,   4.],
           [  8.,   9.,   7.,   6.,   2.],
           [  2.,   3., -10.,   5.,   2.],
           [  7.,   6.,   5.,   7.,   3.],
           [  2.,   0.,   5.,   6.,   8.],
           [  0.,   0.,   6.,   5.,   2.],
           [  2.,   2.,   1.,   5.,   4.]], dtype=float32)
    >>> # x, y, z axes correspond to axes 0, 1, 2 in numpy
    >>> hist[2, 3] = -10
    >>> array2
    array([[  6.,   7.,   8.,   3.,   4.],
           [  8.,   9.,   7.,   6.,   2.],
           [  2.,   3., -10., -10.,   2.],
           [  7.,   6.,   5.,   7.,   3.],
           [  2.,   0.,   5.,   6.,   8.],
           [  0.,   0.,   6.,   5.,   2.],
           [  2.,   2.,   1.,   5.,   4.]], dtype=float32)
    """
    import ROOT
    # Determine the full bin-content shape including the underflow and
    # overflow bins (hence "+ 2" along every axis).
    if isinstance(hist, ROOT.TH3):
        shape = (hist.GetNbinsX() + 2,
                 hist.GetNbinsY() + 2,
                 hist.GetNbinsZ() + 2)
    elif isinstance(hist, ROOT.TH2):
        shape = (hist.GetNbinsX() + 2, hist.GetNbinsY() + 2)
    elif isinstance(hist, ROOT.TH1):
        shape = (hist.GetNbinsX() + 2,)
    else:
        raise TypeError(
            "hist must be an instance of ROOT.TH1, ROOT.TH2, or ROOT.TH3")
    # Determine the corresponding numpy dtype from the TArray base class
    for hist_type in 'DFISC':
        if isinstance(hist, getattr(ROOT, 'TArray{0}'.format(hist_type))):
            break
    else:
        raise AssertionError(
            "hist is somehow an instance of TH[1|2|3] "
            "but not TArray[D|F|I|S|C]")
    # Construct a NumPy array viewing the underlying histogram array
    dtype = np.dtype(DTYPE_ROOT2NUMPY[hist_type])
    # No copy is made if the dtype is the same as input
    _array = np.ascontiguousarray(array, dtype=dtype)
    if errors is not None:
        # Errors are specified as doubles in the SetError function.
        # Converting first also accepts any array-like input (the shape
        # check then raises ValueError instead of AttributeError).
        _errors = np.ascontiguousarray(errors, dtype=np.float64)
        if _errors.shape != _array.shape:
            raise ValueError("Contents and errors are not compatible")
    else:
        _errors = None
    if _array.ndim != len(shape):
        raise ValueError(
            "array and histogram do not have "
            "the same number of dimensions")
    if _array.shape != shape:
        # Check for overflow along each axis
        slices = []
        for axis, bins in enumerate(shape):
            if _array.shape[axis] == bins - 2:
                # under/overflow bins missing along this axis
                slices.append(slice(1, -1))
            elif _array.shape[axis] == bins:
                slices.append(slice(None))
            else:
                raise ValueError(
                    "array and histogram are not compatible along "
                    "the {0}-axis".format("xyz"[axis]))
        # Copy into zero-padded arrays that include the overflow bins
        array_overflow = np.zeros(shape, dtype=dtype)
        array_overflow[tuple(slices)] = _array
        _array = array_overflow
        if _errors is not None:
            errors_overflow = np.zeros(shape, dtype=np.float64)
            errors_overflow[tuple(slices)] = _errors
            _errors = errors_overflow
    # ROOT's internal bin storage varies x fastest, the reverse of NumPy's
    # C order, so transpose before flattening (hist2array transposes back).
    ARRAY_NUMPY2ROOT[len(shape)][hist_type](
        ROOT.AsCObject(hist), np.ravel(np.transpose(_array)))
    # Set the number of entries to the number of array elements
    hist.SetEntries(_array.size)
    if _errors is not None:
        hist.SetError(np.ravel(_errors.T))
    return hist
import warnings
from glob import glob
import numpy as np
from .extern.six import string_types
from . import _librootnumpy
__all__ = [
'root2array',
'root2rec',
'list_trees',
'list_branches',
'list_structures',
'list_directories',
'tree2array',
'tree2rec',
'array2tree',
'array2root',
]
def _glob(filenames):
    """Expand glob patterns in a filename or list of filenames.

    Patterns that match nothing are passed through unchanged so that URLs
    for remote file access are not clobbered.
    """
    if isinstance(filenames, string_types):
        filenames = [filenames]
    expanded = []
    for pattern in filenames:
        hits = glob(pattern)
        # Fall back to the original string when the glob matched nothing
        # (e.g. a remote URL or a plain filename).
        expanded.extend(hits if hits else [pattern])
    return expanded
def list_trees(filename):
    """Return the names of the trees stored in a ROOT file.

    Parameters
    ----------
    filename : str
        Path to the ROOT file.

    Returns
    -------
    trees : list
        Names of all trees found in the file.
    """
    # All of the actual ROOT I/O happens in the compiled extension module.
    trees = _librootnumpy.list_trees(filename)
    return trees
def list_branches(filename, treename=None):
    """Return the branch names of a tree in a ROOT file.

    Parameters
    ----------
    filename : str
        Path to the ROOT file.
    treename : str, optional (default=None)
        Name of the tree to inspect. May be omitted when the file contains
        exactly one tree.

    Returns
    -------
    branches : list
        Names of the branches of the requested tree.
    """
    # All of the actual ROOT I/O happens in the compiled extension module.
    branches = _librootnumpy.list_branches(filename, treename)
    return branches
def list_directories(filename):
    """Return the names of the directories in a ROOT file.

    Parameters
    ----------
    filename : str
        Path to the ROOT file.

    Returns
    -------
    directories : list
        Names of all directories found in the file.
    """
    # All of the actual ROOT I/O happens in the compiled extension module.
    directories = _librootnumpy.list_directories(filename)
    return directories
def list_structures(filename, treename=None):
    """Return a dictionary mapping branch names to leaf structures.

    .. warning:: ``list_structures`` is deprecated and will be removed in
       release 5.0.0.

    Parameters
    ----------
    filename : str
        Path to the ROOT file.
    treename : str, optional (default=None)
        Name of the tree to inspect. May be omitted when the file contains
        exactly one tree.

    Returns
    -------
    structures : OrderedDict
        An ordered dictionary mapping branch names to leaf structures.
    """
    message = ("list_structures is deprecated and will be "
               "removed in 5.0.0.")
    warnings.warn(message, DeprecationWarning)
    return _librootnumpy.list_structures(filename, treename)
def root2array(filenames,
               treename=None,
               branches=None,
               selection=None,
               object_selection=None,
               start=None,
               stop=None,
               step=None,
               include_weight=False,
               weight_name='weight',
               cache_size=-1,
               warn_missing_tree=False):
    """Convert trees in ROOT files into a numpy structured array.

    Refer to the documentation of :func:`tree2array`.

    Parameters
    ----------
    filenames : str or list
        ROOT file name pattern or list of patterns. Wildcarding is supported by
        Python globbing.
    treename : str, optional (default=None)
        Name of the tree to convert (optional if each file contains exactly one
        tree).
    branches : list of strings and tuples or a string or tuple, optional (default=None)
        List of branches and expressions to include as columns of the array or
        a single branch or expression in which case a nonstructured array is
        returned. If None then include all branches that can be converted.
        Branches or expressions that result in variable-length subarrays can be
        truncated at a fixed length by using the tuple ``(branch_or_expression,
        fill_value, length)`` or converted into a single value with
        ``(branch_or_expression, fill_value)`` where ``length==1`` is implied.
        ``fill_value`` is used when the original array is shorter than
        ``length``. This truncation is after any object selection performed
        with the ``object_selection`` argument.
    selection : str, optional (default=None)
        Only include entries fulfilling this condition. If the condition
        evaluates to multiple values per tree entry (e.g. conditions on array
        branches) then an entry will be included if the condition evaluates to
        true for at least one array element.
    object_selection : dict, optional (default=None)
        A dictionary mapping selection strings to branch names or lists of
        branch names. Only array elements passing the selection strings will be
        included in the output array per entry in the tree. The branches
        specified must be variable-length array-type branches and the length of
        the selection and branches it acts on must match for each tree entry.
        For example ``object_selection={'a > 0': ['a', 'b']}`` will include all
        elements of 'a' and corresponding elements of 'b' where 'a > 0' for
        each tree entry. 'a' and 'b' must have the same length in every tree
        entry.
    start, stop, step : int, optional (default=None)
        The meaning of the ``start``, ``stop`` and ``step`` parameters is the
        same as for Python slices. If a range is supplied (by setting some of
        the ``start``, ``stop`` or ``step`` parameters), only the entries in
        that range and fulfilling the ``selection`` condition (if defined) are
        used.
    include_weight : bool, optional (default=False)
        Include a column containing the tree weight ``TTree::GetWeight()``.
        Note that this will be the same value for all entries unless the tree
        is actually a TChain containing multiple trees with different weights.
    weight_name : str, optional (default='weight')
        The field name for the weight column if ``include_weight=True``.
    cache_size : int, optional (default=-1)
        Set the size (in bytes) of the TTreeCache used while reading a TTree. A
        value of -1 uses ROOT's default cache size. A value of 0 disables the
        cache.
    warn_missing_tree : bool, optional (default=False)
        If True, then warn when a tree is missing from an input file instead of
        raising an IOError.

    Notes
    -----
    * Refer to the :ref:`type conversion table <conversion_table>`.

    See Also
    --------
    tree2array
    array2tree
    array2root
    """
    # Expand glob patterns; unmatched patterns (e.g. remote URLs) are kept
    # as-is by _glob.
    filenames = _glob(filenames)
    if not filenames:
        raise ValueError("specify at least one filename")
    if treename is None:
        # The tree name can only be inferred when the first file contains
        # exactly one tree.
        trees = list_trees(filenames[0])
        if len(trees) > 1:
            raise ValueError(
                "treename must be specified if the file "
                "contains more than one tree")
        elif not trees:
            raise IOError(
                "no trees present in {0}".format(filenames[0]))
        treename = trees[0]
    # Normalize the branches argument: a single branch (string) or a single
    # truncation tuple yields a flat, nonstructured output array; remember
    # the column name in `flatten` so it can be selected below.
    if isinstance(branches, string_types):
        # single branch selected
        flatten = branches
        branches = [branches]
    elif isinstance(branches, tuple):
        if len(branches) not in (2, 3):
            raise ValueError(
                "invalid branch tuple: {0}. "
                "A branch tuple must contain two elements "
                "(branch_name, fill_value) or three elements "
                "(branch_name, fill_value, length) "
                "to yield a single value or truncate, respectively".format(branches))
        flatten = branches[0]
        branches = [branches]
    else:
        flatten = False
    # The actual conversion is implemented in the compiled extension.
    arr = _librootnumpy.root2array_fromfile(
        filenames, treename, branches,
        selection, object_selection,
        start, stop, step,
        include_weight,
        weight_name,
        cache_size,
        warn_missing_tree)
    if flatten:
        # select single column
        return arr[flatten]
    return arr
def root2rec(filenames,
             treename=None,
             branches=None,
             selection=None,
             object_selection=None,
             start=None,
             stop=None,
             step=None,
             include_weight=False,
             weight_name='weight',
             cache_size=-1,
             warn_missing_tree=False):  # pragma: no cover
    """View the result of :func:`root2array` as a record array.

    .. warning:: ``root2rec`` is deprecated and will be removed in
       release 5.0.0. Instead use ``root2array(...).view(np.recarray)``.

    Notes
    -----
    * This is equivalent to::

        root2array(filenames, treename, branches).view(np.recarray)

    * Refer to the :ref:`type conversion table <conversion_table>`.

    See Also
    --------
    root2array
    """
    warnings.warn("root2rec is deprecated and will be removed in 5.0.0. "
                  "Instead use root2array(...).view(np.recarray)",
                  DeprecationWarning)
    # Forward everything to root2array and reinterpret the result.
    arr = root2array(filenames,
                     treename=treename,
                     branches=branches,
                     selection=selection,
                     object_selection=object_selection,
                     start=start,
                     stop=stop,
                     step=step,
                     include_weight=include_weight,
                     weight_name=weight_name,
                     cache_size=cache_size,
                     warn_missing_tree=warn_missing_tree)
    return arr.view(np.recarray)
def tree2array(tree,
               branches=None,
               selection=None,
               object_selection=None,
               start=None,
               stop=None,
               step=None,
               include_weight=False,
               weight_name='weight',
               cache_size=-1):
    """Convert a tree into a numpy structured array.

    Convert branches of strings and basic types such as bool, int, float,
    double, etc. as well as variable-length and fixed-length multidimensional
    arrays and 1D or 2D vectors of basic types and strings. ``tree2array`` can
    also create columns in the output array that are expressions involving the
    TTree branches (i.e. ``'vect.Pt() / 1000'``) similar to ``TTree::Draw()``.
    See the notes below for important details.

    Parameters
    ----------
    tree : ROOT TTree instance
        The ROOT TTree to convert into an array.
    branches : list of strings and tuples or a string or tuple, optional (default=None)
        List of branches and expressions to include as columns of the array or
        a single branch or expression in which case a nonstructured array is
        returned. If None then include all branches that can be converted.
        Branches or expressions that result in variable-length subarrays can be
        truncated at a fixed length by using the tuple ``(branch_or_expression,
        fill_value, length)`` or converted into a single value with
        ``(branch_or_expression, fill_value)`` where ``length==1`` is implied.
        ``fill_value`` is used when the original array is shorter than
        ``length``. This truncation is after any object selection performed
        with the ``object_selection`` argument.
    selection : str, optional (default=None)
        Only include entries fulfilling this condition. If the condition
        evaluates to multiple values per tree entry (e.g. conditions on array
        branches) then an entry will be included if the condition evaluates to
        true for at least one array element.
    object_selection : dict, optional (default=None)
        A dictionary mapping selection strings to branch names or lists of
        branch names. Only array elements passing the selection strings will be
        included in the output array per entry in the tree. The branches
        specified must be variable-length array-type branches and the length of
        the selection and branches it acts on must match for each tree entry.
        For example ``object_selection={'a > 0': ['a', 'b']}`` will include all
        elements of 'a' and corresponding elements of 'b' where 'a > 0' for
        each tree entry. 'a' and 'b' must have the same length in every tree
        entry.
    start, stop, step : int, optional (default=None)
        The meaning of the ``start``, ``stop`` and ``step`` parameters is the
        same as for Python slices. If a range is supplied (by setting some of
        the ``start``, ``stop`` or ``step`` parameters), only the entries in
        that range and fulfilling the ``selection`` condition (if defined) are
        used.
    include_weight : bool, optional (default=False)
        Include a column containing the tree weight ``TTree::GetWeight()``.
        Note that this will be the same value for all entries unless the tree
        is actually a TChain containing multiple trees with different weights.
    weight_name : str, optional (default='weight')
        The field name for the weight column if ``include_weight=True``.
    cache_size : int, optional (default=-1)
        Set the size (in bytes) of the TTreeCache used while reading a TTree. A
        value of -1 uses ROOT's default cache size. A value of 0 disables the
        cache.

    Notes
    -----
    Types are converted according to the following table:

    .. _conversion_table:

    ========================  ===============================
    ROOT                      NumPy
    ========================  ===============================
    ``Bool_t``                ``np.bool_``
    ``Char_t``                ``np.int8``
    ``UChar_t``               ``np.uint8``
    ``Short_t``               ``np.int16``
    ``UShort_t``              ``np.uint16``
    ``Int_t``                 ``np.int32``
    ``UInt_t``                ``np.uint32``
    ``Float_t``               ``np.float32``
    ``Double_t``              ``np.float64``
    ``Long64_t``              ``np.int64``
    ``ULong64_t``             ``np.uint64``
    ``<type>[2][3]...``       ``(<nptype>, (2, 3, ...))``
    ``<type>[nx][2]...``      ``np.object_``
    ``string``                ``np.object_``
    ``vector<t>``             ``np.object_``
    ``vector<vector<t> >``    ``np.object_``
    ========================  ===============================

    * Variable-length arrays (such as ``x[nx][2]``) and vectors (such as
      ``vector<int>``) are converted to NumPy arrays of the corresponding
      types.
    * Fixed-length arrays are converted to fixed-length NumPy array fields.

    **Branches with different lengths:**

    Note that when converting trees that have branches of different lengths
    into numpy arrays, the shorter branches will be extended to match the
    length of the longest branch by repeating their last values. If all
    requested branches are shorter than the longest branch in the tree, this
    will result in a "read failure" since beyond the end of the longest
    requested branch no additional bytes will be read from the file and
    root_numpy is unable to distinguish this from other ROOT errors that result
    in no bytes being read. In this case, explicitly set the ``stop`` argument
    to the length of the longest requested branch.

    See Also
    --------
    root2array
    array2root
    array2tree
    """
    import ROOT
    if not isinstance(tree, ROOT.TTree):
        raise TypeError("tree must be a ROOT.TTree")
    # Hand the tree to the compiled extension as an opaque C pointer.
    cobj = ROOT.AsCObject(tree)
    # Normalize the branches argument: a single branch (string) or a single
    # truncation tuple yields a flat, nonstructured output array; remember
    # the column name in `flatten` so it can be selected below.
    if isinstance(branches, string_types):
        # single branch selected
        flatten = branches
        branches = [branches]
    elif isinstance(branches, tuple):
        if len(branches) not in (2, 3):
            raise ValueError(
                "invalid branch tuple: {0}. "
                "A branch tuple must contain two elements "
                "(branch_name, fill_value) or three elements "
                "(branch_name, fill_value, length) "
                "to yield a single value or truncate, respectively".format(branches))
        flatten = branches[0]
        branches = [branches]
    else:
        flatten = False
    # The actual conversion is implemented in the compiled extension.
    arr = _librootnumpy.root2array_fromtree(
        cobj, branches, selection, object_selection,
        start, stop, step,
        include_weight,
        weight_name,
        cache_size)
    if flatten:
        # select single column
        return arr[flatten]
    return arr
def tree2rec(tree,
             branches=None,
             selection=None,
             object_selection=None,
             start=None,
             stop=None,
             step=None,
             include_weight=False,
             weight_name='weight',
             cache_size=-1):  # pragma: no cover
    """View the result of :func:`tree2array` as a record array.

    .. warning:: ``tree2rec`` is deprecated and will be removed in
       release 5.0.0. Instead use ``tree2array(...).view(np.recarray)``.

    Notes
    -----
    * This is equivalent to::

        tree2array(treename, branches).view(np.recarray)

    * Refer to the :ref:`type conversion table <conversion_table>`.

    See Also
    --------
    tree2array
    """
    warnings.warn("tree2rec is deprecated and will be removed in 5.0.0. "
                  "Instead use tree2array(...).view(np.recarray)",
                  DeprecationWarning)
    # Forward everything to tree2array and reinterpret the result.
    arr = tree2array(tree,
                     branches=branches,
                     selection=selection,
                     object_selection=object_selection,
                     start=start,
                     stop=stop,
                     step=step,
                     include_weight=include_weight,
                     weight_name=weight_name,
                     cache_size=cache_size)
    return arr.view(np.recarray)
def array2tree(arr, name='tree', tree=None):
    """Convert a numpy structured array into a ROOT TTree.

    Fields of basic types, strings, and fixed-size subarrays of basic types
    are supported. Object and float16 fields are currently not supported.

    Parameters
    ----------
    arr : array
        A numpy structured array
    name : str (optional, default='tree')
        Name of the created ROOT TTree if ``tree`` is None.
    tree : ROOT TTree (optional, default=None)
        An existing ROOT TTree to be extended by the numpy array. Any branch
        with the same name as a field in the numpy array will be extended as
        long as the types are compatible, otherwise a TypeError is raised. New
        branches will be created and filled for all new fields.

    Returns
    -------
    root_tree : a ROOT TTree

    Notes
    -----
    When using the ``tree`` argument to extend and/or add new branches to an
    existing tree, it is possible to create branches of different lengths.
    This will result in a warning from ROOT when root_numpy calls the tree's
    ``SetEntries()`` method. Beyond that, the tree should still be usable.
    root_numpy makes no attempt to prevent such behaviour as this would be
    more strict than ROOT itself. Also see the note about converting trees
    that have branches of different lengths into numpy arrays in the
    documentation of :func:`tree2array`.

    See Also
    --------
    array2root
    root2array
    tree2array
    """
    import ROOT
    # When extending an existing tree, pass it down as an opaque C pointer;
    # otherwise the extension module creates a new tree named `name`.
    incobj = None
    if tree is not None:
        if not isinstance(tree, ROOT.TTree):
            raise TypeError("tree must be a ROOT.TTree")
        incobj = ROOT.AsCObject(tree)
    cobj = _librootnumpy.array2tree_toCObj(arr, name=name, tree=incobj)
    # Rewrap the returned C pointer as a PyROOT TTree proxy.
    return ROOT.BindObject(cobj, 'TTree')
def array2root(arr, filename, treename='tree', mode='update'):
    """Convert a numpy array into a ROOT TTree and save it in a ROOT TFile.

    Fields of basic types, strings, and fixed-size subarrays of basic types
    are supported. Object and float16 fields are currently not supported.

    Parameters
    ----------
    arr : array
        A numpy structured array
    filename : str
        Name of the output ROOT TFile. A new file will be created if it
        doesn't already exist.
    treename : str (optional, default='tree')
        Name of the ROOT TTree that will be created. If a TTree with the same
        name already exists in the TFile, it will be extended as documented in
        :func:`array2tree`.
    mode : str (optional, default='update')
        Mode used to open the ROOT TFile ('update' or 'recreate').

    See Also
    --------
    array2tree
    tree2array
    root2array

    Examples
    --------
    >>> from root_numpy import array2root, root2array
    >>> import numpy as np
    >>>
    >>> a = np.array([(1, 2.5, 3.4),
    ...               (4, 5, 6.8)],
    ...              dtype=[('a', np.int32),
    ...                     ('b', np.float32),
    ...                     ('c', np.float64)])
    >>> array2root(a, 'test.root', mode='recreate')
    >>> root2array('test.root')
    array([(1, 2.5, 3.4), (4, 5.0, 6.8)],
          dtype=[('a', '<i4'), ('b', '<f4'), ('c', '<f8')])
    >>>
    >>> a = np.array(['', 'a', 'ab', 'abc', 'xyz', ''],
    ...              dtype=[('string', 'S3')])
    >>> array2root(a, 'test.root', mode='recreate')
    >>> root2array('test.root')
    array([('',), ('a',), ('ab',), ('abc',), ('xyz',), ('',)],
          dtype=[('string', 'S3')])
    >>>
    >>> a = np.array([([1, 2, 3],),
    ...               ([4, 5, 6],)],
    ...              dtype=[('array', np.int32, (3,))])
    >>> array2root(a, 'test.root', mode='recreate')
    >>> root2array('test.root')
    array([([1, 2, 3],), ([4, 5, 6],)],
          dtype=[('array', '<i4', (3,))])
    """
    # File handling and conversion are implemented entirely in the
    # compiled extension module.
    _librootnumpy.array2root(arr, filename, treename, mode)
import uuid
import numpy as np
from .extern.six import string_types
from . import _librootnumpy
__all__ = [
'evaluate',
]
def evaluate(obj, array):
    """Evaluate a ROOT histogram, function, graph, or spline over an array.

    Parameters
    ----------
    obj : TH[1|2|3], TF[1|2|3], TFormula, TGraph, TSpline, or string
        A ROOT histogram, function, formula, graph, spline, or string. If a
        string is specified, a TFormula is created.
    array : ndarray
        An array containing the values to evaluate the ROOT object on. The
        shape must match the dimensionality of the ROOT object.

    Returns
    -------
    y : array
        An array containing the values of the ROOT object evaluated at each
        value in the input array.

    Raises
    ------
    TypeError
        If the ROOT object is not a histogram, function, graph, or spline.
    ValueError
        If the shape of the array is not compatible with the dimensionality of
        the ROOT object being evaluated. If the string expression does not
        compile to a valid TFormula expression.

    Examples
    --------
    >>> from root_numpy import evaluate
    >>> from ROOT import TF1, TF2
    >>> func = TF1("f1", "x*x")
    >>> evaluate(func, [1, 2, 3, 4])
    array([  1.,   4.,   9.,  16.])
    >>> func = TF2("f2", "x*y")
    >>> evaluate(func, [[1, 1], [1, 2], [3, 1]])
    array([ 1.,  2.,  3.])
    >>> evaluate("x*y", [[1, 1], [1, 2], [3, 1]])
    array([ 1.,  2.,  3.])
    """
    import ROOT
    array = np.asarray(array, dtype=np.double)

    def _check_1d():
        # One-dimensional objects are evaluated over a flat array of values.
        if array.ndim != 1:
            raise ValueError("array must be 1-dimensional")

    def _check_2d(ndim, what):
        # Multidimensional objects are evaluated over an array of
        # coordinate tuples with shape (N, ndim).
        if array.ndim != 2:
            raise ValueError("array must be 2-dimensional")
        if array.shape[1] != ndim:
            raise ValueError(
                "length of the second dimension must equal "
                "the dimension of the {0}".format(what))

    if isinstance(obj, ROOT.TH1):
        # Check TH3 and TH2 first: both are subclasses of TH1.
        if isinstance(obj, ROOT.TH3):
            _check_2d(3, "histogram")
            return _librootnumpy.evaluate_h3(ROOT.AsCObject(obj), array)
        elif isinstance(obj, ROOT.TH2):
            _check_2d(2, "histogram")
            return _librootnumpy.evaluate_h2(ROOT.AsCObject(obj), array)
        _check_1d()
        return _librootnumpy.evaluate_h1(ROOT.AsCObject(obj), array)
    elif isinstance(obj, ROOT.TF1):
        # Check TF3 and TF2 first: both are subclasses of TF1.
        if isinstance(obj, ROOT.TF3):
            _check_2d(3, "function")
            return _librootnumpy.evaluate_f3(ROOT.AsCObject(obj), array)
        elif isinstance(obj, ROOT.TF2):
            _check_2d(2, "function")
            return _librootnumpy.evaluate_f2(ROOT.AsCObject(obj), array)
        _check_1d()
        return _librootnumpy.evaluate_f1(ROOT.AsCObject(obj), array)
    elif isinstance(obj, (string_types, ROOT.TFormula)):
        if isinstance(obj, string_types):
            # attempt to compile the expression into a formula,
            # using a random unique name
            obj = ROOT.TFormula(uuid.uuid4().hex, obj)
        ndim = obj.GetNdim()
        if ndim == 0:
            raise ValueError("invalid formula expression")
        if ndim == 1:
            _check_1d()
            return _librootnumpy.evaluate_formula_1d(
                ROOT.AsCObject(obj), array)
        _check_2d(ndim, "function")
        if ndim == 2:
            return _librootnumpy.evaluate_formula_2d(
                ROOT.AsCObject(obj), array)
        elif ndim == 3:
            return _librootnumpy.evaluate_formula_3d(
                ROOT.AsCObject(obj), array)
        # anything above three dimensions is handled by the 4d evaluator
        return _librootnumpy.evaluate_formula_4d(
            ROOT.AsCObject(obj), array)
    elif isinstance(obj, ROOT.TGraph):
        _check_1d()
        return _librootnumpy.evaluate_graph(ROOT.AsCObject(obj), array)
    elif isinstance(obj, ROOT.TSpline):
        _check_1d()
        return _librootnumpy.evaluate_spline(ROOT.AsCObject(obj), array)
    raise TypeError(
        "obj is not a ROOT histogram, function, formula, "
        "graph, spline or string")
import numpy as np
import operator
from .extern.six import string_types
from ._librootnumpy import _blockwise_inner_join
__all__ = [
'rec2array',
'stack',
'stretch',
'dup_idx',
'blockwise_inner_join',
]
# Vectorized ``len``: maps an object array of sequences to an integer array
# of per-element lengths (used to validate variable-length columns).
VLEN = np.vectorize(len)
def rec2array(rec, fields=None):
    """Convert a record/structured array into an ndarray with a homogeneous data type.

    Parameters
    ----------
    rec : NumPy record/structured array
        A NumPy structured array that will be cast into a homogenous data type.
    fields : list of strings or string, optional (default=None)
        The fields to include as columns in the output array. If None, then
        all columns will be included. All fields must have the same shape.
        See below regarding the case where ``fields`` is a string.

    Returns
    -------
    array : NumPy ndarray
        A new NumPy ndarray with homogeneous data types for all columns. If the
        fields are scalars the shape will be ``(len(rec), num_fields)``. If the
        fields are arrays of length ``num_things`` the shape will be
        ``(len(rec), num_things, num_fields)``. If ``fields`` is a string (a
        single field), then the shape will be simplified to remove the last
        dimension ``num_fields``. This simplification will not occur if
        ``fields`` is a list containing a single field.

    Examples
    --------
    >>> from root_numpy import rec2array
    >>> import numpy as np
    >>> a = np.array([
    ...     (12345, 2., 2.1, True),
    ...     (3, 4., 4.2, False),],
    ...     dtype=[
    ...         ('x', np.int32),
    ...         ('y', np.float32),
    ...         ('z', np.float64),
    ...         ('w', np.bool_)])
    >>> arr = rec2array(a)
    >>> arr
    array([[  1.23450000e+04,   2.00000000e+00,   2.10000000e+00,
              1.00000000e+00],
           [  3.00000000e+00,   4.00000000e+00,   4.20000000e+00,
              0.00000000e+00]])
    >>> arr.dtype
    dtype('float64')
    >>>
    >>> a = np.array([
    ...     ([1, 2, 3], [4.5, 6, 9.5],),
    ...     ([4, 5, 6], [3.3, 7.5, 8.4],),],
    ...     dtype=[
    ...         ('x', np.int32, (3,)),
    ...         ('y', np.float32, (3,))])
    >>> arr = rec2array(a)
    >>> arr
    array([[[ 1.        ,  4.5       ],
            [ 2.        ,  6.        ],
            [ 3.        ,  9.5       ]],
    <BLANKLINE>
           [[ 4.        ,  3.29999995],
            [ 5.        ,  7.5       ],
            [ 6.        ,  8.39999962]]])
    >>> arr.shape
    (2, 3, 2)
    """
    simplify = False
    if fields is None:
        fields = rec.dtype.names
    elif isinstance(fields, string_types):
        # Single field requested: drop the trailing num_fields dimension.
        fields = [fields]
        simplify = True
    elif not fields:
        # Fail early with a clear message instead of an opaque np.dstack error
        raise ValueError("fields must contain at least one field name")
    # Creates a copy and casts all data to the same type
    arr = np.dstack([rec[field] for field in fields])
    # Check for array-type fields. If none, then remove outer dimension.
    # Only need to check first field since np.dstack will anyway raise an
    # exception if the shapes don't match
    if not rec.dtype[fields[0]].shape:
        arr = arr[0]
    if simplify:
        # remove last dimension (will be of size 1)
        arr = arr.reshape(arr.shape[:-1])
    return arr
def stack(recs, fields=None):
    """Stack common fields in multiple record arrays (concatenate them).

    Parameters
    ----------
    recs : list
        List of NumPy record arrays
    fields : list of strings, optional (default=None)
        The list of fields to include in the stacked array. If None, then
        include the fields in common to all the record arrays.

    Returns
    -------
    rec : NumPy record array
        The stacked array.
    """
    if fields is None:
        # Use the fields common to every input array.
        common = set(recs[0].dtype.names)
        for rec in recs[1:]:
            common &= set(rec.dtype.names)
        fields = list(common)
        # When nothing was dropped, keep the field order of the first array.
        if set(fields) == set(recs[0].dtype.names):
            fields = list(recs[0].dtype.names)
    return np.hstack([rec[fields] for rec in recs])
def stretch(arr, fields=None, return_indices=False):
    """Stretch an array.

    Stretch an array by ``hstack()``-ing multiple array fields while
    preserving column names and record array structure. If a scalar field is
    specified, it will be stretched along with array fields.

    Parameters
    ----------
    arr : NumPy structured or record array
        The array to be stretched.
    fields : list of strings or string, optional (default=None)
        A list of column names or a single column name to stretch.
        If ``fields`` is a string, then the output array is a one-dimensional
        unstructured array containing only the stretched elements of that
        field. If None, then stretch all fields.
    return_indices : bool, optional (default=False)
        If True, the array index of each stretched array entry will be
        returned in addition to the stretched array.
        This changes the return type of this function to a tuple consisting
        of a structured array and a numpy int64 array.

    Returns
    -------
    ret : A NumPy structured array
        The stretched array.

    Examples
    --------
    >>> import numpy as np
    >>> from root_numpy import stretch
    >>> arr = np.empty(2, dtype=[('scalar', np.int), ('array', 'O')])
    >>> arr[0] = (0, np.array([1, 2, 3], dtype=np.float))
    >>> arr[1] = (1, np.array([4, 5, 6], dtype=np.float))
    >>> stretch(arr, ['scalar', 'array'])
    array([(0, 1.0), (0, 2.0), (0, 3.0), (1, 4.0), (1, 5.0), (1, 6.0)],
        dtype=[('scalar', '<i8'), ('array', '<f8')])
    """
    dtype = []
    # Per-row element counts of the array columns; also serves as the
    # "have we seen an array column yet" flag.
    len_array = None
    flatten = False
    if fields is None:
        fields = arr.dtype.names
    elif isinstance(fields, string_types):
        # Single field name: output will be unstructured (see docstring).
        fields = [fields]
        flatten = True
    # Construct dtype and check consistency
    for field in fields:
        dt = arr.dtype[field]
        if dt == 'O' or len(dt.shape):
            if dt == 'O':
                # Variable-length array field: element lengths per row
                # (VLEN is a module-level vectorized len()).
                lengths = VLEN(arr[field])
            else:
                # Fixed-length array field: every row has dt.shape[0] items
                lengths = np.repeat(dt.shape[0], arr.shape[0])
            # All array columns must agree on the per-row lengths,
            # otherwise rows cannot be stretched consistently.
            if len_array is None:
                len_array = lengths
            elif not np.array_equal(lengths, len_array):
                raise ValueError(
                    "inconsistent lengths of array columns in input")
            if dt == 'O':
                # Element dtype taken from the first row's array
                dtype.append((field, arr[field][0].dtype))
            else:
                # Keep any trailing dimensions beyond the stretched axis
                dtype.append((field, arr[field].dtype, dt.shape[1:]))
        else:
            # Scalar field
            dtype.append((field, dt))
    if len_array is None:
        raise RuntimeError("no array column in input")
    # Build stretched output: one output row per array element
    ret = np.empty(np.sum(len_array), dtype=dtype)
    for field in fields:
        dt = arr.dtype[field]
        if dt == 'O' or len(dt.shape) == 1:
            # Variable-length or 1D fixed-length array field
            ret[field] = np.hstack(arr[field])
        elif len(dt.shape):
            # Multidimensional fixed-length array field
            ret[field] = np.vstack(arr[field])
        else:
            # Scalar field: repeat each value once per array element
            ret[field] = np.repeat(arr[field], len_array)
    if flatten:
        # Single requested field: drop the structured wrapper
        ret = ret[fields[0]]
    if return_indices:
        # Original row index of each stretched entry
        idx = np.concatenate(list(map(np.arange, len_array)))
        return ret, idx
    return ret
def dup_idx(arr):
    """Return the indices of all duplicated array elements.

    Parameters
    ----------
    arr : array-like object
        An array-like object.

    Returns
    -------
    idx : NumPy array
        An array containing the indices of the duplicated elements.

    Examples
    --------
    >>> from root_numpy import dup_idx
    >>> dup_idx([1, 2, 3, 4, 5])
    array([], dtype=int64)
    >>> dup_idx([1, 2, 3, 4, 5, 5])
    array([4, 5])
    >>> dup_idx([1, 2, 3, 4, 5, 5, 1])
    array([0, 4, 5, 6])
    """
    # Encode each element as an integer code, count how often each code
    # occurs, then keep the positions whose code occurs more than once.
    _, codes = np.unique(arr, return_inverse=True)
    repeated_codes = np.nonzero(np.bincount(codes) > 1)[0]
    return np.nonzero(np.isin(codes, repeated_codes))[0]
def blockwise_inner_join(data, left, foreign_key, right,
                         force_repeat=None,
                         foreign_key_name=None):
    """Perform a blockwise inner join.

    Perform a blockwise inner join from names specified in ``left`` to
    ``right`` via ``foreign_key``: left->foreign_key->right.

    Parameters
    ----------
    data : array
        A structured NumPy array.
    left : array
        Array of left side column names.
    foreign_key : array or string
        NumPy array or string ``foreign_key`` column name. This column can be
        either an integer or an array of ints. If ``foreign_key`` is an array
        of int column, left column will be treated according to left column
        type:

        * Scalar columns or columns in ``force_repeat`` will be repeated
        * Array columns not in ``force_repeat`` will be assumed to the
          same length as ``foreign_key`` and will be stretched by index
    right : array
        Array of right side column names. These are array columns that each
        index ``foreign_key`` points to. These columns are assumed to have the
        same length.
    force_repeat : array, optional (default=None)
        Array of left column names that will be forced to stretch even if it's
        an array (useful when you want to emulate a multiple join).
    foreign_key_name : str, optional (default=None)
        The name of foreign key column in the output array.

    Examples
    --------
    >>> import numpy as np
    >>> from root_numpy import blockwise_inner_join
    >>> test_data = np.array([
    (1.0, np.array([11, 12, 13]), np.array([1, 0, 1]), 0, np.array([1, 2, 3])),
    (2.0, np.array([21, 22, 23]), np.array([-1, 2, -1]), 1, np.array([31, 32, 33]))],
    dtype=[('sl', np.float), ('al', 'O'), ('fk', 'O'), ('s_fk', np.int), ('ar', 'O')])
    >>> blockwise_inner_join(test_data, ['sl', 'al'], test_data['fk'], ['ar'])
    array([(1.0, 11, 2, 1), (1.0, 12, 1, 0), (1.0, 13, 2, 1), (2.0, 22, 33, 2)],
        dtype=[('sl', '<f8'), ('al', '<i8'), ('ar', '<i8'), ('fk', '<i8')])
    """
    # A string names a column of ``data``; anything else is already the
    # key array itself. The heavy lifting happens in the C extension.
    key = data[foreign_key] if isinstance(foreign_key, string_types) \
        else foreign_key
    return _blockwise_inner_join(
        data, left, key, right, force_repeat, foreign_key_name)
from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
    """Dictionary that remembers insertion order (Python 2 backport).

    Pure-Python recipe for interpreters that predate
    ``collections.OrderedDict``; ``UserDict.DictMixin`` exists only on
    Python 2. Order is tracked with a circular doubly linked list of
    ``[key, prev, next]`` cells plus a key -> cell lookup map.
    """
    def __init__(self, *args, **kwds):
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Already initialised (repeated __init__ call): keep the list.
            self.__end
        except AttributeError:
            self.clear()
        self.update(*args, **kwds)
    def clear(self):
        self.__end = end = []
        end += [None, end, end]  # sentinel node for doubly linked list
        self.__map = {}  # key --> [key, prev, next]
        dict.clear(self)
    def __setitem__(self, key, value):
        # New keys are linked in at the tail; existing keys keep position.
        if key not in self:
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the node from the doubly linked list.
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev
    def __iter__(self):
        # Walk the linked list forwards from the sentinel.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]
    def __reversed__(self):
        # Walk the linked list backwards from the sentinel.
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]
    def popitem(self, last=True):
        # Remove and return a (key, value) pair; LIFO when ``last`` is
        # true, FIFO otherwise. ``.next()`` is Python 2 generator syntax.
        if not self:
            raise KeyError('dictionary is empty')
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value
    def __reduce__(self):
        # Pickle support: the self-referential linked list cannot be
        # pickled, so temporarily drop it, capture the ordered items and
        # any extra instance attributes, then restore the list.
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def keys(self):
        return list(self)
    # Derive the remaining mapping API from DictMixin (built on the
    # ordered __iter__ above, so all of these respect insertion order).
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems
    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())
    def copy(self):
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        # Alternate constructor: keys from ``iterable``, all mapped to
        # the same ``value``.
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    def __eq__(self, other):
        # Comparison with another OrderedDict is order-sensitive;
        # comparison with a plain dict falls back to unordered equality.
        if isinstance(other, OrderedDict):
            if len(self) != len(other):
                return False
            for p, q in zip(self.items(), other.items()):
                if p != q:
                    return False
            return True
        return dict.__eq__(self, other)
    def __ne__(self, other):
        return not self == other
import numpy as np
import ROOT
from ROOT import TMVA
from . import _libtmvanumpy
__all__ = [
'evaluate_reader',
'evaluate_method',
]
def evaluate_reader(reader, name, events, aux=0.):
    """Evaluate a TMVA::Reader over a NumPy array.

    Parameters
    ----------
    reader : TMVA::Reader
        A reader with variables booked in exactly the same order as the
        columns in ``events``.
    name : string
        The method name.
    events : numpy array of shape [n_events, n_variables]
        A two-dimensional NumPy array with one row per event and one
        column per variable, in the same order as the ``AddVariable()``
        calls on the reader.
    aux : float, optional (default=0.)
        Auxiliary value used by MethodCuts to set the desired signal
        efficiency.

    Returns
    -------
    output : numpy array of shape [n_events]
        The method output value for each event.

    See Also
    --------
    evaluate_method
    """
    if not isinstance(reader, TMVA.Reader):
        raise TypeError("reader must be a TMVA.Reader instance")
    # The C extension expects a C-contiguous 2D float64 array.
    events = np.ascontiguousarray(events, dtype=np.float64)
    if events.ndim == 1:
        # Single-variable input: promote to one column per event.
        events = events.reshape(-1, 1)
    elif events.ndim != 2:
        raise ValueError(
            "events must be a two-dimensional array "
            "with one event per row")
    return _libtmvanumpy.evaluate_reader(
        ROOT.AsCObject(reader), name, events, aux)
def evaluate_method(method, events, aux=0.):
    """Evaluate a TMVA::MethodBase over a NumPy array.

    .. warning:: TMVA::Reader has known problems with thread safety in versions
        of ROOT earlier than 6.03. There will potentially be a crash if you call
        ``method = reader.FindMVA(name)`` in Python and then pass this
        ``method`` here. Consider using ``evaluate_reader`` instead if you are
        affected by this crash.

    Parameters
    ----------
    method : TMVA::MethodBase
        A TMVA::MethodBase instance with variables booked in exactly the same
        order as the columns in ``events``.
    events : numpy array of shape [n_events, n_variables]
        A two-dimensional NumPy array containing the rows of events and columns
        of variables. The order of the columns must match the order in which
        you called ``AddVariable()`` for each variable.
    aux : float, optional (default=0.)
        Auxiliary value used by MethodCuts to set the desired signal
        efficiency.

    Returns
    -------
    output : numpy array of shape [n_events]
        The method output value for each event

    See Also
    --------
    evaluate_reader
    """
    if not isinstance(method, TMVA.MethodBase):
        # Message fixed: it previously said "reader" although the
        # argument being validated here is ``method``.
        raise TypeError("method must be a TMVA.MethodBase instance")
    # The C extension expects a C-contiguous 2D float64 array.
    events = np.ascontiguousarray(events, dtype=np.float64)
    if events.ndim == 1:
        # convert to 2D
        events = events[:, np.newaxis]
    elif events.ndim != 2:
        raise ValueError(
            "events must be a two-dimensional array "
            "with one event per row")
    return _libtmvanumpy.evaluate_method(ROOT.AsCObject(method), events, aux)
import numpy as np
import ROOT
from ROOT import TMVA
from . import _libtmvanumpy
from .. import ROOT_VERSION
__all__ = [
'add_classification_events',
'add_regression_events',
]
# ROOT 6.07/04 moved TMVA's dataset handling from TMVA::Factory into the
# new TMVA::DataLoader class; this flag selects which API the functions
# below expect to be passed.
NEW_TMVA_API = ROOT_VERSION >= '6.07/04'
def add_classification_events(obj, events, labels, signal_label=None,
                              weights=None, test=False):
    """Add classification events to a TMVA::Factory or TMVA::DataLoader from NumPy arrays.

    Parameters
    ----------
    obj : TMVA::Factory or TMVA::DataLoader
        A TMVA::Factory or TMVA::DataLoader (TMVA's interface as of ROOT
        6.07/04) instance with variables already booked in exactly the same
        order as the columns in ``events``.
    events : numpy array of shape [n_events, n_variables]
        A two-dimensional NumPy array containing the rows of events and columns
        of variables. The order of the columns must match the order in which
        you called ``AddVariable()`` for each variable.
    labels : numpy array of shape [n_events]
        The class labels (signal or background) corresponding to each event in
        ``events``.
    signal_label : float or int, optional (default=None)
        The value in ``labels`` for signal events, if ``labels`` contains only
        two classes. If None, the highest value in ``labels`` is used.
    weights : numpy array of shape [n_events], optional
        Event weights.
    test : bool, optional (default=False)
        If True, then the events will be added as test events, otherwise they
        are added as training events by default.

    Notes
    -----
    * A TMVA::Factory or TMVA::DataLoader requires you to add both training and
      test events even if you don't intend to call ``TestAllMethods()``.
    * When using MethodCuts, the first event added must be a signal event,
      otherwise TMVA will fail with ``<FATAL> Interval : maximum lower than
      minimum``. To place a signal event first::

        # Get index of first signal event
        first_signal = np.nonzero(labels == signal_label)[0][0]
        # Swap this with first event
        events[0], events[first_signal] = events[first_signal].copy(), events[0].copy()
        labels[0], labels[first_signal] = labels[first_signal], labels[0]
        weights[0], weights[first_signal] = weights[first_signal], weights[0]
    """
    # Validate that obj matches the API of the available ROOT release.
    if NEW_TMVA_API:  # pragma: no cover
        if not isinstance(obj, TMVA.DataLoader):
            raise TypeError(
                "obj must be a TMVA.DataLoader "
                "instance for ROOT >= 6.07/04")
    else:  # pragma: no cover
        if not isinstance(obj, TMVA.Factory):
            raise TypeError(
                "obj must be a TMVA.Factory instance")
    # The C extension expects a C-contiguous 2D float64 array.
    events = np.ascontiguousarray(events, dtype=np.float64)
    if events.ndim == 1:
        # convert to 2D
        events = events[:, np.newaxis]
    elif events.ndim != 2:
        raise ValueError(
            "events must be a two-dimensional array "
            "with one event per row")
    # Map the (arbitrary) label values onto contiguous class indices
    # 0..n_classes-1; np.unique returns the sorted distinct labels.
    class_labels, class_idx = np.unique(labels, return_inverse=True)
    if class_idx.shape[0] != events.shape[0]:
        raise ValueError("numbers of events and labels do not match")
    if weights is not None:
        weights = np.asarray(weights, dtype=np.float64)
        if weights.shape[0] != events.shape[0]:
            raise ValueError("numbers of events and weights do not match")
        if weights.ndim != 1:
            raise ValueError("weights must be one-dimensional")
    n_classes = class_labels.shape[0]
    if n_classes > 2:
        # multiclass classification
        _libtmvanumpy.add_events_multiclass(
            ROOT.AsCObject(obj), events, class_idx,
            weights, test)
    elif n_classes == 2:
        # binary classification
        if signal_label is None:
            # Default to the highest label value (class_labels is sorted).
            signal_label = class_labels[1]
        # Translate the signal label value into its class index (0 or 1).
        signal_label = np.where(class_labels == signal_label)[0][0]
        _libtmvanumpy.add_events_twoclass(
            ROOT.AsCObject(obj), events, class_idx,
            signal_label, weights, test)
    else:
        raise ValueError("labels must contain at least two classes")
def add_regression_events(obj, events, targets, weights=None, test=False):
    """Add regression events to a TMVA::Factory or TMVA::DataLoader from NumPy arrays.

    Parameters
    ----------
    obj : TMVA::Factory or TMVA::DataLoader
        A TMVA::Factory or TMVA::DataLoader (TMVA's interface as of ROOT
        6.07/04) instance with variables already
        booked in exactly the same order as the columns in ``events``.
    events : numpy array of shape [n_events, n_variables]
        A two-dimensional NumPy array containing the rows of events and columns
        of variables. The order of the columns must match the order in which
        you called ``AddVariable()`` for each variable.
    targets : numpy array of shape [n_events] or [n_events, n_targets]
        The target value(s) for each event in ``events``. For multiple target
        values, ``targets`` must be a two-dimensional array with a column for
        each target in the same order in which you called ``AddTarget()``.
    weights : numpy array of shape [n_events], optional
        Event weights.
    test : bool, optional (default=False)
        If True, then the events will be added as test events, otherwise they
        are added as training events by default.

    Notes
    -----
    A TMVA::Factory or TMVA::DataLoader requires you to add both training and
    test events even if you don't intend to call ``TestAllMethods()``.
    """
    # Validate that obj matches the API of the available ROOT release.
    if NEW_TMVA_API:  # pragma: no cover
        if not isinstance(obj, TMVA.DataLoader):
            raise TypeError(
                "obj must be a TMVA.DataLoader "
                "instance for ROOT >= 6.07/04")
    else:  # pragma: no cover
        if not isinstance(obj, TMVA.Factory):
            raise TypeError(
                "obj must be a TMVA.Factory instance")
    # The C extension expects C-contiguous 2D float64 arrays.
    events = np.ascontiguousarray(events, dtype=np.float64)
    if events.ndim == 1:
        # convert to 2D
        events = events[:, np.newaxis]
    elif events.ndim != 2:
        raise ValueError(
            "events must be a two-dimensional array "
            "with one event per row")
    targets = np.asarray(targets, dtype=np.float64)
    if targets.shape[0] != events.shape[0]:
        raise ValueError("the lengths of events and targets do not match")
    if targets.ndim == 1:
        # convert to 2D (one target per event)
        targets = targets[:, np.newaxis]
    elif targets.ndim > 2:
        raise ValueError("targets can not have more than two dimensions")
    if weights is not None:
        weights = np.asarray(weights, dtype=np.float64)
        if weights.shape[0] != events.shape[0]:
            raise ValueError("numbers of events and weights do not match")
        if weights.ndim != 1:
            raise ValueError("weights must be one-dimensional")
    _libtmvanumpy.add_events_regression(
        ROOT.AsCObject(obj), events, targets, weights, test)
from rootpy.extern.six.moves import range
from rootpy.tree import Tree, TreeModel, FloatCol
from rootpy.plotting import Canvas, Hist2D, set_style
from rootpy.io import root_open
from root_numpy import root2array, array2tree, rec2array, fill_hist
import ROOT
import numpy as np
from random import gauss
import random
import os
# Example: bootstrap resampling of a TTree dataset, animated as a GIF.
# Run ROOT head-less and use the ATLAS plotting style.
ROOT.gROOT.SetBatch()
set_style('ATLAS')
# Fix both RNGs so the example output is reproducible.
np.random.seed(0)
random.seed(0)
# create an example TTree dataset
class Sample(TreeModel):
    # Two independent standard-normal branches.
    x = FloatCol()
    y = FloatCol()
with root_open('sample.root', 'recreate'):
    # generate toy data in a TTree
    tree = Tree('sample', model=Sample)
    for i in range(500):
        tree.x = gauss(0, 1)
        tree.y = gauss(0, 1)
        tree.Fill()
    tree.write()
# read in the TTree as a NumPy array
array = root2array('sample.root', 'sample')
# Start the animation from scratch if a previous run left a GIF behind.
if os.path.exists('bootstrap.gif'):
    os.remove('bootstrap.gif')
# Canvas name is set here to aid the automatic documentation generation
# It needs to take the GIF already saved instead of saving a png of the last
# frame.
canvas = Canvas(width=500, height=400, name='bootstrap.gif')
hist = Hist2D(10, -3, 3, 10, -3, 3, drawstyle='LEGO2')
output = root_open('bootstrap.root', 'recreate')
# bootstrap 10 times
for bootstrap_idx in range(10):
    # sample with replacement
    # http://docs.scipy.org/doc/numpy-dev/reference/generated/numpy.random.choice.html
    sample_idx = np.random.choice(len(array), size=len(array), replace=True)
    array_bootstrapped = array[sample_idx]
    # convert back to a TTree and write it out
    tree_bootstrapped = array2tree(
        array_bootstrapped,
        name='bootstrap_{0}'.format(bootstrap_idx))
    tree_bootstrapped.Write()
    tree_bootstrapped.Delete()
    # fill the ROOT histogram with the numpy array
    hist.Reset()
    fill_hist(hist, rec2array(array_bootstrapped))
    hist.Draw()
    hist.xaxis.title = 'x'
    hist.yaxis.title = 'y'
    hist.zaxis.title = 'Events'
    hist.xaxis.limits = (-2.5, 2.5)
    hist.yaxis.limits = (-2.5, 2.5)
    hist.zaxis.range_user = (0, 30)
    hist.xaxis.divisions = 5
    hist.yaxis.divisions = 5
    hist.zaxis.divisions = 5
    # Append this frame to the GIF (50 centisecond delay per frame).
    canvas.Print('bootstrap.gif+50')
# loop the gif
canvas.Print('bootstrap.gif++')
output.Close()
from array import array
import numpy as np
from numpy.random import RandomState
import matplotlib.pyplot as plt
from root_numpy.tmva import add_classification_events, evaluate_reader
from root_numpy import ROOT_VERSION
from ROOT import TMVA, TFile, TCut
# Example: train a TMVA Fisher discriminant on a toy two-class dataset and
# plot the decision boundary plus the per-class score distributions.
plt.style.use('ggplot')
RNG = RandomState(42)
# Construct an example dataset for binary classification: two Gaussian
# blobs centred at (1, 1) for signal and (-1, -1) for background.
n_vars = 2
n_events = 1000
signal = RNG.multivariate_normal(
    np.ones(n_vars), np.diag(np.ones(n_vars)), n_events)
background = RNG.multivariate_normal(
    np.ones(n_vars) * -1, np.diag(np.ones(n_vars)), n_events)
X = np.concatenate([signal, background])
y = np.ones(X.shape[0])
w = RNG.randint(1, 10, n_events * 2)
# Label signal +1 (first half) and background -1, then shuffle.
y[signal.shape[0]:] *= -1
permute = RNG.permutation(y.shape[0])
X = X[permute]
y = y[permute]
# Split into training and test datasets
X_train, y_train, w_train = X[:n_events], y[:n_events], w[:n_events]
X_test, y_test, w_test = X[n_events:], y[n_events:], w[n_events:]
output = TFile('tmva_output.root', 'recreate')
factory = TMVA.Factory('classifier', output,
                       'AnalysisType=Classification:'
                       '!V:Silent:!DrawProgressBar')
# ROOT >= 6.07/04 books data through a separate DataLoader object.
if ROOT_VERSION >= '6.07/04':
    data = TMVA.DataLoader('.')
else:
    data = factory
for n in range(n_vars):
    data.AddVariable('f{0}'.format(n), 'F')
# Call root_numpy's utility functions to add events from the arrays
add_classification_events(data, X_train, y_train, weights=w_train)
add_classification_events(data, X_test, y_test, weights=w_test, test=True)
# The following line is necessary if events have been added individually:
data.PrepareTrainingAndTestTree(TCut('1'), 'NormMode=EqualNumEvents')
# Train a classifier
if ROOT_VERSION >= '6.07/04':
    BookMethod = factory.BookMethod
else:
    BookMethod = TMVA.Factory.BookMethod
BookMethod(data, 'Fisher', 'Fisher',
           'Fisher:VarTransform=None:CreateMVAPdfs:'
           'PDFInterpolMVAPdf=Spline2:NbinsMVAPdf=50:'
           'NsmoothMVAPdf=10')
factory.TrainAllMethods()
# Classify the test dataset with the classifier
reader = TMVA.Reader()
for n in range(n_vars):
    reader.AddVariable('f{0}'.format(n), array('f', [0.]))
reader.BookMVA('Fisher', 'weights/classifier_Fisher.weights.xml')
twoclass_output = evaluate_reader(reader, 'Fisher', X_test)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
cmap = plt.get_cmap('bwr')
fig = plt.figure(figsize=(10, 5))
fig.patch.set_alpha(0)
# Plot the decision boundaries: evaluate the classifier on a dense grid
# covering the data range.
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                     np.arange(y_min, y_max, plot_step))
Z = evaluate_reader(reader, 'Fisher', np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=cmap, vmin=Z.min(), vmax=Z.max(),
             levels=np.linspace(Z.min(), Z.max(), 50))
# Dashed contour at score 0 marks the decision boundary itself.
plt.contour(xx, yy, Z, levels=[0], linestyles='dashed')
plt.axis("tight")
# Plot the training points
for i, n, c in zip([-1, 1], class_names, plot_colors):
    idx = np.where(y == i)
    plt.scatter(X[idx, 0], X[idx, 1],
                c=c, cmap=cmap,
                label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
ax = plt.subplot(122)
ax.xaxis.grid(False)
for i, n, c in zip([-1, 1], class_names, plot_colors):
    plt.hist(twoclass_output[y_test == i],
             bins=20,
             range=(-4, 4),
             facecolor=c,
             label='Class %s' % n,
             alpha=.5, histtype='stepfilled')
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, 140))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.25)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
from root_numpy.tmva import add_regression_events, evaluate_reader
from root_numpy import ROOT_VERSION
from ROOT import TMVA, TFile, TCut
from array import array
# Example: TMVA boosted-decision-tree regression on a noisy 1D sine curve,
# comparing a single tree against a 300-tree ensemble.
plt.style.use('ggplot')
RNG = np.random.RandomState(1)
# Create an example regression dataset: y = sin(x) + sin(6x) + noise
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + \
    np.sin(6 * X).ravel() + \
    RNG.normal(0, 0.1, X.shape[0])
# Fit a regression model
output = TFile('tmva_output.root', 'recreate')
factory = TMVA.Factory('regressor', output,
                       'AnalysisType=Regression:'
                       '!V:Silent:!DrawProgressBar')
# ROOT >= 6.07/04 books data through a separate DataLoader object.
if ROOT_VERSION >= '6.07/04':
    data = TMVA.DataLoader('.')
else:
    data = factory
data.AddVariable('x', 'F')
data.AddTarget('y', 'F')
# TMVA requires both training and test events to be supplied.
add_regression_events(data, X, y)
add_regression_events(data, X, y, test=True)
# The following line is necessary if events have been added individually:
data.PrepareTrainingAndTestTree(TCut('1'), '')
if ROOT_VERSION >= '6.07/04':
    BookMethod = factory.BookMethod
else:
    BookMethod = TMVA.Factory.BookMethod
BookMethod(data, 'BDT', 'BDT1',
           'nCuts=20:NTrees=1:MaxDepth=4:BoostType=AdaBoostR2:'
           'SeparationType=RegressionVariance')
BookMethod(data, 'BDT', 'BDT2',
           'nCuts=20:NTrees=300:MaxDepth=4:BoostType=AdaBoostR2:'
           'SeparationType=RegressionVariance')
factory.TrainAllMethods()
# Predict the regression target
reader = TMVA.Reader()
reader.AddVariable('x', array('f', [0.]))
reader.BookMVA('BDT1', 'weights/regressor_BDT1.weights.xml')
reader.BookMVA('BDT2', 'weights/regressor_BDT2.weights.xml')
y_1 = evaluate_reader(reader, 'BDT1', X)
y_2 = evaluate_reader(reader, 'BDT2', X)
# Plot the results
fig = plt.figure()
fig.patch.set_alpha(0)
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="1 tree", linewidth=2)
plt.plot(X, y_2, c="r", label="300 trees", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
from array import array
import numpy as np
from numpy.random import RandomState
from root_numpy.tmva import add_classification_events, evaluate_reader
from root_numpy import ROOT_VERSION
import matplotlib.pyplot as plt
from ROOT import TMVA, TFile, TCut
# Example: train a TMVA multilayer perceptron on a toy three-class dataset
# and plot the resulting decision regions.
plt.style.use('ggplot')
RNG = RandomState(42)
# Construct an example multiclass dataset: three Gaussian blobs.
n_events = 1000
class_0 = RNG.multivariate_normal(
    [-2, -2], np.diag([1, 1]), n_events)
class_1 = RNG.multivariate_normal(
    [0, 2], np.diag([1, 1]), n_events)
class_2 = RNG.multivariate_normal(
    [2, -2], np.diag([1, 1]), n_events)
X = np.concatenate([class_0, class_1, class_2])
y = np.ones(X.shape[0])
w = RNG.randint(1, 10, n_events * 3)
# Label the blobs 0, 1 and 2 respectively, then shuffle.
y[:class_0.shape[0]] *= 0
y[-class_2.shape[0]:] *= 2
permute = RNG.permutation(y.shape[0])
X = X[permute]
y = y[permute]
# Split into training and test datasets
X_train, y_train, w_train = X[:n_events], y[:n_events], w[:n_events]
X_test, y_test, w_test = X[n_events:], y[n_events:], w[n_events:]
output = TFile('tmva_output.root', 'recreate')
factory = TMVA.Factory('classifier', output,
                       'AnalysisType=Multiclass:'
                       '!V:Silent:!DrawProgressBar')
# ROOT >= 6.07/04 books data through a separate DataLoader object.
if ROOT_VERSION >= '6.07/04':
    data = TMVA.DataLoader('.')
else:
    data = factory
for n in range(2):
    data.AddVariable('f{0}'.format(n), 'F')
# Call root_numpy's utility functions to add events from the arrays
add_classification_events(data, X_train, y_train, weights=w_train)
add_classification_events(data, X_test, y_test, weights=w_test, test=True)
# The following line is necessary if events have been added individually:
data.PrepareTrainingAndTestTree(TCut('1'), 'NormMode=EqualNumEvents')
# Train an MLP
if ROOT_VERSION >= '6.07/04':
    BookMethod = factory.BookMethod
else:
    BookMethod = TMVA.Factory.BookMethod
BookMethod(data, 'MLP', 'MLP',
           'NeuronType=tanh:NCycles=200:HiddenLayers=N+2,2:'
           'TestRate=5:EstimatorType=MSE')
factory.TrainAllMethods()
# Classify the test dataset with the MLP
reader = TMVA.Reader()
for n in range(2):
    reader.AddVariable('f{0}'.format(n), array('f', [0.]))
reader.BookMVA('MLP', 'weights/classifier_MLP.weights.xml')
class_proba = evaluate_reader(reader, 'MLP', X_test)
# Plot the decision boundaries: evaluate the MLP on a dense grid.
plot_colors = "rgb"
plot_step = 0.02
class_names = "ABC"
cmap = plt.get_cmap('Paired')
fig = plt.figure(figsize=(5, 5))
fig.patch.set_alpha(0)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                     np.arange(y_min, y_max, plot_step))
Z = evaluate_reader(reader, 'MLP', np.c_[xx.ravel(), yy.ravel()])
# Pick the highest-scoring class at each grid point.
Z = np.argmax(Z, axis=1) - 1
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=cmap, alpha=0.5)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(3), class_names, plot_colors):
    idx = np.where(y == i)
    plt.scatter(X[idx, 0], X[idx, 1],
                c=c, cmap=cmap,
                label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
plt.tight_layout()
plt.show()
import torch.nn as nn
def get_valid_patch_sizes():
    """Return the input patch widths the network accepts.

    Sizes run from 572 down to 92 in steps of 16 (31 values), matching
    the 572-pixel reference input of UNetGNRes.
    """
    # List comprehension instead of list(generator) — same values,
    # idiomatic form.
    return [572 - (16 * i) for i in range(31)]
class DownBlock(nn.Module):
    """Encoder stage: 2x2 max-pool followed by a residual double conv.

    The two 3x3 convs (padding=1, spatial size preserved) double the
    channel count; a 1x1 conv projects back down to ``in_channels`` so
    the pooled input can be added as a residual connection.
    NOTE(review): nn.GroupNorm(32, in_channels*2) requires
    ``in_channels * 2`` to be divisible by 32 — holds for the 64-channel
    usage in UNetGNRes; confirm before reusing with other widths.
    """
    def __init__(self, in_channels):
        super().__init__()
        self.pool = nn.MaxPool2d(2)
        # First conv doubles the channel count.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, in_channels*2,
                      kernel_size=3, padding=1),
            nn.ReLU(),
            nn.GroupNorm(32, in_channels*2)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels*2, in_channels*2,
                      kernel_size=3, padding=1),
            nn.ReLU(),
            nn.GroupNorm(32, in_channels*2)
        )
        self.conv1x1 = nn.Sequential(
            # down sample channels again so the residual add lines up.
            nn.Conv2d(in_channels*2, in_channels,
                      kernel_size=1, stride=1, bias=False)
        )
    def forward(self, x):
        # Halve the spatial resolution, run the double conv, then add
        # the pooled input back (residual) at matching channel count.
        out1 = self.pool(x)
        out2 = self.conv1(out1)
        out3 = self.conv2(out2)
        out4 = self.conv1x1(out3)
        return out4 + out1
def crop_tensor(tensor, target):
    """Centre-crop ``tensor`` spatially to the height/width of ``target``.

    Both arguments are NCHW tensors; only the last two (spatial)
    dimensions are cropped.
    """
    _, _, tensor_height, tensor_width = tensor.size()
    _, _, crop_height, crop_width = target.size()
    # Bug fix: offsets must pair width with crop_width and height with
    # crop_height. The previous code swapped them, which mis-centred (or
    # cropped out of bounds) whenever the target was non-square.
    left = (tensor_width - crop_width) // 2
    top = (tensor_height - crop_height) // 2
    right = left + crop_width
    bottom = top + crop_height
    cropped_tensor = tensor[:, :, top: bottom, left: right]
    return cropped_tensor
class UpBlock(nn.Module):
    """Decoder stage: transposed-conv upsample, skip-add, double conv.

    The skip tensor from the matching encoder stage is centre-cropped to
    the upsampled size and added (residual-style, rather than the channel
    concatenation of the original U-Net). The two 3x3 convs use no
    padding, so each stage shrinks the spatial size by 4 pixels.
    """
    def __init__(self, in_channels):
        super().__init__()
        # 2x2 stride-2 transposed conv doubles the spatial resolution.
        self.conv1 = nn.Sequential(
            nn.ConvTranspose2d(in_channels, in_channels,
                               kernel_size=2, stride=2, padding=0),
            nn.ReLU(),
            nn.GroupNorm(32, in_channels)
        )
        # Unpadded 3x3 convs: each trims 2 pixels from height and width.
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels, in_channels,
                      kernel_size=3, padding=0),
            nn.ReLU(),
            nn.GroupNorm(32, in_channels)
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_channels, in_channels,
                      kernel_size=3, padding=0),
            nn.ReLU(),
            nn.GroupNorm(32, in_channels)
        )
    def forward(self, x, down_out):
        # Upsample, crop the encoder feature map to match, add, then conv.
        out = self.conv1(x)
        cropped = crop_tensor(down_out, out)
        out = cropped + out  # residual
        out = self.conv2(out)
        out = self.conv3(out)
        return out
class UNetGNRes(nn.Module):
    """U-Net variant with GroupNorm and additive (residual) skip links.

    The entry convs and every UpBlock use unpadded 3x3 convolutions, so
    the 2-channel output map is smaller than the input patch (see
    get_valid_patch_sizes for input widths that divide cleanly).
    """
    def __init__(self, im_channels=3):
        super().__init__()
        # Two unpadded 3x3 convs: each trims 2 pixels per side pair.
        self.conv_in = nn.Sequential(
            nn.Conv2d(im_channels, 64, kernel_size=3, padding=0),
            nn.ReLU(),
            nn.GroupNorm(32, 64),
            nn.Conv2d(64, 64, kernel_size=3, padding=0),
            nn.ReLU(),
            nn.GroupNorm(32, 64)
            # now at 568 x 568, 64 channels (for a 572 x 572 input)
        )
        # Four encoder stages (each halves H and W) ...
        self.down1 = DownBlock(64)
        self.down2 = DownBlock(64)
        self.down3 = DownBlock(64)
        self.down4 = DownBlock(64)
        # ... mirrored by four decoder stages (each doubles H and W).
        self.up1 = UpBlock(64)
        self.up2 = UpBlock(64)
        self.up3 = UpBlock(64)
        self.up4 = UpBlock(64)
        # 1x1 conv producing the 2-channel output map.
        self.conv_out = nn.Sequential(
            nn.Conv2d(64, 2, kernel_size=1, padding=0),
            nn.ReLU(),
            nn.GroupNorm(2, 2)
        )
    def forward(self, x):
        # Encoder path; intermediate outputs feed the skip connections.
        out1 = self.conv_in(x)
        out2 = self.down1(out1)
        out3 = self.down2(out2)
        out4 = self.down3(out3)
        out5 = self.down4(out4)
        # Decoder path: each stage consumes the matching encoder output.
        out = self.up1(out5, out4)
        out = self.up2(out, out3)
        out = self.up3(out, out2)
        out = self.up4(out, out1)
        out = self.conv_out(out)
        return out
import numpy as np
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
from skimage.transform import resize
import im_utils
def get_indices(im_shape, scale, sigma, padding=60):
    """Build smooth random deformation coordinate grids for an image.

    Elastic deformation based on cognitivemedium.com/assets/rmnist/Simard.pdf
    Returns (x, y) coordinate arrays of the padded shape, suitable for
    scipy map_coordinates (see transform_image).
    """
    # Work on the padded shape so the map can be applied to a
    # reflect-padded image (see transform_image).
    im_shape = [im_shape[0] + (padding * 2), im_shape[1] + (padding * 2)]
    # We generate a grid of smaller co-ordinates and then resize.
    # It's faster as less gaussian_filtering.
    # Are there any downsides to this?
    # Further empirical tests required.
    resize_coef = 8
    smaller = (im_shape[0]//resize_coef, im_shape[1]//resize_coef)
    # NOTE(review): sigma and scale are rescaled by resize_coef/2 rather
    # than resize_coef — presumably tuned empirically; confirm before
    # changing resize_coef.
    sigma /= (resize_coef / 2)
    scale /= (resize_coef / 2)
    # Smoothed uniform noise gives one displacement field per axis.
    randx = np.random.uniform(low=-1.0, high=1.0, size=smaller)
    randy = np.random.uniform(low=-1.0, high=1.0, size=smaller)
    x_filtered = gaussian_filter(randx, sigma, mode="reflect") * scale
    y_filtered = gaussian_filter(randy, sigma, mode="reflect") * scale
    # Upsample the displacement fields back to the padded image size.
    x_filtered = resize(x_filtered, im_shape[:2])
    y_filtered = resize(y_filtered, im_shape[:2])
    # Add the displacements to the identity coordinate grid.
    x_coords, y_coords = np.mgrid[0:im_shape[0], 0:im_shape[1]]
    x_deformed = x_coords + x_filtered
    y_deformed = y_coords + y_filtered
    return x_deformed, y_deformed
def get_elastic_map(im_shape, scale, intensity):
assert 0 <= scale <= 1
assert 0 <= intensity <= 1
min_alpha = 200
max_alpha = 2500
min_sigma = 15
max_sigma = 60
alpha = min_alpha + ((max_alpha-min_alpha) * scale)
alpha *= intensity
sigma = min_sigma + ((max_sigma-min_sigma) * scale)
return get_indices(im_shape, scale=alpha, sigma=sigma)
def transform_image(image, def_map, padding=60, channels=3):
""" conditional transform, depending on presence of
values in each channel """
indices = def_map
image = np.array(image)
image = im_utils.pad(image, padding, mode='reflect')
# We presume there are 3 channels. Checking shape is slow.
for i in range(channels):
if np.sum(image[:, :, i]):
image[:, :, i] = map_coordinates(image[:, :, i], indices, order=1)
image = image[padding:-padding, padding:-padding]
return image | /root_painter_trainer-0.2.27.0.tar.gz/root_painter_trainer-0.2.27.0/trainer/elastic.py | 0.744378 | 0.461805 | elastic.py | pypi |
import numpy as np
VLEN = np.vectorize(len)
def stretch(arr, fields=None, return_indices=False):
"""Stretch an array.
Stretch an array by ``hstack()``-ing multiple array fields while
preserving column names and record array structure. If a scalar field is
specified, it will be stretched along with array fields.
Parameters
----------
arr : NumPy structured or record array
The array to be stretched.
fields : list of strings, optional (default=None)
A list of column names to stretch. If None, then stretch all fields.
return_indices : bool, optional (default=False)
If True, the array index of each stretched array entry will be
returned in addition to the stretched array.
This changes the return type of this function to a tuple consisting
of a structured array and a numpy int64 array.
Returns
-------
ret : A NumPy structured array
The stretched array.
Examples
--------
>>> import numpy as np
>>> from root_numpy import stretch
>>> arr = np.empty(2, dtype=[('scalar', np.int), ('array', 'O')])
>>> arr[0] = (0, np.array([1, 2, 3], dtype=np.float))
>>> arr[1] = (1, np.array([4, 5, 6], dtype=np.float))
>>> stretch(arr, ['scalar', 'array'])
array([(0, 1.0), (0, 2.0), (0, 3.0), (1, 4.0), (1, 5.0), (1, 6.0)],
dtype=[('scalar', '<i8'), ('array', '<f8')])
"""
dtype = []
len_array = None
if fields is None:
fields = arr.dtype.names
# Construct dtype and check consistency
for field in fields:
dt = arr.dtype[field]
if dt == 'O' or len(dt.shape):
if dt == 'O':
# Variable-length array field
lengths = VLEN(arr[field])
else:
lengths = np.repeat(dt.shape[0], arr.shape[0])
# Fixed-length array field
if len_array is None:
len_array = lengths
elif not np.array_equal(lengths, len_array):
raise ValueError(
"inconsistent lengths of array columns in input")
if dt == 'O':
dtype.append((field, arr[field][0].dtype))
else:
dtype.append((field, arr[field].dtype, dt.shape[1:]))
else:
# Scalar field
dtype.append((field, dt))
if len_array is None:
raise RuntimeError("no array column in input")
# Build stretched output
ret = np.empty(np.sum(len_array), dtype=dtype)
for field in fields:
dt = arr.dtype[field]
if dt == 'O' or len(dt.shape) == 1:
# Variable-length or 1D fixed-length array field
ret[field] = np.hstack(arr[field])
elif len(dt.shape):
# Multidimensional fixed-length array field
ret[field] = np.vstack(arr[field])
else:
# Scalar field
ret[field] = np.repeat(arr[field], len_array)
if return_indices:
idx = np.concatenate(list(map(np.arange, len_array)))
return ret, idx
return ret | /root_pandas-0.7.0.tar.gz/root_pandas-0.7.0/root_pandas/utils.py | 0.894225 | 0.757548 | utils.py | pypi |
from collections import Counter
import numpy as np
from numpy.lib.recfunctions import append_fields
from pandas import DataFrame, RangeIndex
import pandas as pd
from root_numpy import root2array, list_trees
import fnmatch
from root_numpy import list_branches
from root_numpy.extern.six import string_types
import itertools
from math import ceil
import re
import ROOT
import warnings
from .utils import stretch
__all__ = [
'read_root',
'to_root',
]
NOEXPAND_PREFIX = 'noexpand:'
def _getitem(string, depth=0):
"""
Get an item from the string (where item is up to the next ',' or '}' or the
end of the string)
"""
out = [""]
while string:
char = string[0]
if depth and (char == ',' or char == '}'):
return out, string
if char == '{':
groups_string = _getgroup(string[1:], depth+1)
if groups_string is not None:
groups, string = groups_string
out = [a + g for a in out for g in groups]
continue
if char == '\\' and len(string) > 1:
string, char = string[1:], char + string[1]
out, string = [a + char for a in out], string[1:]
return out, string
def _getgroup(string, depth):
"""
Get a group from the string, where group is a list of all the comma
separated substrings up to the next '}' char or the brace enclosed substring
if there is no comma
"""
out, comma = [], False
while string:
items, string = _getitem(string, depth)
if not string:
break
out += items
if string[0] == '}':
if comma:
return out, string[1:]
return ['{' + a + '}' for a in out], string[1:]
if string[0] == ',':
comma, string = True, string[1:]
return None
def expand_braces(orig):
return _getitem(orig, 0)[0]
def get_nonscalar_columns(array):
if len(array) == 0:
return []
first_row = array[0]
bad_cols = np.array([x.ndim != 0 for x in first_row])
col_names = np.array(array.dtype.names)
bad_names = col_names[bad_cols]
return list(bad_names)
def get_matching_variables(branches, patterns, fail=True):
# Convert branches to a set to make x "in branches" O(1) on average
branches = set(branches)
# Find any trivial matches
selected = sorted(branches.intersection(patterns),
key=lambda s: patterns.index(s))
# Any matches that weren't trivial need to be looped over...
for pattern in set(patterns).difference(selected):
found = False
# Avoid using fnmatch if the pattern if possible
if re.findall(r'(\*)|(\?)|(\[.*\])|(\[\!.*\])', pattern):
for match in fnmatch.filter(branches, pattern):
found = True
if match not in selected:
selected.append(match)
elif pattern in branches:
raise NotImplementedError('I think this is impossible?')
if not found and fail:
raise ValueError("Pattern '{}' didn't match any branch".format(pattern))
return selected
def filter_noexpand_columns(columns):
"""Return columns not containing and containing the noexpand prefix.
Parameters
----------
columns: sequence of str
A sequence of strings to be split
Returns
-------
Two lists, the first containing strings without the noexpand prefix, the
second containing those that do with the prefix filtered out.
"""
prefix_len = len(NOEXPAND_PREFIX)
noexpand = [c[prefix_len:] for c in columns if c.startswith(NOEXPAND_PREFIX)]
other = [c for c in columns if not c.startswith(NOEXPAND_PREFIX)]
return other, noexpand
def do_flatten(arr, flatten):
if flatten is True:
warnings.warn(" The option flatten=True is deprecated. Please specify the branches you would like "
"to flatten in a list: flatten=['foo', 'bar']", FutureWarning)
arr_, idx = stretch(arr, return_indices=True)
else:
nonscalar = get_nonscalar_columns(arr)
fields = [x for x in arr.dtype.names if (x not in nonscalar or x in flatten)]
for col in flatten:
if col in nonscalar:
pass
elif col in fields:
raise ValueError("Requested to flatten {col} but it has a scalar type"
.format(col=col))
else:
raise ValueError("Requested to flatten {col} but it wasn't loaded from the input file"
.format(col=col))
arr_, idx = stretch(arr, fields=fields, return_indices=True)
arr = append_fields(arr_, '__array_index', idx, usemask=False, asrecarray=True)
return arr
def read_root(paths, key=None, columns=None, ignore=None, chunksize=None, where=None, flatten=False, *args, **kwargs):
"""
Read a ROOT file, or list of ROOT files, into a pandas DataFrame.
Further *args and *kwargs are passed to root_numpy's root2array.
If the root file contains a branch matching __index__*, it will become the DataFrame's index.
Parameters
----------
paths: string or list
The path(s) to the root file(s)
key: string
The key of the tree to load.
columns: str or sequence of str
A sequence of shell-patterns (can contain *, ?, [] or {}). Matching columns are read.
The columns beginning with `noexpand:` are not interpreted as shell-patterns,
allowing formula columns such as `noexpand:2*x`. The column in the returned DataFrame
will not have the `noexpand:` prefix.
ignore: str or sequence of str
A sequence of shell-patterns (can contain *, ?, [] or {}). All matching columns are ignored (overriding the columns argument).
chunksize: int
If this parameter is specified, an iterator is returned that yields DataFrames with `chunksize` rows.
where: str
Only rows that match the expression will be read.
flatten: sequence of str
A sequence of column names. Will use root_numpy.stretch to flatten arrays in the specified columns into
individual entries. All arrays specified in the columns must have the same length for this to work.
Be careful if you combine this with chunksize, as chunksize will refer to the number of unflattened entries,
so you will be iterating over a number of entries that is potentially larger than chunksize.
The index of each element within its former array will be saved in the __array_index column.
Returns
-------
DataFrame created from matching data in the specified TTree
Notes
-----
>>> df = read_root('test.root', 'MyTree', columns=['A{B,C}*', 'D'], where='ABB > 100')
"""
if not isinstance(paths, list):
paths = [paths]
# Use a single file to search for trees and branches, ensuring the key exists
for seed_path in paths:
trees = list_trees(seed_path)
if key and key not in trees:
continue
break
else:
if key:
raise OSError('{} not found in any of the given paths'.format(key))
else:
raise OSError('No trees found in any of the given paths')
if not key:
if len(trees) == 1:
key = trees[0]
elif len(trees) == 0:
raise ValueError('No trees found in {}'.format(seed_path))
else:
raise ValueError('More than one tree found in {}'.format(seed_path))
branches = list_branches(seed_path, key)
if not columns:
all_vars = branches
else:
if isinstance(columns, string_types):
columns = [columns]
# __index__* is always loaded if it exists
# XXX Figure out what should happen with multi-dimensional indices
index_branches = list(filter(lambda x: x.startswith('__index__'), branches))
if index_branches:
columns = columns[:]
columns.append(index_branches[0])
columns, noexpand = filter_noexpand_columns(columns)
columns = list(itertools.chain.from_iterable(list(map(expand_braces, columns))))
all_vars = get_matching_variables(branches, columns) + noexpand
if ignore:
if isinstance(ignore, string_types):
ignore = [ignore]
ignored = get_matching_variables(branches, ignore, fail=False)
ignored = list(itertools.chain.from_iterable(list(map(expand_braces, ignored))))
if any(map(lambda x: x.startswith('__index__'), ignored)):
raise ValueError('__index__* branch is being ignored!')
for var in ignored:
all_vars.remove(var)
if chunksize:
tchain = ROOT.TChain(key)
for path in paths:
tchain.Add(path)
n_entries = tchain.GetEntries()
n_chunks = int(ceil(float(n_entries) / chunksize))
# XXX could explicitly clean up the opened TFiles with TChain::Reset
class genchunk(object):
def __len__(self):
return n_chunks
def __iter__(self):
current_index = 0
for chunk in range(n_chunks):
arr = root2array(paths, key, all_vars, start=chunk * chunksize, stop=(chunk+1) * chunksize, selection=where, *args, **kwargs)
if len(arr) == 0:
continue
if flatten:
arr = do_flatten(arr, flatten)
yield convert_to_dataframe(arr, start_index=current_index)
current_index += len(arr)
return genchunk()
arr = root2array(paths, key, all_vars, selection=where, *args, **kwargs)
if flatten:
arr = do_flatten(arr, flatten)
return convert_to_dataframe(arr)
def convert_to_dataframe(array, start_index=None):
nonscalar_columns = get_nonscalar_columns(array)
# Columns containing 2D arrays can't be loaded so convert them 1D arrays of arrays
reshaped_columns = {}
for col in nonscalar_columns:
if array[col].ndim >= 2:
reshaped = np.zeros(len(array[col]), dtype='O')
for i, row in enumerate(array[col]):
reshaped[i] = row
reshaped_columns[col] = reshaped
indices = list(filter(lambda x: x.startswith('__index__'), array.dtype.names))
if len(indices) == 0:
index = None
if start_index is not None:
index = RangeIndex(start=start_index, stop=start_index + len(array))
df = DataFrame.from_records(array, exclude=reshaped_columns, index=index)
elif len(indices) == 1:
# We store the index under the __index__* branch, where
# * is the name of the index
df = DataFrame.from_records(array, exclude=reshaped_columns, index=indices[0])
index_name = indices[0][len('__index__'):]
if not index_name:
# None means the index has no name
index_name = None
df.index.name = index_name
else:
raise ValueError("More than one index found in file")
# Manually the columns which were reshaped
for key, reshaped in reshaped_columns.items():
df[key] = reshaped
# Reshaping can cause the order of columns to change so we have to change it back
if reshaped_columns:
# Filter to remove __index__ columns
columns = [c for c in array.dtype.names if c in df.columns]
assert len(columns) == len(df.columns), (columns, df.columns)
df = df.reindex(columns, axis=1, copy=False)
# Convert categorical columns back to categories
for c in df.columns:
match = re.match(r'^__rpCaT\*([^\*]+)\*(True|False)\*', c)
if match:
real_name, ordered = match.groups()
categories = c.split('*')[3:]
df[c] = pd.Categorical.from_codes(df[c], categories, ordered={'True': True, 'False': False}[ordered])
df.rename(index=str, columns={c: real_name}, inplace=True)
return df
def to_root(df, path, key='my_ttree', mode='w', store_index=True, *args, **kwargs):
"""
Write DataFrame to a ROOT file.
Parameters
----------
path: string
File path to new ROOT file (will be overwritten)
key: string
Name of tree that the DataFrame will be saved as
mode: string, {'w', 'a'}
Mode that the file should be opened in (default: 'w')
store_index: bool (optional, default: True)
Whether the index of the DataFrame should be stored as
an __index__* branch in the tree
Notes
-----
Further *args and *kwargs are passed to root_numpy's array2root.
>>> df = DataFrame({'x': [1,2,3], 'y': [4,5,6]})
>>> df.to_root('test.root')
The DataFrame index will be saved as a branch called '__index__*',
where * is the name of the index in the original DataFrame
"""
if mode == 'a':
mode = 'update'
elif mode == 'w':
mode = 'recreate'
else:
raise ValueError('Unknown mode: {}. Must be "a" or "w".'.format(mode))
column_name_counts = Counter(df.columns)
if max(column_name_counts.values()) > 1:
raise ValueError('DataFrame contains duplicated column names: ' +
' '.join({k for k, v in column_name_counts.items() if v > 1}))
from root_numpy import array2tree
# We don't want to modify the user's DataFrame here, so we make a shallow copy
df_ = df.copy(deep=False)
if store_index:
name = df_.index.name
if name is None:
# Handle the case where the index has no name
name = ''
df_['__index__' + name] = df_.index
# Convert categorical columns into something root_numpy can serialise
for col in df_.select_dtypes(['category']).columns:
name_components = ['__rpCaT', col, str(df_[col].cat.ordered)]
name_components.extend(df_[col].cat.categories)
if ['*' not in c for c in name_components]:
sep = '*'
else:
raise ValueError('Unable to find suitable separator for columns')
df_[col] = df_[col].cat.codes
df_.rename(index=str, columns={col: sep.join(name_components)}, inplace=True)
arr = df_.to_records(index=False)
root_file = ROOT.TFile.Open(path, mode)
if not root_file:
raise IOError("cannot open file {0}".format(path))
if not root_file.IsWritable():
raise IOError("file {0} is not writable".format(path))
# Navigate to the requested directory
open_dirs = [root_file]
for dir_name in key.split('/')[:-1]:
current_dir = open_dirs[-1].Get(dir_name)
if not current_dir:
current_dir = open_dirs[-1].mkdir(dir_name)
current_dir.cd()
open_dirs.append(current_dir)
# The key is now just the top component
key = key.split('/')[-1]
# If a tree with that name exists, we want to update it
tree = open_dirs[-1].Get(key)
if not tree:
tree = None
tree = array2tree(arr, name=key, tree=tree)
tree.Write(key, ROOT.TFile.kOverwrite)
root_file.Close()
# Patch pandas DataFrame to support to_root method
DataFrame.to_root = to_root | /root_pandas-0.7.0.tar.gz/root_pandas-0.7.0/root_pandas/readwrite.py | 0.788054 | 0.297486 | readwrite.py | pypi |
from numpy import (
array, sqrt, cbrt, sign, abs as np_abs, float64, power as np_pow, finfo,
concatenate, inf, around as np_round, isclose, isreal,
)
from ._quadratic import solve_quadratic
from ._tracking import RootTracker
from ._utils import get_shared_figs, check_pairwise
AFTER_1 = 1 + finfo(float64).eps
CUBIC_CONSTANT = 1.324718
def get_quotients_near_triple(A, B, C, D):
"""
Get the quotients for the case when the roots are almost a triple.
"""
a = A
b = - B / 3
c = C / 3
d = - D
if a != 0:
p = b / a
else:
p = inf
if b != 0:
q = c / b
else:
q = inf
if c != 0:
r = d / c
else:
r = inf
return array([p, q, r])
def check_quotients_near_triple(
A, B, C, D, *, num_calls_triple=0, num_calls_double=0
):
"""
Handle the case when the roots are almost a triple.
"""
qs = get_quotients_near_triple(A, B, C, D)
if not check_pairwise(qs, isclose):
return False, None
λ = get_shared_figs(qs)
a = A
b = - B / 3
c = C / 3
d = - D
b_dash = b - λ * a
c_dash = c - λ * b
d_dash = d - λ * c
c_ddash = c_dash - λ * b_dash
d_ddash = d_dash - λ * c_dash
d_star = d_ddash - λ * c_ddash
case, roots = solve_cubic(
a, - 3 * b_dash, 3 * c_ddash, - d_star,
num_calls_triple=num_calls_triple+1,
num_calls_double=num_calls_double,
recurse=(num_calls_triple < 10),
)
if case == "0":
case = "triple"
return True, (case, roots + λ)
def check_quotients_near_double(
A, B, C, D, *, num_calls_triple=0, num_calls_double=0
):
"""
Get the quotients for the case when the roots are almost a double.
"""
qs = get_quotients_near_double(A, B, C, D)
pos_case = check_pairwise(qs, isclose)
neg_case = check_pairwise([qs[0], -qs[1]], isclose)
if not (pos_case or neg_case):
return False, None
if pos_case:
λ = get_shared_figs(qs)
elif neg_case:
λ = get_shared_figs(array([qs[0], -qs[1]]))
else:
raise RuntimeError("double quotients check has problem")
a = A
b = - B / 3
c = C / 3
d = - D
b_dash = b - λ * a
c_dash = c - λ * b
c_ddash = c_dash - λ * b_dash
if d < a * np_pow(λ, 3):
D = np_round(d)
δ = d - D
d_dash = D - λ * c
d_ddash = d_dash - λ * c_dash
d_star = (d_ddash - λ * c_ddash) + δ
else:
d_dash = d - λ * c
d_ddash = d_dash - λ * c_dash
d_star = d_ddash - λ * c_ddash
case, roots = solve_cubic(
a, - 3 * b_dash, 3 * c_ddash, - d_star,
num_calls_triple=num_calls_triple,
num_calls_double=num_calls_double+1,
recurse=(num_calls_double < 10),
)
return True, (case, roots + λ)
def get_quotients_near_double(A, B, C, D):
"""
Handle the case when the roots are almost a double.
"""
a = A
b = - B / 3
c = C / 3
d = - D
p = (b * c - a * d) / (np_pow(b, 2) - a * c) / 2
q = sqrt((np_pow(c, 2) - b * d) / (np_pow(b, 2) - a * c))
return array([p, q])
def _eval(X, A, B, C, D):
"""
Python implementation of EVAL
"""
q0 = A * X
B1 = q0 + B
C2 = B1 * X + C
Q_dash = (q0 + B1) * X + C2
Q = C2 * X + D
return Q, Q_dash, B1, C2
def _fin(X, A, b1, c2):
"""
Python implementation of `fin`
"""
return concatenate([[X], solve_quadratic(A, b1, c2)])
def _iter(x0, A, B, C, D, after_1=AFTER_1):
"""
The body of the iteration to find the first root
"""
X = x0
q, q_dash, b1, c2 = _eval(X, A, B, C, D)
if q_dash == 0:
x0 = X
else:
x0 = X - (q / q_dash) / after_1
return X, x0, b1, c2
def solve_cubic(
A, B, C, D, *, num_calls_triple=0, num_calls_double=0, recurse=True,
after_1=AFTER_1
):
"""
Solves the quadratic A x^3 + B x^2 + C x + D.
Python implementation of QBC
"""
# pylint: disable=too-many-branches
if isreal(A):
A = A.real
if isreal(B):
B = B.real
if isreal(C):
C = C.real
if isreal(D):
D = D.real
if A == 0:
return "quad", _fin(inf, A, B, C)
if D == 0:
return "0", _fin(0, A, B, C)
if recurse:
is_near_triple, ret_vals = check_quotients_near_triple(
A, B, C, D, num_calls_triple=num_calls_triple,
num_calls_double=num_calls_double,
)
if is_near_triple:
return ret_vals
is_near_double, ret_vals = check_quotients_near_double(
A, B, C, D, num_calls_triple=num_calls_triple,
num_calls_double=num_calls_double,
)
if is_near_double:
return ret_vals
X = - (B / A) / 3
q, q_dash, b1, c2 = _eval(X, A, B, C, D)
t = q / A
r = cbrt(np_abs(t))
s = sign(t)
t = - q_dash / A
if t > 0:
r = CUBIC_CONSTANT * max([r, sqrt(t)])
x0 = X - s * r
if x0 == X:
return "no iter", _fin(X, A, b1, c2)
X, x0, b1, c2 = _iter(x0, A, B, C, D, after_1=after_1)
while s * x0 > s * X:
X, x0, b1, c2 = _iter(x0, A, B, C, D, after_1=after_1)
if np_abs(A) * np_pow(X, 2) > np_abs(D * X):
c2 = - D / X
b1 = (c2 - C) / X
return "iter", _fin(X, A, b1, c2)
class CubicTracker(RootTracker):
"""
Track a given root of a changing cubic equation
"""
def _finalise(self, **kwargs):
self._after_1 = 1 + finfo(self._dtype).eps
def _solve_root(self, coef, **kwargs):
# pylint: disable=unused-variable
A, B, C, D = coef
case, roots = solve_cubic(A, B, C, D, after_1=self._after_1)
return roots | /root-solver-0.1.1.tar.gz/root-solver-0.1.1/src/root_solver/_cubic.py | 0.641422 | 0.488893 | _cubic.py | pypi |
import click
import glob
import numpy as np
import os
import pathlib
import sys
import tifffile as tiff
import torch
from rich import traceback, print
from rts_package.models.unet import U2NET
from torchvision.datasets.utils import download_url
from urllib.error import URLError
WD = os.path.dirname(__file__)
@click.command()
@click.option('-i', '--input', required=True, type=str, help='Path to data file to predict.')
@click.option('-m', '--model', type=str,
help='Path to an already trained XGBoost model. If not passed a default model will be loaded.')
@click.option('-c/-nc', '--cuda/--no-cuda', type=bool, default=False, help='Whether to enable cuda or not')
@click.option('-s/-ns', '--sanitize/--no-sanitize', type=bool, default=False,
help='Whether to remove model after prediction or not.')
@click.option('-suf', '--suffix', type=str, help='Path to write the output to')
@click.option('-o', '--output', default="", required=True, type=str, help='Path to write the output to')
@click.option('-h', '--ome', type=bool, default=False, help='human readable output (OME-TIFF format), input and output as image channels')
def main(input: str, suffix: str, model: str, cuda: bool, output: str, sanitize: bool, ome: bool):
"""Command-line interface for rts-pred"""
print(r"""[bold blue]
rts-pred
""")
print('[bold blue] Run [green]rts-pred --help [blue]for an overview of all commands\n')
if not model:
model = get_pytorch_model(os.path.join(f'{os.getcwd()}', "models", "model.ckpt"))
else:
model = get_pytorch_model(model)
if cuda:
model.cuda()
print('[bold blue] Parsing data...')
if os.path.isdir(input):
input_list = glob.glob(os.path.join(input, "*"))
for input_i in input_list:
print(f'[bold yellow] Input: {input_i}')
file_prediction(input_i, model, input_i.replace(input, output).replace(".tif", suffix), ome_out=ome)
else:
file_prediction(input, model, output, ome_out=ome)
if sanitize:
os.remove(os.path.join(f'{WD}', "models", "model.ckpt"))
def file_prediction(input, model, output, ome_out=False):
data_to_predict = read_data_to_predict(input)
predictions = predict(data_to_predict, model)
if ome_out:
print(f'[bold green] Output: {output}.ome.tif')
write_ome_out(data_to_predict, predictions, output)
else:
print(f'[bold green] Output: {output}.npy')
write_results(predictions, output)
def read_data_to_predict(path_to_data_to_predict: str):
"""
Parses the data to predict and returns a full Dataset include the DMatrix
:param path_to_data_to_predict: Path to the data on which predictions should be performed on
"""
return tiff.imread(path_to_data_to_predict)
def write_results(predictions: np.ndarray, path_to_write_to) -> None:
"""
Writes the predictions into a human readable file.
:param predictions: Predictions as a numpy array
:param path_to_write_to: Path to write the predictions to
"""
os.makedirs(pathlib.Path(path_to_write_to).parent.absolute(), exist_ok=True)
np.save(path_to_write_to, predictions)
pass
def write_ome_out(input_data, results_array, path_to_write_to) -> None:
"""
TODO
"""
os.makedirs(pathlib.Path(path_to_write_to).parent.absolute(), exist_ok=True)
#print("write_ome_out input: " + str(input_data.shape))
#print("write_ome_out output: " + str(results_array.shape))
full_image = np.zeros((512, 512, 2))
full_image[:, :, 0] = input_data[0, :, :]
full_image[:, :, 1] = results_array
full_image = np.transpose(full_image, (2, 0, 1))
with tiff.TiffWriter(os.path.join(path_to_write_to + ".ome.tif")) as tif_file:
tif_file.write(full_image, photometric='minisblack', metadata={'axes': 'CYX', 'Channel': {'Name': ["image", "seg_mask"]}})
pass
def get_pytorch_model(path_to_pytorch_model: str):
"""
Fetches the model of choice and creates a booster from it.
:param path_to_pytorch_model: Path to the Pytorch model1
"""
download(path_to_pytorch_model)
model = U2NET.load_from_checkpoint(path_to_pytorch_model, num_classes=5, len_test_set=120, strict=False).to('cpu')
model.eval()
return model
def predict(data_to_predict, model):
img = data_to_predict[0, :, :]
img = torch.from_numpy(np.expand_dims(np.expand_dims(img, 0), 0)).float()
logits = model(img)[0]
prediction = torch.argmax(logits.squeeze(), dim=0).cpu().detach().numpy().squeeze()
return prediction
def _check_exists(filepath) -> bool:
return os.path.exists(filepath)
def download(filepath) -> None:
"""Download the model if it doesn't exist in processed_folder already."""
if _check_exists(filepath):
return
mirrors = [
'https://zenodo.org/record/',
]
resources = [
("mark1-PHDFM-u2net-model.ckpt", "6937290/files/mark1-PHDFM-u2net-model.ckpt", "5dd5d425afb4b17444cb31b1343f23dc"),
]
# download files
for filename, uniqueID, md5 in resources:
for mirror in mirrors:
url = "{}{}".format(mirror, uniqueID)
try:
print("Downloading {}".format(url))
download_url(
url, root=str(pathlib.Path(filepath).parent.absolute()),
filename=filename,
md5=md5
)
except URLError as error:
print(
"Failed to download (trying next):\n{}".format(error)
)
continue
finally:
print()
break
else:
raise RuntimeError("Error downloading {}".format(filename))
print('Done!')
if __name__ == "__main__":
traceback.install()
sys.exit(main()) # pragma: no cover | /root-tissue-seg-package-1.0.7.tar.gz/root-tissue-seg-package-1.0.7/rts_package/cli_pred.py | 0.461259 | 0.345864 | cli_pred.py | pypi |
import click
import glob
import numpy as np
import os
import pathlib
import sys
import tifffile as tiff
import torch
from rich import traceback, print
from rts_package.models.unet import U2NET
from torchvision.datasets.utils import download_url
from urllib.error import URLError
from captum.attr import visualization as viz
from captum.attr import LayerGradCam, FeatureAblation, LayerActivation, LayerAttribution
from captum.attr import GuidedGradCam
import torch.nn as nn
WD = os.path.dirname(__file__)
@click.command()
@click.option('-i', '--input', required=True, type=str, help='Path to data file to predict.')
@click.option('-m', '--model', type=str,
help='Path to an already trained XGBoost model. If not passed a default model will be loaded.')
@click.option('-c/-nc', '--cuda/--no-cuda', type=bool, default=False, help='Whether to enable cuda or not')
@click.option('-s/-ns', '--sanitize/--no-sanitize', type=bool, default=False,
help='Whether to remove model after prediction or not.')
@click.option('-suf', '--suffix', type=str, help='Path to write the output to')
@click.option('-o', '--output', default="", required=True, type=str, help='Path to write the output to')
@click.option('-f', '--feat', default='_feat.ome.tif', type=str, help='Filename for ggcam features output')
@click.option('-t', '--target', required=True, type=int,
help='Output indices for which gradients are computed (target class)')
@click.option('-h', '--ome', type=bool, default=False, help='human readable output (OME-TIFF format), input and output as image channels')
def main(input: str, suffix: str, model: str, cuda: bool, output: str, sanitize: bool, feat: str, target: int, ome:bool):
"""Command-line interface for rts-feat-imp"""
print(r"""[bold blue]
rts-feat-imp
""")
print('[bold blue]Run [green]rts-feat-imp --help [blue]for an overview of all commands\n')
out_filename = feat
target_class = target
print('[bold blue] Calculating Guided Grad-CAM features...')
print('[bold blue] Target class: ' + str(target_class))
if not model:
model = get_pytorch_model(os.path.join(f'{os.getcwd()}', "models", "model.ckpt"))
else:
model = get_pytorch_model(model)
if cuda:
model.cuda()
print('[bold blue] Parsing data...')
if os.path.isdir(input):
input_list = glob.glob(os.path.join(input, "*"))
for inputs in input_list:
print(f'[bold yellow] Input: {inputs}')
file_feature_importance(inputs, model, target_class, inputs.replace(input, output).replace(".tif", suffix), ome_out=ome)
else:
file_feature_importance(input, model, output, ome_out=ome)
if sanitize:
os.remove(os.path.join(f'{WD}', "models", "model.ckpt"))
def file_feature_importance(input, model, target_class, output, ome_out=False):
input_data = read_input_data(input)
feat_ggcam = features_ggcam(model, input_data, target_class)
if ome_out:
print(f'[bold green] Output: {output}_ggcam_t_{target_class}.ome.tif')
write_ome_out(input_data, feat_ggcam, output + "_ggcam_t_" + str(target_class))
else:
print(f'[bold green] Output: {output}_ggcam_t_{target_class}.npy')
write_results(feat_ggcam, output + "_ggcam_t_" + str(target_class))
#print(f'[bold green] Output: {output}_ggcam_t_{target_class}')
#write_ome_out(input_data, feat_ggcam, output + "_ggcam_t_" + str(target_class))
#write_results(feat_ggcam, output + "_ggcam_t_" + str(target_class))
def features_ggcam(net, data_to_predict, target_class):
"""
TODO
"""
net.eval()
img = data_to_predict[0, :, :]
img = torch.from_numpy(np.expand_dims(np.expand_dims(img, 0), 0)).float()
wrapped_net = agg_segmentation_wrapper_module(net)
guided_gc = GuidedGradCam(wrapped_net, wrapped_net._model.outconv)
gc_attr = guided_gc.attribute(img, target=target_class)
gc_attr = torch.abs(gc_attr)
#print("ggcam out shape: " + str(gc_attr.shape))
img_out = gc_attr.squeeze(0).squeeze(0).cpu().detach().numpy()
return img_out
class agg_segmentation_wrapper_module(nn.Module):
def __init__(self, model):
super(agg_segmentation_wrapper_module, self).__init__()
self._model = model
def forward(self, x):
model_out = self._model(x)[0]
out_max = torch.argmax(model_out, dim=1, keepdim=True)
selected_inds = torch.zeros_like(model_out[0:4]).scatter_(1, out_max, 1)
return (model_out * selected_inds).sum(dim=(2, 3))
def read_input_data(path_to_input_data: str):
"""
Reads the data of an input image
:param path_to_input_data: Path to the input data file
"""
return tiff.imread(path_to_input_data)
def write_results(results_array: np.ndarray, path_to_write_to) -> None:
"""
Writes the output into a file.
:param results_array: output as a numpy array
:param path_to_write_to: Output path
"""
os.makedirs(pathlib.Path(path_to_write_to).parent.absolute(), exist_ok=True)
np.save(path_to_write_to, results_array)
pass
def write_ome_out(input_data, results_array, path_to_write_to) -> None:
"""
TODO
"""
os.makedirs(pathlib.Path(path_to_write_to).parent.absolute(), exist_ok=True)
#print("write_ome_out input: " + str(input_data.shape))
#print("write_ome_out output: " + str(results_array.shape))
full_image = np.zeros((512, 512, 2))
full_image[:, :, 0] = input_data[0, :, :]
full_image[:, :, 1] = results_array
full_image = np.transpose(full_image, (2, 0, 1))
with tiff.TiffWriter(os.path.join(path_to_write_to + ".ome.tif")) as tif_file:
tif_file.write(full_image, photometric='minisblack', metadata={'axes': 'CYX', 'Channel': {'Name': ["image", "ggcam"]}})
pass
def get_pytorch_model(path_to_pytorch_model: str):
"""
Fetches the model of choice and creates a booster from it.
:param path_to_pytorch_model: Path to the Pytorch model1
"""
download(path_to_pytorch_model)
model = U2NET.load_from_checkpoint(path_to_pytorch_model, num_classes=5, len_test_set=120, strict=False).to('cpu')
model.eval()
return model
def _check_exists(filepath) -> bool:
    # True if the checkpoint file is already present on disk.
    return os.path.exists(filepath)
def download(filepath) -> None:
    """Download the model checkpoint if it is not already present at ``filepath``."""
    if _check_exists(filepath):
        return
    # Mirror base URLs; additional mirrors can be appended here.
    mirrors = [
        'https://zenodo.org/record/',
    ]
    # (local filename, Zenodo record path, expected md5 checksum)
    resources = [
        ("mark1-PHDFM-u2net-model.ckpt", "6937290/files/mark1-PHDFM-u2net-model.ckpt", "5dd5d425afb4b17444cb31b1343f23dc"),
    ]
    # download files
    for filename, uniqueID, md5 in resources:
        for mirror in mirrors:
            url = "{}{}".format(mirror, uniqueID)
            try:
                print("Downloading {}".format(url))
                download_url(
                    url, root=str(pathlib.Path(filepath).parent.absolute()),
                    filename=filename,
                    md5=md5
                )
            except URLError as error:
                # Try the next mirror; the for-else below fires only if all fail.
                print(
                    "Failed to download (trying next):\n{}".format(error)
                )
                continue
            finally:
                print()
            # Success: stop trying mirrors for this resource.
            break
        else:
            # Every mirror failed for this resource.
            raise RuntimeError("Error downloading {}".format(filename))
    print('Done!')
if __name__ == "__main__":
    # Install rich tracebacks for nicer CLI error output.
    traceback.install()
    sys.exit(main())  # pragma: no cover
import click
import glob
import numpy as np
import os
import pathlib
import sys
import tifffile as tiff
import torch
from rich import traceback, print
from rts_package.models.unet import U2NET
from torchvision.datasets.utils import download_url
from urllib.error import URLError
from rts_package.utils import monte_carlo_dropout_proc
WD = os.path.dirname(__file__)
# CLI entry point. Click maps each option to a keyword argument of main().
# NOTE(review): the '-m' help text mentions XGBoost, but the loaded model is a
# PyTorch U2NET checkpoint — confirm and update the help string.
@click.command()
@click.option('-i', '--input', required=True, type=str, help='Path to data file to predict.')
@click.option('-m', '--model', type=str,
              help='Path to an already trained XGBoost model. If not passed a default model will be loaded.')
@click.option('-c/-nc', '--cuda/--no-cuda', type=bool, default=False, help='Whether to enable cuda or not')
@click.option('-s/-ns', '--sanitize/--no-sanitize', type=bool, default=False,
              help='Whether to remove model after prediction or not.')
@click.option('-suf', '--suffix', type=str, help='Path to write the output to')
@click.option('-o', '--output', default="", required=True, type=str, help='Path to write the output to')
@click.option('-t', '--iter', default=10, required=True, type=int, help='Number of MC-Dropout interations')
@click.option('-h', '--ome', type=bool, default=False, help='human readable output (OME-TIFF format), input and output as image channels')
def main(input: str, suffix: str, model: str, cuda: bool, output: str, sanitize: bool, iter: int, ome: bool):
    """Command-line interface for rts-pred-uncert"""
    print(r"""[bold blue]
        rts-pred-uncert
        """)
    print('[bold blue]Run [green]rts-pred-uncert --help [blue]for an overview of all commands\n')
    # Load the default checkpoint (downloaded on demand) unless one is given.
    if not model:
        model = get_pytorch_model(os.path.join(f'{os.getcwd()}', "models", "model.ckpt"))
    else:
        model = get_pytorch_model(model)
    if cuda:
        model.cuda()
    print('[bold blue] Calculating prediction uncertainty via MC-Dropout')
    print('[bold blue] Parsing data...')
    # Directory input: process every file inside, mirroring the path layout
    # into the output directory and swapping the ".tif" suffix.
    if os.path.isdir(input):
        input_list = glob.glob(os.path.join(input, "*"))
        for inputs in input_list:
            print(f'[bold yellow] Input: {inputs}')
            file_uncert(inputs, model, inputs.replace(input, output).replace(".tif", suffix), mc_dropout_it=iter, ome_out=ome)
    else:
        file_uncert(input, model, output, ome_out=ome)
    # NOTE(review): the default model is loaded from os.getcwd() but sanitize
    # removes it from WD (module directory) — these can differ; confirm intent.
    if sanitize:
        os.remove(os.path.join(f'{WD}', "models", "model.ckpt"))
def file_uncert(input, model, output, mc_dropout_it=10, ome_out=False):
    """Compute the MC-dropout uncertainty map for one file and write it out.

    :param input: path of the input TIFF file
    :param model: loaded network used for the stochastic forward passes
    :param output: output path prefix; "_uncert_" is appended
    :param mc_dropout_it: number of MC-dropout iterations
    :param ome_out: write a human-readable OME-TIFF instead of a .npy file
    """
    input_data = read_input_data(input)
    pred_std = prediction_std(model, input_data, t=mc_dropout_it)
    target = output + "_uncert_"
    if ome_out:
        print(f'[bold green] Output: {target}.ome.tif')
        write_ome_out(input_data, pred_std, target)
    else:
        print(f'[bold green] Output: {target}.npy')
        write_results(pred_std, target)
def prediction_std(net, img, t=10):
    """Per-pixel standard deviation over ``t`` MC-dropout forward passes.

    :param net: trained network (dropout is presumably re-enabled inside
        ``monte_carlo_dropout_proc`` — confirm against its implementation)
    :param img: input stack; only channel 0 is fed to the network
    :param t: number of stochastic forward passes
    :return: float32 numpy array with the per-pixel prediction std
    """
    net.eval()
    # Channel 0 reshaped to a (1, 1, H, W) float batch.
    plane = torch.from_numpy(img[0, :, :][None, None, :, :]).float()
    std_map = monte_carlo_dropout_proc(net, plane, T=t)
    return std_map.detach().cpu().numpy().astype(np.float32)
def read_input_data(path_to_input_data: str):
    """
    Reads the data of an input image.
    :param path_to_input_data: Path to the input data file (TIFF)
    :return: image data as a numpy array, as returned by ``tifffile.imread``
    """
    return tiff.imread(path_to_input_data)
def write_results(results_array: np.ndarray, path_to_write_to) -> None:
    """
    Writes the output into a ``.npy`` file, creating parent directories as needed.
    :param results_array: output as a numpy array
    :param path_to_write_to: output path (``np.save`` appends ``.npy``)
    """
    os.makedirs(pathlib.Path(path_to_write_to).parent.absolute(), exist_ok=True)
    np.save(path_to_write_to, results_array)
def write_ome_out(input_data, results_array, path_to_write_to) -> None:
    """
    Writes the input image and the uncertainty map as a two-channel OME-TIFF.
    :param input_data: raw input stack; channel 0 is used as the image plane
    :param results_array: 2-D uncertainty map with the same H x W as the input
    :param path_to_write_to: output path without the ``.ome.tif`` suffix
    """
    os.makedirs(pathlib.Path(path_to_write_to).parent.absolute(), exist_ok=True)
    # Stack image and result directly in CYX order; works for any H x W
    # (the original hard-coded 512 x 512 planes).
    full_image = np.stack((input_data[0, :, :], results_array)).astype(np.float64)
    with tiff.TiffWriter(path_to_write_to + ".ome.tif") as tif_file:
        tif_file.write(full_image, photometric='minisblack', metadata={'axes': 'CYX', 'Channel': {'Name': ["image", "uncert_map"]}})
def get_pytorch_model(path_to_pytorch_model: str):
    """
    Downloads the model checkpoint if necessary and loads it for CPU inference.
    :param path_to_pytorch_model: Path to the PyTorch checkpoint file
    :return: U2NET model in eval mode
    """
    # Fetch the checkpoint from Zenodo when it is not already on disk.
    download(path_to_pytorch_model)
    # strict=False tolerates checkpoint/model key mismatches.
    model = U2NET.load_from_checkpoint(path_to_pytorch_model, num_classes=5, len_test_set=120, strict=False).to('cpu')
    model.eval()
    return model
def _check_exists(filepath) -> bool:
    # True if the checkpoint file is already present on disk.
    return os.path.exists(filepath)
def download(filepath) -> None:
    """Download the model checkpoint if it is not already present at ``filepath``."""
    if _check_exists(filepath):
        return
    # Mirror base URLs; additional mirrors can be appended here.
    mirrors = [
        'https://zenodo.org/record/',
    ]
    # (local filename, Zenodo record path, expected md5 checksum)
    resources = [
        ("mark1-PHDFM-u2net-model.ckpt", "6937290/files/mark1-PHDFM-u2net-model.ckpt", "5dd5d425afb4b17444cb31b1343f23dc"),
    ]
    # download files
    for filename, uniqueID, md5 in resources:
        for mirror in mirrors:
            url = "{}{}".format(mirror, uniqueID)
            try:
                print("Downloading {}".format(url))
                download_url(
                    url, root=str(pathlib.Path(filepath).parent.absolute()),
                    filename=filename,
                    md5=md5
                )
            except URLError as error:
                # Try the next mirror; the for-else below fires only if all fail.
                print(
                    "Failed to download (trying next):\n{}".format(error)
                )
                continue
            finally:
                print()
            # Success: stop trying mirrors for this resource.
            break
        else:
            # Every mirror failed for this resource.
            raise RuntimeError("Error downloading {}".format(filename))
    print('Done!')
if __name__ == "__main__":
    # Install rich tracebacks for nicer CLI error output.
    traceback.install()
    sys.exit(main())  # pragma: no cover
from typing import Any, Optional
import torch
from torch import nn
__all__ = ['UNET', 'NESTEDUNET', 'U2NET']
# Originally contained own implementation of was changed because of weight init
# and adopted from https://github.com/ShawnBIT/UNet-family/blob/master/networks/UNet.py
from rts_package.models.unet_super import UNetsuper
from rts_package.models.unet_utils import init_weights, unetConv2, unetUp, _size_map, _upsample_like, RSU
class UNET(UNetsuper):
    """Classic U-Net with four down/up stages.

    Filter counts start from 64 and are divided by ``feature_scale``.
    The shape comments in forward() assume feature_scale=2 and a 512x512
    input — presumably the training configuration; confirm for other sizes.
    """
    def __init__(self, num_classes, len_test_set, hparams, input_channels, min_filter, feature_scale=2, is_deconv=True,
                 is_batchnorm=True, **kwargs):
        """Build the encoder/decoder and initialise all conv/BN weights."""
        super().__init__(num_classes, len_test_set, hparams, input_channels, min_filter, **kwargs)
        self.in_channels = input_channels
        self.feature_scale = feature_scale
        self.is_deconv = is_deconv
        self.is_batchnorm = is_batchnorm
        # Base U-Net widths, shrunk by feature_scale to save memory.
        filters = [64, 128, 256, 512, 1024]
        filters = [int(x / self.feature_scale) for x in filters]
        # downsampling
        self.maxpool = nn.MaxPool2d(kernel_size=2)
        self.conv1 = unetConv2(self.in_channels, filters[0], self.is_batchnorm)
        self.conv2 = unetConv2(filters[0], filters[1], self.is_batchnorm)
        self.conv3 = unetConv2(filters[1], filters[2], self.is_batchnorm)
        self.conv4 = unetConv2(filters[2], filters[3], self.is_batchnorm)
        self.center = unetConv2(filters[3], filters[4], self.is_batchnorm)
        # upsampling
        self.up_concat4 = unetUp(filters[4], filters[3], self.is_deconv)
        self.up_concat3 = unetUp(filters[3], filters[2], self.is_deconv)
        self.up_concat2 = unetUp(filters[2], filters[1], self.is_deconv)
        self.up_concat1 = unetUp(filters[1], filters[0], self.is_deconv)
        # final conv (without any concat)
        self.final = nn.Conv2d(filters[0], num_classes, 1)
        # initialise weights
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init_weights(m, init_type='kaiming')
            elif isinstance(m, nn.BatchNorm2d):
                init_weights(m, init_type='kaiming')
    def forward(self, inputs):
        """Encoder-decoder pass; returns per-pixel class maps after sigmoid."""
        conv1 = self.conv1(inputs)  # 16*512*512
        maxpool1 = self.maxpool(conv1)  # 16*256*256
        conv2 = self.conv2(maxpool1)  # 32*256*256
        maxpool2 = self.maxpool(conv2)  # 32*128*128
        conv3 = self.conv3(maxpool2)  # 64*128*128
        maxpool3 = self.maxpool(conv3)  # 64*64*64
        conv4 = self.conv4(maxpool3)  # 128*64*64
        maxpool4 = self.maxpool(conv4)  # 128*32*32
        center = self.center(maxpool4)  # 256*32*32
        up4 = self.up_concat4(center, conv4)  # 128*64*64
        up3 = self.up_concat3(up4, conv3)  # 64*128*128
        up2 = self.up_concat2(up3, conv2)  # 32*256*256
        up1 = self.up_concat1(up2, conv1)  # 16*512*512
        final = self.final(up1)
        # NOTE(review): returns sigmoid here while NESTEDUNET returns raw
        # logits — confirm the loss in UNetsuper expects this difference.
        return torch.sigmoid(final)
class NESTEDUNET(UNetsuper):
    """UNet++ (nested U-Net) with dense skip connections.

    With ``is_ds`` (deep supervision) the four side outputs are averaged;
    otherwise only the deepest column's output is returned.
    """
    def __init__(self, num_classes, len_test_set, hparams, input_channels, min_filter, feature_scale=2, is_deconv=True,
                 is_batchnorm=True, is_ds=True, **kwargs):
        """Build the nested encoder/decoder grid and initialise weights."""
        super().__init__(num_classes, len_test_set, hparams, input_channels, min_filter, **kwargs)
        self.in_channels = input_channels
        self.feature_scale = feature_scale
        self.is_deconv = is_deconv
        self.is_batchnorm = is_batchnorm
        self.is_ds = is_ds
        # Base widths shrunk by feature_scale, as in UNET.
        filters = [64, 128, 256, 512, 1024]
        filters = [int(x / self.feature_scale) for x in filters]
        # downsampling
        self.maxpool = nn.MaxPool2d(kernel_size=2)
        self.conv00 = unetConv2(self.in_channels, filters[0], self.is_batchnorm)
        self.conv10 = unetConv2(filters[0], filters[1], self.is_batchnorm)
        self.conv20 = unetConv2(filters[1], filters[2], self.is_batchnorm)
        self.conv30 = unetConv2(filters[2], filters[3], self.is_batchnorm)
        self.conv40 = unetConv2(filters[3], filters[4], self.is_batchnorm)
        # upsampling; up_concatXY fuses column Y-1 features at row X
        self.up_concat01 = unetUp(filters[1], filters[0], self.is_deconv)
        self.up_concat11 = unetUp(filters[2], filters[1], self.is_deconv)
        self.up_concat21 = unetUp(filters[3], filters[2], self.is_deconv)
        self.up_concat31 = unetUp(filters[4], filters[3], self.is_deconv)
        self.up_concat02 = unetUp(filters[1], filters[0], self.is_deconv, 3)
        self.up_concat12 = unetUp(filters[2], filters[1], self.is_deconv, 3)
        self.up_concat22 = unetUp(filters[3], filters[2], self.is_deconv, 3)
        self.up_concat03 = unetUp(filters[1], filters[0], self.is_deconv, 4)
        self.up_concat13 = unetUp(filters[2], filters[1], self.is_deconv, 4)
        self.up_concat04 = unetUp(filters[1], filters[0], self.is_deconv, 5)
        # final conv (without any concat); one head per supervised column
        self.final_1 = nn.Conv2d(filters[0], num_classes, 1)
        self.final_2 = nn.Conv2d(filters[0], num_classes, 1)
        self.final_3 = nn.Conv2d(filters[0], num_classes, 1)
        self.final_4 = nn.Conv2d(filters[0], num_classes, 1)
        # initialise weights
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init_weights(m, init_type='kaiming')
            elif isinstance(m, nn.BatchNorm2d):
                init_weights(m, init_type='kaiming')
    def forward(self, inputs):
        """Dense forward pass over the UNet++ grid; X_ij = row i, column j."""
        # column : 0
        X_00 = self.conv00(inputs)  # 16*512*512
        maxpool0 = self.maxpool(X_00)  # 16*256*256
        X_10 = self.conv10(maxpool0)  # 32*256*256
        maxpool1 = self.maxpool(X_10)  # 32*128*128
        X_20 = self.conv20(maxpool1)  # 64*128*128
        maxpool2 = self.maxpool(X_20)  # 64*64*64
        X_30 = self.conv30(maxpool2)  # 128*64*64
        maxpool3 = self.maxpool(X_30)  # 128*32*32
        X_40 = self.conv40(maxpool3)  # 256*32*32
        # column : 1
        X_01 = self.up_concat01(X_10, X_00)
        X_11 = self.up_concat11(X_20, X_10)
        X_21 = self.up_concat21(X_30, X_20)
        X_31 = self.up_concat31(X_40, X_30)
        # column : 2
        X_02 = self.up_concat02(X_11, X_00, X_01)
        X_12 = self.up_concat12(X_21, X_10, X_11)
        X_22 = self.up_concat22(X_31, X_20, X_21)
        # column : 3
        X_03 = self.up_concat03(X_12, X_00, X_01, X_02)
        X_13 = self.up_concat13(X_22, X_10, X_11, X_12)
        # column : 4
        X_04 = self.up_concat04(X_13, X_00, X_01, X_02, X_03)
        # final layer
        final_1 = self.final_1(X_01)
        final_2 = self.final_2(X_02)
        final_3 = self.final_3(X_03)
        final_4 = self.final_4(X_04)
        # Deep supervision: average of all four heads.
        final = (final_1 + final_2 + final_3 + final_4) / 4
        if self.is_ds:
            return final
        else:
            return final_4
class U2NET(UNetsuper):
    def __init__(self, num_classes, len_test_set: int, input_channels=1, min_filter=32, **kwargs):
        """Build a U^2-Net with ``num_classes`` output channels.

        NOTE(review): this passes ``input_channels`` in the position where
        UNET's parent call takes ``hparams`` — confirm the UNetsuper signature
        accepts this ordering.
        """
        super().__init__(num_classes, len_test_set, input_channels, min_filter, **kwargs)
        self._make_layers(input_channels, min_filter)
    def forward(self, x):
        """Run the U^2-Net.

        Returns ``[fused_map, side1, ..., side6]``, each after sigmoid and
        upsampled to the input resolution.
        """
        sizes = _size_map(x, self.height)
        maps = []  # storage for maps
        # side saliency map
        def unet(x, height=1):
            # Recursive encoder/decoder over the RSU stages.
            # NOTE(review): the recursion base is hard-coded to 6 stages,
            # which matches the cfgs in _make_layers; keep them in sync.
            if height < 6:
                x1 = getattr(self, f'stage{height}')(x)
                x2 = unet(getattr(self, 'downsample')(x1), height + 1)
                x = getattr(self, f'stage{height}d')(torch.cat((x2, x1), 1))
                side(x, height)
                return _upsample_like(x, sizes[height - 1]) if height > 1 else x
            else:
                x = getattr(self, f'stage{height}')(x)
                side(x, height)
                return _upsample_like(x, sizes[height - 1])
        def side(x, h):
            # side output saliency map (before sigmoid)
            x = getattr(self, f'side{h}')(x)
            x = _upsample_like(x, sizes[1])
            maps.append(x)
        def fuse():
            # fuse saliency probability maps
            maps.reverse()
            x = torch.cat(maps, 1)
            x = getattr(self, 'outconv')(x)
            maps.insert(0, x)
            return [torch.sigmoid(x) for x in maps]
        unet(x)
        maps = fuse()
        return maps
def _make_layers(self, input_channels, min_filter):
cfgs = {
# cfgs for building RSUs and sides
# {stage : [name, (height(L), in_ch, mid_ch, out_ch, dilated), side]}
'stage1': ['En_1', (7, input_channels, min_filter, min_filter * 2), -1],
'stage2': ['En_2', (6, min_filter * 2, min_filter, min_filter * 2 ** 2), -1],
'stage3': ['En_3', (5, min_filter * 2 ** 2, min_filter * 2, min_filter * 2 ** 3), -1],
'stage4': ['En_4', (4, min_filter * 2 ** 3, min_filter * 2 ** 2, min_filter * 2 ** 4), -1],
'stage5': ['En_5', (4, min_filter * 2 ** 4, min_filter * 2 ** 3, min_filter * 2 ** 4, True), -1],
'stage6': ['En_6', (4, min_filter * 2 ** 4, min_filter * 2 ** 3, min_filter * 2 ** 4, True),
min_filter * 2 ** 4],
'stage5d': ['De_5', (4, min_filter * 2 ** 5, min_filter * 2 ** 3, min_filter * 2 ** 4, True),
min_filter * 2 ** 4],
'stage4d': ['De_4', (4, min_filter * 2 ** 5, min_filter * 2 ** 2, min_filter * 2 ** 3),
min_filter * 2 ** 3],
'stage3d': ['De_3', (5, min_filter * 2 ** 4, min_filter * 2, min_filter * 2 ** 2), min_filter * 2 ** 2],
'stage2d': ['De_2', (6, min_filter * 2 ** 3, min_filter, min_filter * 2), min_filter * 2],
'stage1d': ['De_1', (7, min_filter * 2 ** 2, int(min_filter * 2 ** (1 / 2)), min_filter * 2),
min_filter * 2],
}
cfgs = {
# cfgs for building RSUs and sides
# {stage : [name, (height(L), in_ch, mid_ch, out_ch, dilated), side]}
'stage1': ['En_1', (7, 1, 32, 64), -1],
'stage2': ['En_2', (6, 64, 32, 128), -1],
'stage3': ['En_3', (5, 128, 64, 256), -1],
'stage4': ['En_4', (4, 256, 128, 512), -1],
'stage5': ['En_5', (4, 512, 256, 512, True), -1],
'stage6': ['En_6', (4, 512, 256, 512, True), 512],
'stage5d': ['De_5', (4, 1024, 256, 512, True), 512],
'stage4d': ['De_4', (4, 1024, 128, 256), 256],
'stage3d': ['De_3', (5, 512, 64, 128), 128],
'stage2d': ['De_2', (6, 256, 32, 64), 64],
'stage1d': ['De_1', (7, 128, 16, 64), 64],
}
self.height = int((len(cfgs) + 1) / 2)
self.add_module('downsample', nn.MaxPool2d(2, stride=2, ceil_mode=True))
for k, v in cfgs.items():
# build rsu block
self.add_module(k, RSU(v[0], *v[1]))
if v[2] > 0:
# build side layer
self.add_module(f'side{v[0][-1]}', nn.Conv2d(v[2], self.num_classes, 3, padding=1))
# build fuse layer
self.add_module('outconv', nn.Conv2d(int(self.height * self.num_classes), self.num_classes, 1))
def loss(self, logits, labels):
"""
Initializes the loss function
:return: output - Initialized cross entropy loss function
"""
labels = labels.long()
loss = 0
for logit in logits:
loss += self.criterion(logit, labels)
return loss
def predict(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None):
data, target = batch
output = self.forward(data)
_, prediction = torch.max(output[0], dim=1)
return data, target, output, prediction | /root-tissue-seg-package-1.0.7.tar.gz/root-tissue-seg-package-1.0.7/rts_package/models/unet.py | 0.939401 | 0.459137 | unet.py | pypi |
import math
import torch
from torch import nn
from torch.nn import init
def init_weights(net, init_type='normal'):
    """Apply the chosen weight-initialisation scheme to every module of ``net``.

    Only 'kaiming' is implemented; any other value raises NotImplementedError.
    """
    if init_type != 'kaiming':
        raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
    net.apply(weights_init_kaiming)
def weights_init_kaiming(m):
    """Kaiming-normal init for conv/linear weights; N(1, 0.02) for batch norms.

    Matching is done on the class name so all Conv*/Linear/BatchNorm variants
    are covered; other module types are left untouched.
    """
    name = type(m).__name__
    if 'Conv' in name or 'Linear' in name:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif 'BatchNorm' in name:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
class unetConv2(nn.Module):
    """Stack of ``n`` conv(+optional BN)+ReLU blocks used by the U-Net variants."""

    def __init__(self, in_size, out_size, is_batchnorm, n=2, ks=3, stride=1, padding=1):
        super(unetConv2, self).__init__()
        self.n = n
        self.ks = ks
        self.stride = stride
        self.padding = padding
        channels = in_size
        for i in range(1, n + 1):
            # conv -> [BN] -> ReLU; registered as conv1..convN for forward().
            stages = [nn.Conv2d(channels, out_size, ks, stride, padding)]
            if is_batchnorm:
                stages.append(nn.BatchNorm2d(out_size))
            stages.append(nn.ReLU(inplace=True))
            setattr(self, 'conv%d' % i, nn.Sequential(*stages))
            channels = out_size
        # initialise the blocks
        for m in self.children():
            init_weights(m, init_type='kaiming')

    def forward(self, inputs):
        x = inputs
        for i in range(1, self.n + 1):
            x = getattr(self, 'conv%d' % i)(x)
        return x
class unetUp(nn.Module):
    """Upsampling block: upscale the deep feature map and fuse it with skips."""

    def __init__(self, in_size, out_size, is_deconv, n_concat=2):
        super(unetUp, self).__init__()
        # Fusion conv sees the upsampled map plus (n_concat - 2) extra skips.
        self.conv = unetConv2(in_size + (n_concat - 2) * out_size, out_size, False)
        if is_deconv:
            self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2, padding=0)
        else:
            self.up = nn.Sequential(
                nn.UpsamplingNearest2d(scale_factor=2),
                nn.Conv2d(in_size, out_size, 1))
        # initialise the blocks (skip the nested unetConv2: it initialises itself)
        for m in self.children():
            if m.__class__.__name__.find('unetConv2') != -1:
                continue
            init_weights(m, init_type='kaiming')

    def forward(self, high_feature, *low_feature):
        upsampled = self.up(high_feature)
        return self.conv(torch.cat([upsampled, *low_feature], 1))
def _upsample_like(x, size):
return nn.Upsample(size=size, mode='nearest')(x)
def _size_map(x, height):
# {height: size} for Upsample
size = list(x.shape[-2:])
sizes = {}
for h in range(1, height):
sizes[h] = size
size = [math.ceil(w / 2) for w in size]
return sizes
class REBNCONV(nn.Module):
    """Dropout -> Conv3x3 -> BatchNorm -> ReLU block used inside RSU stages.

    NOTE(review): the ``dilate`` argument is accepted but never used — the
    convolution is always built with dilation 1 / padding 1, unlike the
    reference U^2-Net block. Confirm against the trained checkpoints before
    changing, as enabling dilation would alter inference behaviour.
    """
    def __init__(self, in_ch=3, out_ch=3, dilate=1):
        super(REBNCONV, self).__init__()
        # p=0.0 dropout is a no-op here; presumably its rate is raised
        # externally for MC-dropout uncertainty estimation — confirm.
        self.dropout_1 = nn.Dropout2d(0.0)
        self.conv_s1 = nn.Conv2d(in_ch, out_ch, 3, padding=1)
        self.bn_s1 = nn.BatchNorm2d(out_ch)
        self.relu_s1 = nn.ReLU(inplace=True)
    def forward(self, x):
        return self.relu_s1(self.bn_s1(self.conv_s1(self.dropout_1(x))))
class RSU(nn.Module):
    def __init__(self, name, height, in_ch, mid_ch, out_ch, dilated=False):
        """RSU (ReSidual U-block) of the given internal height.

        :param name: stage name, kept for reference/debugging
        :param height: number of encoder/decoder levels inside the block
        :param dilated: if True, dilation is meant to replace down/up-sampling
        """
        super(RSU, self).__init__()
        self.name = name
        self.height = height
        self.dilated = dilated
        self._make_layers(height, in_ch, mid_ch, out_ch, dilated)
    def forward(self, x):
        """Residual pass: input conv plus a recursive mini U-Net over it."""
        sizes = _size_map(x, self.height)
        x = self.rebnconvin(x)
        # U-Net like symmetric encoder-decoder structure
        def unet(x, height=1):
            if height < self.height:
                x1 = getattr(self, f'rebnconv{height}')(x)
                # Downsample only in the non-dilated variant (and not at the
                # second-to-last level).
                if not self.dilated and height < self.height - 1:
                    x2 = unet(getattr(self, 'downsample')(x1), height + 1)
                else:
                    x2 = unet(x1, height + 1)
                x = getattr(self, f'rebnconv{height}d')(torch.cat((x2, x1), 1))
                return _upsample_like(x, sizes[height - 1]) if not self.dilated and height > 1 else x
            else:
                # Recursion base: deepest conv, no decoder counterpart.
                return getattr(self, f'rebnconv{height}')(x)
        # Residual connection around the inner U structure.
        return x + unet(x)
def _make_layers(self, height, in_ch, mid_ch, out_ch, dilated=False):
self.add_module('rebnconvin', REBNCONV(in_ch, out_ch))
self.add_module('downsample', nn.MaxPool2d(2, stride=2, ceil_mode=True))
self.add_module('rebnconv1', REBNCONV(out_ch, mid_ch))
self.add_module('rebnconv1d', REBNCONV(mid_ch * 2, out_ch))
for i in range(2, height):
dilate = 1 if not dilated else 2 ** (i - 1)
self.add_module(f'rebnconv{i}', REBNCONV(mid_ch, mid_ch, dilate=dilate))
self.add_module(f'rebnconv{i}d', REBNCONV(mid_ch * 2, mid_ch, dilate=dilate))
dilate = 2 if not dilated else 2 ** (height - 1)
self.add_module(f'rebnconv{height}', REBNCONV(mid_ch, mid_ch, dilate=dilate)) | /root-tissue-seg-package-1.0.7.tar.gz/root-tissue-seg-package-1.0.7/rts_package/models/unet_utils.py | 0.731634 | 0.304584 | unet_utils.py | pypi |
import numpy as np
from cmath import exp, pi
from itertools import product
from typing import Union
import root_tomography.meas_statistics as stat
from root_tomography.entity import State, Process, part_trace, rand_unitary
from root_tomography.tools import kron3d, extend, base2povm, meas_matrix
rng = np.random.default_rng()
class Experiment:
    """Quantum tomography experiment: protocol, shot counts and click counts.

    Holds the measurement protocol (list of POVM operator stacks), the number
    of shots per protocol item and the observed clicks, and exposes
    likelihood/chi-squared quantities for reconstruction.
    """
    dim = None          # Hilbert-space dimension
    entity = None       # State or Process class (not instance)
    stat_pkg = "auto"   # statistics model name or resolved stat.Statistics
    proto = None        # list of 3-D arrays: measurement operators per item
    nshots = None       # list of shot counts, one per protocol item
    clicks = None       # list of click-count arrays, one per protocol item
    _vec_proto = None   # cached flattened measurement matrix
    _vec_nshots = None  # cached per-operator shot counts
    _vec_clicks = None  # cached concatenated clicks
    def __init__(self, dim: int, entity, stats: Union[str, stat.Statistics] = "auto"):
        """Validate the entity class and the requested statistics model."""
        self.dim = dim
        self.entity = entity
        # Dispatch is by class name, so `entity` must literally be the State
        # or Process class from root_tomography.entity.
        if self.entity.__name__ not in ["State", "Process"]:
            raise ValueError(
                "Unknown entity: '{}'\n Only State, Process are available".format(entity.__name__))
        if type(stats) is str:
            stats = stats.lower()
            st_allowed = list(stat.BUILD_IN.keys())
            st_allowed.append("auto")
            if stats not in st_allowed:
                raise ValueError("Unknown statistics type: {}\n Available types: {}".format(stats, ", ".join(st_allowed)))
            self.stat_pkg = stats
    def set_data(self, proto=None, nshots=None, clicks=None):
        """Set protocol/shots/clicks, normalising inputs to lists.

        Invalidates the corresponding cached vectorised forms. Returns self
        for chaining.
        """
        if proto is not None:
            # NOTE(review): dead branch — presumably a Process-specific
            # conversion was intended here; confirm.
            if self.entity is Process:
                pass
            if type(proto) is not list:
                proto = [proto[j, :, :] for j in range(proto.shape[0])]
            # Each protocol item becomes a 3-D stack of operators.
            proto = [extend(elem, 3) for elem in proto]
            self.proto = proto
            self._vec_proto = None
        if clicks is not None:
            if type(clicks) is not list:
                clicks = [clicks[j] for j in range(clicks.shape[0])]
            self.clicks = clicks
            self._vec_clicks = None
        if nshots is not None:
            if type(nshots) is np.ndarray:
                nshots = list(nshots)
            self.nshots = nshots
            self._vec_nshots = None
        if self.proto is not None and self.nshots is not None:
            # A scalar total is split evenly between the protocol items.
            if type(self.nshots) is not list:
                self.nshots = nshots_divide(self.nshots, len(self.proto))
            elif len(self.nshots) != len(self.proto):
                raise ValueError("Length of nshots array does not match length of proto array")
        return self
    @property
    def vec_proto(self) -> np.ndarray:
        """Flattened measurement matrix (one row per operator), cached."""
        if self._vec_proto is None and self.proto is not None:
            self._vec_proto = meas_matrix(self.proto)
        return self._vec_proto
    @property
    def vec_nshots(self) -> np.ndarray:
        """Shot count repeated per operator, concatenated across items, cached."""
        if self._vec_nshots is None and self.nshots is not None and self.proto is not None:
            n = [np.full((elem.shape[0],), n) for elem, n in zip(self.proto, self.nshots)]
            self._vec_nshots = np.concatenate(tuple(n))
        return self._vec_nshots
    @property
    def vec_clicks(self) -> np.ndarray:
        """Clicks concatenated across protocol items, cached."""
        if self._vec_clicks is None and self.clicks is not None:
            self._vec_clicks = np.concatenate(tuple(self.clicks))
        return self._vec_clicks
    def stat(self) -> stat.Statistics:
        """Resolve (and cache) the statistics model, auto-detecting if asked.

        Auto rules: infinite shots -> asymptotic; single-operator items ->
        binomial; items forming a resolution of identity -> polynomial.
        """
        if type(self.stat_pkg) is str:
            if self.stat_pkg == "auto":
                if any(np.isinf(self.nshots)):
                    # NOTE(review): set_data converts nshots to a list, so
                    # `self.nshots.shape` here would raise — confirm this
                    # path is reached with an ndarray.
                    self.set_data(nshots=np.ones(self.nshots.shape))
                    self.stat_pkg = "asymptotic"
                else:
                    imat = np.eye(self.dim)
                    if all([elem.shape[0] == 1 for elem in self.proto]):
                        self.stat_pkg = "binomial"
                    elif self.entity is State and \
                            all([np.allclose(np.sum(elem, axis=0), imat) for elem in self.proto]):
                        self.stat_pkg = "polynomial"
                    elif self.entity is Process and \
                            all([np.allclose(part_trace(np.sum(elem, axis=0), [self.dim, self.dim], 0), imat) for elem in
                                 self.proto]):
                        self.stat_pkg = "polynomial"
                    else:
                        raise ValueError("Failed to determine statistics type. Please, specify stat_type manually.")
            self.stat_pkg = stat.BUILD_IN[self.stat_pkg]
        return self.stat_pkg
    def get_probs_dm(self, dm: np.ndarray, tol=0.0) -> np.ndarray:
        """Outcome probabilities for density matrix ``dm``, floored at ``tol``."""
        p = np.abs(self.vec_proto @ dm.reshape((-1,), order="F"))
        p[p < tol] = tol
        return p
    def get_probs_sq(self, sq: np.ndarray, tol=0.0) -> np.ndarray:
        """Outcome probabilities for a square-root parametrisation ``sq``."""
        return self.get_probs_dm(sq @ sq.conj().T, tol)
    def nkp(self, dm=None):
        """Return (nshots, clicks[, probabilities]) in vectorised form."""
        n = self.vec_nshots
        k = self.vec_clicks
        if dm is None:
            return n, k
        else:
            return n, k, self.get_probs_dm(dm, 1e-15)
    # Sampling
    def simulate(self, dm: np.ndarray):
        """Draw synthetic clicks for ``dm`` using the statistics model."""
        clk = []
        for elem, n in zip(self.proto, self.nshots):
            probs = np.abs(meas_matrix(elem) @ dm.reshape((-1,), order="F"))
            clk.append(self.stat().sample(n, probs))
        self._vec_clicks = None
        self.set_data(clicks=clk)
    # Likelihood
    def logL_dm(self, dm: np.ndarray) -> float:
        """Log-likelihood of the observed clicks given ``dm``."""
        n, k, p = self.nkp(dm)
        return self.stat().logL(n, k, p)
    def logL_sq(self, sq: np.ndarray) -> float:
        """Log-likelihood in the square-root parametrisation."""
        return self.logL_dm(sq @ sq.conj().T)
    def dlogL_sq(self, sq: np.ndarray) -> np.ndarray:
        """Gradient of the log-likelihood with respect to ``sq``."""
        n, k, p = self.nkp(sq @ sq.conj().T)
        b = self.stat().dlogL(n, k, p)
        bmat = self.vec_proto
        return 2 * np.reshape(bmat.conj().T @ b, (self.dim, -1), order="F") @ sq
    def logL_eq_mu(self) -> float:
        """Lagrange multiplier of the likelihood equation."""
        n, k = self.nkp()
        return self.stat().logL_mu(n, k)
    def logL_eq_jmat_dm(self, dm) -> np.ndarray:
        """J-matrix of the likelihood equation at ``dm``."""
        n, k, p = self.nkp(dm)
        b, b0 = self.stat().logL_jmat(n, k, p)
        bmat = self.vec_proto
        jmat = np.reshape(bmat.conj().T @ b, (self.dim, self.dim), order="F")
        if b0 != 0:
            jmat += b0 * np.eye(self.dim)
        return jmat
    # Chi-squared
    def chi2_dm(self, dm: np.ndarray):
        """Chi-squared statistic of the observed clicks given ``dm``."""
        n, k, p = self.nkp(dm)
        return self.stat().chi2(n, k, p)
    def deg_f_rank(self, rank):
        """Degrees of freedom of the chi-squared test for a rank-``rank`` model."""
        nu = self.stat().deg_f(self.clicks)
        if self.entity is State:
            nu_dm = (2 * self.dim - rank) * rank - 1
        elif self.entity is Process:
            dim2 = self.dim ** 2
            nu_dm = (2 * dim2 - rank) * rank - dim2
        else:
            raise ValueError("Invalid entity")
        return nu - nu_dm
def nshots_divide(n, m, method="total_int"):
    """Split a total number of measurement shots between ``m`` protocol items.

    methods: "total" (equal, possibly fractional shares), "total_int"
    (integer shares summing exactly to n, remainder spread over the first
    items), "equal" (every item gets n). An infinite total yields [inf] * m.
    """
    if np.floor(n) != n:
        raise ValueError("Total shots number should be an integer")
    if np.isinf(n):
        return [np.inf] * m
    if method == "total":
        shots = np.full((m,), n / m)
    elif method == "total_int":
        shots = np.full((m,), np.floor(n / m))
        # Distribute the leftover shots one-by-one over the first items.
        shots[:int(n - np.sum(shots))] += 1
    elif method == "equal":
        shots = np.full((m,), n)
    else:
        raise ValueError("Invalid division method")
    return list(shots)
def proto_measurement(ptype: str, dim=None, modifier="", num=None, nsub=1):
    """Build a measurement protocol: a list of 3-D POVM operator stacks.

    :param ptype: "mub" (mutually unbiased bases, needs dim), "tetra"
        (tetrahedral qubit bases), "random_bases" or "random_projectors"
        (both need ``num``)
    :param modifier: "operator" flattens all operators into a single stack;
        "operatorN" keeps only the N-th operator of each basis
    :param nsub: number of subsystems; the protocol is tensored with itself
        ``nsub - 1`` times
    """
    ptype = ptype.lower()
    modifier = modifier.lower()
    if ptype == "mub":
        proto = [base2povm(base) for base in get_mubs(dim)]
    elif ptype == "tetra":
        proto = [base2povm(base) for base in get_tetra()]
    elif ptype == "random_bases":
        proto = [base2povm(rand_unitary(dim)) for _ in range(num)]
    elif ptype == "random_projectors":
        proto = [extend(State.random(dim, 1).dm, 3) for _ in range(num)]
    else:
        raise ValueError("Unknown measurement protocol type '{}'".format(ptype))
    if modifier[:8] == "operator":
        if modifier == "operator":
            proto = np.concatenate(tuple(proto), axis=0)
        else:
            # "operatorN": keep only the N-th operator of every basis.
            idx = int(modifier[8:])
            proto = [extend(elem[idx, :, :], 3) for elem in proto]
    if type(proto) is not list:
        proto = [proto[j, :, :] for j in range(proto.shape[0])]
    if nsub > 1:
        # Tensor the single-subsystem protocol with itself nsub-1 times.
        proto_0 = proto.copy()
        for js in range(1, nsub):
            proto = [kron3d(p1, p2) for p1, p2 in product(proto, proto_0)]
    return proto
def get_mubs(dim: int):
    """Mutually unbiased bases for dimensions 2-5.

    Each basis is a matrix whose columns are normalised basis vectors; the
    computational basis (identity) always comes first.
    """
    bases = [np.eye(dim)]
    if dim == 2:
        bases.append(np.array([[1, 1], [1, -1]]) / np.sqrt(2))
        bases.append(np.array([[1, 1], [1j, -1j]]) / np.sqrt(2))
    elif dim == 3:
        # Third roots of unity scaled to unit-norm columns.
        u = [exp(1j * 2 * pi * k / 3) / np.sqrt(3) for k in range(3)]
        bases.append(np.array([[u[0], u[0], u[0]], [u[0], u[1], u[2]], [u[0], u[2], u[1]]]))
        bases.append(np.array([[u[0], u[0], u[0]], [u[1], u[2], u[0]], [u[1], u[0], u[2]]]))
        bases.append(np.array([[u[0], u[0], u[0]], [u[2], u[1], u[0]], [u[2], u[0], u[1]]]))
    elif dim == 4:
        # Fourth roots of unity over 2: 1/2, i/2, -1/2, -i/2.
        u = [1 / 2, 1j / 2, -1 / 2, -1j / 2]
        bases.append(np.array([[u[0], u[0], u[0], u[0]], [u[0], u[0], u[2], u[2]], [u[0], u[2], u[2], u[0]], [u[0], u[2], u[0], u[2]]]))
        bases.append(np.array([[u[0], u[0], u[0], u[0]], [u[2], u[2], u[0], u[0]], [u[3], u[1], u[1], u[3]], [u[3], u[1], u[3], u[1]]]))
        bases.append(np.array([[u[0], u[0], u[0], u[0]], [u[3], u[3], u[1], u[1]], [u[3], u[1], u[1], u[3]], [u[2], u[0], u[2], u[0]]]))
        bases.append(np.array([[u[0], u[0], u[0], u[0]], [u[3], u[3], u[1], u[1]], [u[2], u[0], u[2], u[0]], [u[3], u[1], u[1], u[3]]]))
    elif dim == 5:
        # Fifth roots of unity scaled to unit-norm columns; each basis is
        # defined by its index pattern into u.
        u = [exp(1j * 2 * pi * k / 5) / np.sqrt(5) for k in range(5)]
        patterns = [
            [[0, 0, 0, 0, 0], [0, 1, 2, 3, 4], [0, 2, 4, 1, 3], [0, 3, 1, 4, 2], [0, 4, 3, 2, 1]],
            [[0, 0, 0, 0, 0], [1, 2, 3, 4, 0], [4, 1, 3, 0, 2], [4, 2, 0, 3, 1], [1, 0, 4, 3, 2]],
            [[0, 0, 0, 0, 0], [2, 3, 4, 0, 1], [3, 0, 2, 4, 1], [3, 1, 4, 2, 0], [2, 1, 0, 4, 3]],
            [[0, 0, 0, 0, 0], [3, 4, 0, 1, 2], [2, 4, 1, 3, 0], [2, 0, 3, 1, 4], [3, 2, 1, 0, 4]],
            [[0, 0, 0, 0, 0], [4, 0, 1, 2, 3], [1, 3, 0, 2, 4], [1, 4, 2, 0, 3], [4, 3, 2, 1, 0]],
        ]
        for idx in patterns:
            bases.append(np.array([[u[j] for j in row] for row in idx]))
    else:
        raise ValueError("The only available MUBs are for dimensions 2, 3, 4, 5")
    return bases
def get_tetra():
    """Four tetrahedral qubit bases (tetrahedron measurement protocol).

    Each basis is a 2x2 matrix whose columns form an orthonormal pair; the
    four first columns point at the vertices of a Bloch-sphere tetrahedron.
    (Fixed: the original return line had extraneous packaging text fused to
    it, which broke the syntax.)
    """
    ap = np.sqrt((1 + 1 / np.sqrt(3)) / 2)
    am = np.sqrt((1 - 1 / np.sqrt(3)) / 2)
    bases = []
    # Phases at odd multiples of pi/4; amplitudes alternate between bases.
    for k, swapped in zip((1, 3, 5, 7), (False, True, False, True)):
        w = exp(1j * k * pi / 4)
        if swapped:
            bases.append(np.array([[am, -ap], [ap * w, am * w]]))
        else:
            bases.append(np.array([[ap, -am], [am * w, ap * w]]))
    return bases
import numpy as np
from abc import ABC, abstractmethod
from typing import Callable
from root_tomography.tools import uprint
class Optimizer(ABC):
    """Base class for optimisation routines with settable options."""
    # Display frequency: truthy enables progress output; see subclasses.
    display = True
    def set_options(self, **kwargs):
        """Set arbitrary option attributes; returns self for chaining."""
        for key, value in kwargs.items():
            setattr(self, key, value)
        return self
    @abstractmethod
    def run(self, **kwargs):
        """Execute the optimisation; signature is subclass-specific."""
        pass
class FixedPoint(Optimizer):
    """Regularised fixed-point iteration: x <- (1-a) * f(x) + a * x."""
    max_iter = int(1e6)  # iteration cap
    tol = 1e-8           # Frobenius-norm convergence threshold
    reg_coeff = 0.5      # regularisation weight a on the previous iterate
    def run(self, x0: np.ndarray, f_val: Callable):
        """Iterate from ``x0`` until the update norm drops below ``tol``.

        :param x0: initial matrix iterate
        :param f_val: fixed-point map
        :return: (final iterate, info dict with the iteration count)
        """
        nb = 0
        if self.display:
            print("Optimization: fixed point iteration method")
            nb = uprint("Starting optimization")
        x = x0
        iter = 0
        for iter in range(self.max_iter):
            xp = x
            x = (1 - self.reg_coeff) * f_val(xp) + self.reg_coeff * xp
            dx = np.linalg.norm(xp - x, ord="fro")
            stop_iter = dx < self.tol
            # NOTE(review): `display` doubles as a print period; when it is the
            # boolean True, mod(iter, True) == 0 prints every iteration.
            if self.display and (np.mod(iter, self.display) == 0 or stop_iter):
                nb = uprint("Iteration {} \t\t Delta {:.2e}".format(iter + 1, dx), nb)
            if stop_iter:
                break
        if self.display:
            uprint("", end="\n")
        info = {"iter": iter + 1}
        return x, info
class AutoRank(Optimizer):
    """Chooses the smallest model rank that is statistically adequate.

    Ranks are tried in increasing order; the search stops when the adequacy
    p-value exceeds ``significance_level``, is NaN, or starts decreasing (in
    which case the previous rank is kept).
    (Fixed: the original return line had extraneous packaging text fused to
    it, which broke the syntax.)
    """
    significance_level = 0.05  # p-value threshold for accepting a rank
    def run(self, r_max: int, f_data: Callable):
        """Run the rank search.

        :param r_max: maximal rank to try
        :param f_data: callable mapping a rank to an info dict with key "pval"
        :return: (info dict of the chosen rank, list of all per-rank results)
        """
        if self.display:
            print("=== Automatic rank estimation ===")
        r = 0
        pval_red = False
        info = [None] * r_max
        for idx in range(r_max):
            r = idx + 1
            if self.display:
                print(f"=> Try rank {r:d}")
            info[idx] = f_data(r)
            # NaN p-value is treated as acceptable: stop at this rank.
            if np.isnan(info[idx]["pval"]) or info[idx]["pval"] > self.significance_level:
                break
            if idx > 0 and info[idx]["pval"] < info[idx - 1]["pval"]:
                # p-value started to fall: the previous rank was optimal.
                pval_red = True
                r = r - 1
                break
        if self.display:
            if info[r - 1]["pval"] > self.significance_level:
                print(f"=> Rank {r:d} is statistically significant at significance level {self.significance_level:.5f}. Procedure terminated.")
            elif pval_red:
                print(f"=> P-value is maximal ({info[r-1]['pval']:.5f}) for rank {r:d}. Procedure terminated.")
            else:
                print(f"=> Failed to determine optimal rank. Maximal rank {r_max:d} is taken")
        return info[r - 1], info
import numpy as np
class State:
    """Quantum state with lazy density-matrix / purification ("root") views."""
    dim = None    # Hilbert-space dimension
    _dm = None    # cached density matrix (dim x dim)
    _root = None  # cached root: dm = root @ root^dagger
    _rank = None  # cached matrix rank of the density matrix
    @classmethod
    def from_root(cls, root: np.ndarray):
        """Construct a state from its (dim x rank) root matrix."""
        state = cls()
        state.dim = root.shape[0]
        state._root = root
        state._rank = root.shape[1]
        return state
    @classmethod
    def from_dm(cls, dm: np.ndarray):
        """Construct a state from a density matrix; rank is computed lazily."""
        state = cls()
        state.dim = dm.shape[0]
        state._dm = dm
        return state
    @classmethod
    def random(cls, dim: int, rank=None):
        """Random state of the given dimension and rank (full rank by default).

        NOTE(review): uses the global np.random state rather than the
        module-level `rng` generator — confirm which is intended.
        """
        if not rank:
            rank = dim
        root = np.random.normal(size=(dim, rank)) + 1j * np.random.normal(size=(dim, rank))
        # Frobenius normalisation makes the density matrix unit trace.
        root = root / np.linalg.norm(root, "fro")
        state = cls.from_root(root)
        state._rank = rank
        return state
    @property
    def root(self) -> np.ndarray:
        """Root matrix; purified from the density matrix on first access."""
        if self._root is None and self._dm is not None:
            self._root = self.purify(self._dm, self.rank)
        return self._root
    @property
    def dm(self) -> np.ndarray:
        """Density matrix; rebuilt from the root on first access."""
        if self._dm is None and self._root is not None:
            self._dm = self._root @ self._root.conj().T
        return self._dm
    @property
    def rank(self):
        """Matrix rank of the density matrix (computed lazily)."""
        if self._rank is None and self._dm is not None:
            self._rank = np.linalg.matrix_rank(self._dm, hermitian=True)
        return self._rank
    @staticmethod
    def purify(a: np.ndarray, rank=None) -> np.ndarray:
        """Root matrix of a Hermitian ``a``, eigenvalues projected to the simplex.

        Columns are ordered by decreasing eigenvalue and optionally truncated
        to ``rank``.
        """
        p, u = np.linalg.eigh(a)
        # Clip/renormalise eigenvalues so the root corresponds to a valid state.
        c = u * np.sqrt(project_to_simplex(p))
        c = c[:, ::-1]
        if rank:
            c = c[:, :rank]
        return c
    @classmethod
    def fidelity(cls, s1, s2):
        """Uhlmann fidelity between two states (trace-normalised first)."""
        if type(s1) is not cls or type(s2) is not cls:
            raise ValueError("Inputs must be of type '{}'".format(cls.__name__))
        dm1 = s1.dm
        dm1 = dm1 / np.trace(dm1)
        dm2 = s2.dm
        dm2 = dm2 / np.trace(dm2)
        # Pure-state shortcut: F = Tr(rho1 rho2).
        if s1.rank == 1 or s2.rank == 1:
            return np.abs(np.trace(dm1 @ dm2))
        dm1sq = cls.purify(dm1)
        lam = project_to_simplex(np.linalg.eigvalsh(dm1sq.conj().T @ dm2 @ dm1sq))
        f = np.abs(np.sum(np.sqrt(lam)) ** 2)
        return f
    def __eq__(self, other):
        # NOTE(review): defining __eq__ makes instances unhashable
        # (__hash__ is implicitly None) — confirm that is acceptable.
        return np.allclose(self.dm, other.dm)
def part_trace(dm, dim, sind):
    """Partial trace of a bipartite density matrix.

    Parameters
    ----------
    dm : np.ndarray
        Density matrix of the composite system, shape (dim[0]*dim[1], dim[0]*dim[1]).
    dim : sequence of two ints
        Dimensions (d0, d1) of the two subsystems.
    sind : int
        Index (0 or 1) of the subsystem to trace out.

    Returns
    -------
    np.ndarray
        Reduced density matrix of the remaining subsystem.
    """
    # Bug fix: the basis slice must stay 2-D (a column vector). A 1-D slice
    # like base1[:, j] is padded with a *leading* axis by np.kron, producing a
    # (d1, d0*d1) row operator, and vec.conj().T @ dm then fails with a shape
    # mismatch. Using base1[:, [j]] yields the (d0*d1, d_rem) isometry
    # |j> (x) I  (resp. I (x) |j>), which is what the sandwich below needs.
    dms = 0
    base1 = np.eye(dim[0])
    base2 = np.eye(dim[1])
    for j in range(dim[sind]):
        if sind == 0:
            vec = np.kron(base1[:, [j]], base2)  # |j> (x) I, shape (d0*d1, d1)
        else:
            vec = np.kron(base1, base2[:, [j]])  # I (x) |j>, shape (d0*d1, d0)
        dms = dms + vec.conj().T @ dm @ vec
    return dms
def project_to_simplex(p: np.ndarray, maintain_sum=True):
    """Euclidean projection of ``p`` onto the simplex {q >= 0, sum(q) = s}.

    The target sum ``s`` is ``sum(p)`` when ``maintain_sum`` is True, and 1
    otherwise. Uses the standard sort-and-threshold algorithm.
    """
    d = len(p)
    target = sum(p) if maintain_sum else 1
    # Work on a descending-sorted copy; remember how to undo the permutation.
    order = p.argsort()[::-1]
    q = p[order]
    thresholds = (np.cumsum(q) - target) / np.arange(1, d + 1)
    # Largest index whose entry stays positive after thresholding.
    k = np.where(q - thresholds > 0)[0][-1]
    q = q - thresholds[k]
    q[q < 0] = 0
    # Scatter back into the original ordering.
    out = np.empty_like(q)
    out[order] = q
    return out
class Process(State):
    """Quantum process in the chi-matrix representation.

    A process on a ``dim``-dimensional system is stored as a
    ``dim**2 x dim**2`` chi matrix (reusing the ``State`` density-matrix
    machinery), or equivalently as Kraus operators / a matrix root of chi.
    """
    # dim2 = dim ** 2: dimension of the chi matrix.
    dim2 = None
    # Cached Kraus operators, shape (rank, dim, dim).
    _kraus = None
    # Whether the process was constructed as trace-preserving.
    _is_tp = True

    @classmethod
    def from_kraus(cls, kraus: np.ndarray):
        """Build a process from a sequence of Kraus operators."""
        process = cls()
        process.dim = kraus[0].shape[0]
        process.dim2 = process.dim ** 2
        process._kraus = kraus
        process._rank = len(kraus)
        return process

    @classmethod
    def from_chi(cls, chi: np.ndarray):
        """Build a process from its chi matrix."""
        process = cls()
        process.dim2 = chi.shape[0]
        process.dim = int(np.sqrt(process.dim2))
        process._dm = chi
        return process

    @classmethod
    def from_root(cls, root: np.ndarray):
        """Build a process from a matrix root of chi (chi = root @ root^H)."""
        process = super().from_root(root)
        # State.from_root stored the root's row count in ``dim``; for a
        # process that count is dim**2, so re-derive the system dimension.
        process.dim2 = process.dim
        process.dim = int(np.sqrt(process.dim2))
        return process

    @classmethod
    def random(cls, dim: int, rank=1, trace_preserving=True):
        """Draw a random process of the given Kraus rank.

        For a trace-preserving process the Kraus operators are dim-row blocks
        of a random (dim*rank x dim) isometry, so that sum_k E_k^H E_k = I.
        """
        if trace_preserving:
            u = rand_unitary(dim * rank)
            u = u[:, :dim]
            kraus = u.reshape(rank, dim, dim)
            process = cls.from_kraus(kraus)
        else:
            # NOTE(review): this delegates to State.random(dim, rank), which
            # builds a (dim x rank) root rather than a (dim**2 x rank) one --
            # confirm callers expect this for non-trace-preserving processes.
            process = super().random(dim, rank)
        process._is_tp = trace_preserving
        return process

    @property
    def chi(self):
        """Chi matrix, lazily computed from the root."""
        if self._dm is None:
            e = self.root
            self._dm = e @ e.conj().T
        return self._dm

    @property
    def kraus(self):
        """Kraus operators, lazily unpacked from the root.

        Inverse of the column packing used in the ``root`` property below.
        """
        if self._kraus is None:
            e = self.root
            self._kraus = e.reshape(self.dim, self.dim, self.rank).transpose(2, 1, 0)
        return self._kraus

    @property
    def root(self):
        """Matrix root of chi, shape (dim**2, rank)."""
        if self._root is None:
            if self._kraus is not None:
                # Pack each Kraus operator into one column of the root.
                self._root = self._kraus.transpose(2, 1, 0).reshape(self.dim2, self.rank)
            else:
                self._root = super().root
        return self._root
def rand_unitary(dim: int) -> np.ndarray:
    """Draw a random unitary matrix.

    QR-decompose a complex Gaussian matrix and normalize the phases of the
    diagonal of R, which makes the resulting Q Haar-distributed.
    """
    z = np.random.normal(size=(dim, dim)) + 1j * np.random.normal(size=(dim, dim))
    q, r = np.linalg.qr(z)
    phases = np.diag(r)
    return q @ np.diag(phases / np.abs(phases))
import numpy as np
from typing import Union
from root_tomography.entity import State
from root_tomography.experiment import Experiment
from root_tomography.optimizer import FixedPoint, AutoRank
from root_tomography.tools import pinv, pval
def reconstruct_state(
    ex: Experiment,
    rank: Union[int, str] = "auto",
    significance_level: float = 0.05,
    get_stats: bool = False,
    init: Union[str, np.ndarray] = "pinv",
    reg_coeff: float = 0.5,
    tol: float = 1e-8,
    max_iter: int = int(1e6),
    display: Union[int, bool] = False,
    return_rinfo: bool = False
):
    """Maximum-likelihood quantum state reconstruction.

    Parameters
    ----------
    ex : Experiment
        Measurement protocol, click data and statistics model.
    rank : int, "auto" or "full"
        Density-matrix rank. "auto" selects the rank by p-value testing
        (see AutoRank), "full" uses the Hilbert-space dimension.
    significance_level : float
        Significance level for the automatic rank search.
    get_stats : bool
        If True, add chi2/df/p-value statistics to the returned info
        (implies return_rinfo=True).
    init : "pinv" or np.ndarray
        Initial approximation: pseudo-inverse estimate, or an explicit
        density matrix whose root (truncated to ``rank``) seeds the solver.
    reg_coeff, tol, max_iter :
        Fixed-point iteration parameters.
    display : int or bool
        Progress-output verbosity.
    return_rinfo : bool
        If True, return (state, rinfo) instead of just the state.

    Returns
    -------
    State or (State, dict)
    """
    # Snapshot of the call arguments, taken before any other local is
    # created; the automatic rank search re-enters this function with these
    # arguments and an explicit rank (see rank_fun).
    args = locals()
    dim = ex.dim
    if rank == "auto":
        optim = AutoRank()
        optim.set_options(display=display, significance_level=significance_level)
        rinfo, data_r = optim.run(dim, lambda r: rank_fun(args, r))
        # rank_fun stashes the reconstructed state inside the info dict;
        # pull it out and keep the per-rank data for the caller.
        state = rinfo["state"]
        del rinfo["state"]
        rinfo.update({"data_r": data_r})
        if return_rinfo:
            return state, rinfo
        else:
            return state
    elif rank == "full":
        rank = dim
    if rank < 1 or rank > dim:
        raise ValueError("Density matrix rank should be between 1 and Hilbert space dimension")
    if init == "pinv":
        # Pseudo-inverse estimate of the state from observed frequencies.
        p_est = ex.vec_clicks / ex.vec_nshots
        psi = pinv(ex.vec_proto, p_est, rank=rank).root
    else:
        # Root of the user-supplied density matrix, truncated to ``rank``.
        psi = State.purify(init, rank)
    optim = FixedPoint()
    optim.set_options(display=display, max_iter=max_iter, tol=tol, reg_coeff=reg_coeff)
    # Likelihood-equation map: the ML root is a fixed point of f_val.
    mu_inv = 1 / ex.logL_eq_mu()
    f_val = lambda sq: mu_inv * ex.logL_eq_jmat_dm(sq @ sq.conj().T / np.trace(sq.conj().T @ sq)) @ sq
    psi, optim_info = optim.run(psi, f_val)
    state = State.from_root(psi / np.sqrt(np.trace(psi.conj().T @ psi)))
    rinfo = {
        "optimizer": optim,
        "iter": optim_info["iter"],
        "rank": rank
    }
    if get_stats:
        return_rinfo = True
        chi2 = ex.chi2_dm(state.dm)
        df = ex.deg_f_rank(rank)
        rinfo.update({"chi2": chi2, "df": df, "pval": pval(chi2, df)})
    if return_rinfo:
        return state, rinfo
    else:
        return state
def rank_fun(args, r):
    """Run one fixed-rank reconstruction for the automatic rank search.

    Mutates ``args`` in place (fixing the rank and enabling statistics),
    re-enters reconstruct_state, and returns its info dict augmented with
    the reconstructed state under the key "state".
    """
    args["rank"] = r
    args["get_stats"] = True
    state, stats = reconstruct_state(**args)
    stats["state"] = state
    return stats
import numpy as np
from scipy.linalg import null_space
from typing import Union
from warnings import warn
from root_tomography.entity import State, Process
from root_tomography.experiment import Experiment, nshots_divide
from root_tomography.tools import extend
def infomatrix(entity: Union[State, Process], ex: Experiment, rank: Union[str, int] = "entity"):
    """Fisher information matrix of the experiment at the given state/process.

    The matrix is taken with respect to the stacked real and imaginary parts
    of the purification root ``psi``.

    Parameters
    ----------
    entity : State or Process
        Point at which the information matrix is evaluated.
    ex : Experiment
        Measurement protocol and statistics model.
    rank : "entity" or int
        Root rank to use; "entity" keeps the entity's own root.

    Returns
    -------
    np.ndarray
        Real symmetric Fisher information matrix.
    """
    dm = entity.dm
    dm_norm = np.trace(dm)
    if rank == "entity":
        psi = entity.root
    else:
        psi = State.purify(dm, rank)
    # Find close state with no zeros probabilities
    # (zero probabilities would make the per-outcome information diverge).
    prob = []
    p_tol = 1e-10
    n_tries = 100
    for j in range(n_tries):
        prob = ex.get_probs_sq(psi)
        if np.all(prob > p_tol):
            break
        if j == n_tries - 1:
            warn("Failed to find non-singular state")
        else:
            # Perturb the root slightly and renormalize to keep trace(dm).
            psi += (np.random.normal(size=psi.shape) + 1j * np.random.normal(size=psi.shape)) * np.sqrt(p_tol)
            psi = psi / np.sqrt(np.trace(psi.conj().T @ psi) / dm_norm)
    # Calculate Fisher information matrix
    h = 0
    operators = [extend(elem, 3) for elem in ex.proto]
    operators = np.concatenate(tuple(operators), axis=0)
    nshots = ex.vec_nshots
    for elem, n, p in zip(operators, nshots, prob):
        # Sensitivity of this outcome to (Re psi, Im psi), as one real vector.
        a = np.reshape(elem @ psi, (-1,), order="F")
        a = np.concatenate((np.real(a), np.imag(a)))
        h = h + ex.stat().fisher_information(n, p) * np.outer(a, a)
    h = 4 * h
    return h
def bound(entity: Union[State, Process], ex: Experiment, rank: Union[str, int] = "entity"):
    """Variance spectrum bounding the reconstruction accuracy.

    Inverts the Fisher information on the subspace orthogonal to the
    normalization and gauge (phase-insensitivity) directions and returns
    the resulting variances, scaled by the trace of the density matrix.
    """
    dm = entity.dm
    if rank == "entity":
        rank = entity.rank
        psi = entity.root
    else:
        psi = State.purify(dm, rank)
    h = infomatrix(entity, ex, rank=rank)
    constraints = []
    sh, uh = np.linalg.eigh(h)
    # Normalization constraints
    if type(entity) is State:
        psi_vec = np.reshape(psi, (-1,), order="F")
        constraints.append(np.concatenate((np.real(psi_vec), np.imag(psi_vec))))
    elif type(entity) is Process:
        # NOTE(review): no normalization constraint is imposed for processes;
        # confirm this is intentional.
        pass
    # Phase insensitivity constraints
    # (zero modes of the information matrix; a rank-r root has r**2 gauge
    # directions, so at most r**2 zero eigenvalues are expected).
    tol = max(sh) * 1e-10
    idx = np.where(sh < tol)[0]
    if len(idx) > rank ** 2:
        warn("Information matrix has more than r^2 zero eigenvalues")
    # Clamp the zero modes so 1/sqrt(sh) below stays finite.
    sh[idx] = tol
    idx = idx[:rank ** 2]
    constraints += [uh[:, j] for j in idx]
    # Find variances
    constraints = [extend(constraint, 2).T for constraint in constraints]
    constraints = np.concatenate(tuple(constraints), axis=1)
    q = null_space(constraints.T)
    # Singular values of the inverse-information form restricted to the
    # unconstrained subspace give the variances.
    var_sq = q.T @ uh @ np.diag(1 / np.sqrt(sh))
    var = np.linalg.svd(var_sq, compute_uv=False) ** 2
    fid_d = var / np.real(np.trace(entity.dm))
    return fid_d
def lossfun(entity: Union[State, Process], ex: Experiment, *args, **kwargs):
    """Shot-count-independent loss: total variance bound times total shots.

    If the experiment has no shot counts yet, a unit total is assigned first
    (mutating ``ex``). Extra arguments are forwarded to ``bound``.
    """
    if ex.nshots is None:
        ex.set_data(nshots=nshots_divide(1, len(ex.proto), "total"))
    total_variance = sum(bound(entity, ex, *args, **kwargs))
    return total_variance * sum(ex.nshots)
from abc import ABC, abstractmethod
import numpy as np
from typing import Tuple
# Module-level random generator shared by all statistics simulators.
rng = np.random.default_rng()


class Statistics(ABC):
    """Interface of a measurement-outcome statistical model.

    Implementations describe how click numbers ``k`` are distributed for
    ``n`` trials with outcome probabilities ``p``, and supply the pieces of
    the (log-)likelihood used by the reconstruction routines.
    """

    @staticmethod
    @abstractmethod
    def sample(n: int, p: np.ndarray) -> np.ndarray:
        """Draw simulated click numbers for n trials with probabilities p."""
        pass

    @staticmethod
    @abstractmethod
    def logL(n: np.ndarray, k: np.ndarray, p: np.ndarray) -> float:
        """Log-likelihood of the clicks k (up to a model constant)."""
        pass

    @staticmethod
    @abstractmethod
    def dlogL(n: np.ndarray, k: np.ndarray, p: np.ndarray) -> np.ndarray:
        """Derivative of the log-likelihood with respect to p."""
        pass

    @staticmethod
    @abstractmethod
    def logL_mu(n: np.ndarray, k: np.ndarray) -> float:
        """Normalization scalar of the likelihood equation."""
        pass

    @staticmethod
    @abstractmethod
    def logL_jmat(n: np.ndarray, k: np.ndarray, p: np.ndarray) -> Tuple[np.ndarray, float]:
        """Weights b and offset b0 of the likelihood-equation matrix."""
        pass

    @staticmethod
    @abstractmethod
    def chi2(n: np.ndarray, k: np.ndarray, p: np.ndarray) -> float:
        """Pearson chi-squared between observed and expected clicks."""
        pass

    @staticmethod
    @abstractmethod
    def deg_f(clicks: np.ndarray) -> int:
        """Number of statistical degrees of freedom of the click data."""
        pass

    @staticmethod
    @abstractmethod
    def fisher_information(n: np.ndarray, p: np.ndarray) -> np.ndarray:
        """Per-outcome Fisher information about the probabilities p."""
        pass


class Polynomial(Statistics):
    """Multinomial statistics: all outcomes of a measurement sum to n."""

    @staticmethod
    def sample(n: int, p: np.ndarray) -> np.ndarray:
        """Draw multinomial click numbers; p must sum to unity."""
        if abs(sum(p) - 1) > 1e-8:
            raise ValueError(
                "For simulating polynomial statistics probabilities in each measurement should sum to unity")
        p = p / sum(p)
        if len(p) == 2:
            # Two outcomes: one binomial draw fixes both counts.
            first = rng.binomial(n, p[0])
            return np.array([first, n - first])
        return rng.multinomial(n, p)

    @staticmethod
    def logL(n: np.ndarray, k: np.ndarray, p: np.ndarray) -> float:
        """Multinomial log-likelihood (up to the constant multinomial coefficient)."""
        return float((k * np.log(p)).sum())

    @staticmethod
    def dlogL(n: np.ndarray, k: np.ndarray, p: np.ndarray) -> np.ndarray:
        """Gradient of logL with respect to p."""
        return k / p

    @staticmethod
    def logL_mu(n: np.ndarray, k: np.ndarray) -> float:
        """Normalization scalar of the likelihood equation."""
        return float(np.sum(k))

    @staticmethod
    def logL_jmat(n: np.ndarray, k: np.ndarray, p: np.ndarray) -> Tuple[np.ndarray, float]:
        """Weights and (zero) offset of the likelihood-equation matrix."""
        return k / p, 0

    @staticmethod
    def chi2(n: np.ndarray, k: np.ndarray, p: np.ndarray) -> float:
        """Pearson chi-squared between observed and expected clicks."""
        expected = p * n
        return float(np.sum((expected - k) ** 2 / expected))

    @staticmethod
    def deg_f(clicks: np.ndarray) -> int:
        """Outcomes per measurement minus one, summed over measurements."""
        return sum(len(k) - 1 for k in clicks)

    @staticmethod
    def fisher_information(n: np.ndarray, p: np.ndarray) -> np.ndarray:
        """Fisher information n / p of a multinomial outcome."""
        return n / p
class Binomial(Statistics):
    """Binomial statistics: one outcome per measurement, k successes of n trials."""

    @staticmethod
    def sample(n: int, p: np.ndarray) -> np.ndarray:
        """Draw a binomial click number; p must hold exactly one probability."""
        if len(p) > 1:
            raise ValueError(
                "Only a single result per measurement is allowed for binomial statistics simulation")
        return rng.binomial(n, p)

    @staticmethod
    def logL(n: np.ndarray, k: np.ndarray, p: np.ndarray) -> float:
        """Binomial log-likelihood (up to the constant binomial coefficient)."""
        successes = k * np.log(p)
        failures = (n - k) * np.log(1 - p)
        return float(np.sum(successes + failures))

    @staticmethod
    def dlogL(n: np.ndarray, k: np.ndarray, p: np.ndarray) -> np.ndarray:
        """Gradient of logL with respect to p."""
        return k / p - (n - k) / (1 - p)

    @staticmethod
    def logL_mu(n: np.ndarray, k: np.ndarray) -> float:
        """Normalization scalar of the likelihood equation."""
        return float(np.sum(k))

    @staticmethod
    def logL_jmat(n: np.ndarray, k: np.ndarray, p: np.ndarray) -> Tuple[np.ndarray, float]:
        """Weights and offset of the likelihood-equation matrix."""
        weights = k / p - (n - k) / (1 - p)
        offset = float(np.sum((n - k) * p / (1 - p)))
        return weights, offset

    @staticmethod
    def chi2(n: np.ndarray, k: np.ndarray, p: np.ndarray) -> float:
        """Pearson chi-squared over both success and failure counts."""
        expected = np.concatenate((p * n, (1 - p) * n))
        observed = np.concatenate((k, n - k))
        return float(np.sum((expected - observed) ** 2 / expected))

    @staticmethod
    def deg_f(clicks: np.ndarray) -> int:
        """One degree of freedom per measurement."""
        return len(clicks)

    @staticmethod
    def fisher_information(n: np.ndarray, p: np.ndarray) -> np.ndarray:
        """Fisher information n / (p (1 - p)) of a binomial outcome."""
        return n / p / (1 - p)
class Poisson(Statistics):
    """Poisson statistics: click numbers distributed with mean n * p."""

    @staticmethod
    def sample(n: int, p: np.ndarray) -> np.ndarray:
        """Draw a Poisson click number; p must hold exactly one probability."""
        if len(p) > 1:
            raise ValueError(
                "Only a single result per measurement is allowed for poisson statistics simulation")
        return rng.poisson(n * p)

    @staticmethod
    def logL(n: np.ndarray, k: np.ndarray, p: np.ndarray) -> float:
        """Poisson log-likelihood (up to the constant log(k!) term)."""
        rates = n * p
        return float(np.sum(k * np.log(rates) - rates))

    @staticmethod
    def dlogL(n: np.ndarray, k: np.ndarray, p: np.ndarray) -> np.ndarray:
        """Gradient of logL with respect to p."""
        return k / p - n

    @staticmethod
    def logL_mu(n: np.ndarray, k: np.ndarray) -> float:
        """Normalization scalar of the likelihood equation."""
        return float(np.sum(k))

    @staticmethod
    def logL_jmat(n: np.ndarray, k: np.ndarray, p: np.ndarray) -> Tuple[np.ndarray, float]:
        """Weights and offset of the likelihood-equation matrix."""
        return k / p - n, float(np.sum(n * p))

    @staticmethod
    def chi2(n: np.ndarray, k: np.ndarray, p: np.ndarray) -> float:
        """Pearson chi-squared between observed and expected clicks."""
        expected = p * n
        return float(np.sum((expected - k) ** 2 / expected))

    @staticmethod
    def deg_f(clicks: np.ndarray) -> int:
        """One degree of freedom per measurement."""
        return len(clicks)

    @staticmethod
    def fisher_information(n: np.ndarray, p: np.ndarray) -> np.ndarray:
        """Fisher information n / p of a Poisson outcome."""
        return n / p
class PoissonUnity(Poisson):
    """Poisson variant whose likelihood-equation weights drop the constant -n term."""

    @staticmethod
    def logL_jmat(n: np.ndarray, k: np.ndarray, p: np.ndarray) -> Tuple[np.ndarray, float]:
        """Weights k / p with zero offset."""
        return k / p, 0
class Asymptotic(Poisson):
    """Deterministic limit: 'sampled' clicks equal the expectation n * p exactly."""

    @staticmethod
    def sample(n: int, p: np.ndarray) -> np.ndarray:
        """Return the expected click numbers without any randomness."""
        expectation = p * n
        return expectation
# Registry of the built-in statistics models, keyed by the name used when
# configuring an experiment.
BUILD_IN = {
    "polynomial": Polynomial,
    "binomial": Binomial,
    "poisson": Poisson,
    "poisson_unity": PoissonUnity,
    "asymptotic": Asymptotic
}
class RootApproximator:
    """Classic one-dimensional root-finding algorithms.

    Each method refines an estimate until its stopping criterion (residual
    or step size, depending on the method) drops below ``error``. Function
    values are cached between iterations, so ``func`` is evaluated only once
    per new point (the original re-evaluated it up to five times per
    iteration, which is wasteful when ``func`` is expensive).
    """

    def regula_falsi(self, func, a, b, error, display_guesses=False):
        """
        Approximate the root of a given function using the Regula Falsi method.
        Parameters:
            func (function): The function for which to find the root.
            a (float): The lower bound of the interval containing the root.
            b (float): The upper bound of the interval containing the root.
            error (float): The desired error tolerance for the approximation.
            display_guesses (bool, optional): If True, prints the guesses at each iteration. Defaults to False.
        Returns:
            float: The approximated root of the function.
        Raises:
            ValueError: If func(a) and func(b) do not have opposite signs.
        """
        f_a = func(a)
        f_b = func(b)
        if f_a * f_b >= 0:
            raise ValueError("Initial guesses do not bracket the root.")
        c, f_c = a, f_a
        num_guesses = 0
        while abs(f_c) > error:
            num_guesses += 1
            # Intersection of the secant through (a, f_a), (b, f_b) with y = 0.
            c = (a * f_b - b * f_a) / (f_b - f_a)
            f_c = func(c)
            # Keep the sub-interval that still brackets the root.
            if f_c * f_a < 0:
                b, f_b = c, f_c
            else:
                a, f_a = c, f_c
            if display_guesses:
                print(f"Guess {num_guesses}: {c}")
        return c

    def fixed_point_iteration(self, func, x0, error, display_guesses=False):
        """
        Approximate the fixed point of a given function using fixed-point iteration.
        Parameters:
            func (function): The function for which to find the fixed point.
            x0 (float): The initial guess for the fixed point.
            error (float): The desired error tolerance for the approximation.
            display_guesses (bool, optional): If True, prints the guesses at each iteration. Defaults to False.
        Returns:
            float: The approximated fixed point of the function.
        """
        # Iterate x -> func(x) until successive estimates agree within error.
        x1 = func(x0)
        num_guesses = 0
        while abs(x1 - x0) > error:
            num_guesses += 1
            x0 = x1
            x1 = func(x0)
            if display_guesses:
                print(f"Guess {num_guesses}: {x1}")
        return x1

    def bisection_method(self, func, a, b, error, display_guesses=False):
        """
        Approximate the root of a given function using the Bisection method.
        Parameters:
            func (function): The function for which to find the root.
            a (float): The lower bound of the interval containing the root.
            b (float): The upper bound of the interval containing the root.
            error (float): The desired error tolerance for the approximation.
            display_guesses (bool, optional): If True, prints the guesses at each iteration. Defaults to False.
        Returns:
            float: The approximated root of the function.
        Raises:
            ValueError: If func(a) and func(b) do not have opposite signs.
        """
        f_a = func(a)
        f_b = func(b)
        if f_a * f_b >= 0:
            raise ValueError("Initial guesses do not bracket the root.")
        c = (a + b) / 2
        f_c = func(c)
        num_guesses = 0
        while abs(f_c) > error:
            num_guesses += 1
            # Halve the interval, keeping the half that brackets the root.
            if f_c * f_a < 0:
                b, f_b = c, f_c
            else:
                a, f_a = c, f_c
            c = (a + b) / 2
            f_c = func(c)
            if display_guesses:
                print(f"Guess {num_guesses}: {c}")
        return c

    def newton_raphson(self, func, dfunc, x0, error, display_guesses=False):
        """
        Approximate the root of a given function using the Newton-Raphson method.
        Parameters:
            func (function): The function for which to find the root.
            dfunc (function): The derivative of the function for which to find the root.
            x0 (float): The initial guess for the root.
            error (float): The desired error tolerance for the approximation.
            display_guesses (bool, optional): If True, prints the guesses at each iteration. Defaults to False.
        Returns:
            float: The approximated root of the function.
        """
        # Iterate x -> x - f(x)/f'(x) until the step size is within error.
        x1 = x0 - func(x0) / dfunc(x0)
        num_guesses = 0
        while abs(x1 - x0) > error:
            num_guesses += 1
            x0 = x1
            x1 = x0 - func(x0) / dfunc(x0)
            if display_guesses:
                print(f"Guess {num_guesses}: {x1}")
        return x1

    def secant_method(self, func, x0, x1, error, display_guesses=False):
        """
        Approximate the root of a given function using the Secant method.
        Parameters:
            func (function): The function for which to find the root.
            x0 (float): The first initial guess for the root.
            x1 (float): The second initial guess for the root.
            error (float): The desired error tolerance for the approximation.
            display_guesses (bool, optional): If True, prints the guesses at each iteration. Defaults to False.
        Returns:
            float: The approximated root of the function.
        """
        f0 = func(x0)
        f1 = func(x1)
        num_guesses = 0
        while abs(x1 - x0) > error:
            num_guesses += 1
            # Newton step with the derivative replaced by a finite difference.
            x2 = x1 - f1 * (x1 - x0) / (f1 - f0)
            x0, f0 = x1, f1
            x1, f1 = x2, func(x2)
            if display_guesses:
                print(f"Guess {num_guesses}: {x1}")
        return x1
class Integration:
    """Composite Newton-Cotes quadrature rules."""

    def trapezoidal_rule(self, func, a, b, n):
        """
        Approximate the definite integral of a function using the trapezoidal rule.
        Parameters:
            func (function): The function to integrate.
            a (float): The lower bound of the integration interval.
            b (float): The upper bound of the integration interval.
            n (int): The number of subintervals to use for the approximation.
        Returns:
            float: The approximated integral value.
        """
        h = (b - a) / n
        interior = sum(func(a + i * h) for i in range(1, n))
        return ((func(a) + func(b)) / 2 + interior) * h

    def simpsons_13_rule(self, func, a, b, n):
        """
        Approximate the definite integral of a function using Simpson's 1/3 rule.
        Parameters:
            func (function): The function to integrate.
            a (float): The lower bound of the integration interval.
            b (float): The upper bound of the integration interval.
            n (int): The number of subintervals to use for the approximation (must be even).
        Returns:
            float: The approximated integral value.
        Raises:
            ValueError: If n is not an even integer.
        """
        if n % 2 != 0:
            raise ValueError("n must be an even integer for Simpson's 1/3 rule.")
        h = (b - a) / n
        odd_terms = sum(func(a + i * h) for i in range(1, n, 2))
        even_terms = sum(func(a + i * h) for i in range(2, n - 1, 2))
        return (func(a) + func(b) + 4 * odd_terms + 2 * even_terms) * h / 3

    def simpsons_38_rule(self, func, a, b, n):
        """
        Approximate the definite integral of a function using Simpson's 3/8 rule.
        Parameters:
            func (function): The function to integrate.
            a (float): The lower bound of the integration interval.
            b (float): The upper bound of the integration interval.
            n (int): The number of subintervals to use for the approximation (must be a multiple of 3).
        Returns:
            float: The approximated integral value.
        Raises:
            ValueError: If n is not a multiple of 3.
        """
        if n % 3 != 0:
            raise ValueError("n must be a multiple of 3 for Simpson's 3/8 rule.")
        h = (b - a) / n
        paired = sum(func(a + i * h) + func(a + (i + 1) * h) for i in range(1, n, 3))
        thirds = sum(func(a + i * h) for i in range(3, n - 2, 3))
        return (func(a) + func(b) + 3 * paired + 2 * thirds) * 3 * h / 8
from __future__ import division, absolute_import, print_function
__version__ = '1.0.3'
import itertools
class Result(object):
    """
    Result from a succesful call to a function in this module.
    Attributes
    ----------
    root : float or None
        Estimated root location. `None` if it is not known.
    f_root : float or None
        Value of `f` evaluated at `root`. `None` if `root` is `None`.
    bracket : sequence of two floats or None
        Interval that brackets the root. `None` if it is not known.
    f_bracket : sequence of two floats or None
        Values of `f` at the endpoints of `bracket. `None` if `f_bracket` is
        `None`.
    iterations : int
        Number of iterations performed by the algorithm.
    function_calls : int
        Number of calls to `f`.
    """
    def __init__(self, root=None, f_root=None, bracket=None, f_bracket=None,
                 iterations=0, function_calls=0):
        # Plain data container; every attribute defaults to "unknown" (None)
        # or a zero counter.
        self.root = root
        self.f_root = f_root
        self.bracket = bracket
        self.f_bracket = f_bracket
        self.iterations = iterations
        self.function_calls = function_calls


class IterationLimitReached(RuntimeError):
    """
    Exception raised when a function in this module does not finish within the
    specified maximum number of iterations.
    Attributes
    ----------
    interval : sequence of two floats
        Working interval at the time the iteration limit was reached.
    f_interval : sequence of two floats
        Values of the `f` at the endpoints of `interval`.
    function_calls : int
        Number of calls to `f`.
    """
    def __init__(self, message, interval, f_interval, function_calls):
        # Keep the algorithm's final working state for post-mortem inspection.
        super(IterationLimitReached, self).__init__(message)
        self.interval = interval
        self.f_interval = f_interval
        self.function_calls = function_calls


def bracket_root(f, interval, growth_factor=2, maxiter=100,
                 f_interval=(None, None), ftol=None):
    """
    Find an interval that brackets a root of a function by searching in one
    direction.
    Starting from an interval, it moves and expands the interval in the
    direction of the second endpoint until the interval brackets a root of the
    given function.
    Parameters
    ----------
    f : callable
        Continuous scalar function.
    interval : sequence of two floats
        Starting interval. Must have non-equal endpoints, but they do not need
        to be listed in order. During the search, the interval will be shifted
        and expanded in the direction of ``interval[1]``.
    growth_factor : float, optional
        Factor by which to grow the width of the working interval between
        iterations. Must be >= 1.
    maxiter : int or None, optional
        Maximum number of iterations. Must be nonnegative. An
        :exc:`IterationLimitReached` exception will be raised if the bracket is
        not found within the specified number of iterations. If `None`,
        there is no maximum number of iterations.
    f_interval : sequence of two of {None, float}, optional
        Values of `f` at the endpoints of the interval, if known (use `None` if
        a value is not known). For every known value, one fewer call to `f`
        will be required.
    ftol : None or float
        An optional absolute tolerance for the value of `f` at a root. If
        given, the algorithm will immediately return any root it happens to
        discover in its execution.
    Returns
    -------
    result : Result
        Normally contains a bracket and no root. However, if `ftol` is not
        `None` and a root is found, it will contain that root; in this case,
        the result will also include a bracket only if one was found at the
        same time as the root.
    See also
    --------
    bisect
    Notes
    -----
    If `ftol` is not `None` and both endpoints of the starting interval qualify
    as roots, the one where the absolute value of `f` is lower is chosen as the
    root.
    """
    if growth_factor < 1:
        raise ValueError("growth_factor cannot be less than 1")
    if ftol is not None and ftol < 0:
        raise ValueError("ftol cannot be negative")
    if maxiter is not None and maxiter < 0:
        raise ValueError("maxiter cannot be negative")
    a, b = interval
    if a == b:
        raise ValueError("interval must have different endpoints")
    f_a, f_b = f_interval
    function_calls = 0
    # Evaluate at endpoints if necessary
    if f_a is None:
        f_a = f(a)
        function_calls += 1
    if f_b is None:
        f_b = f(b)
        function_calls += 1
    # Test for a root at the first endpoint (the second endpoint will be
    # checked inside the main loop)
    if ftol is not None and abs(f_a) <= ftol and abs(f_a) <= abs(f_b):
        if f_a*f_b < 0:
            return Result(root=a,
                          f_root=f_a,
                          bracket=(a,b),
                          f_bracket=(f_a, f_b),
                          iterations=0,
                          function_calls=function_calls)
        return Result(root=a,
                      f_root=f_a,
                      iterations=0,
                      function_calls=function_calls)
    # Test and move the interval until it brackets a root
    for iteration in itertools.count(start=0):
        if f_a*f_b < 0:
            if ftol is not None and abs(f_b) <= ftol:
                return Result(root=b,
                              f_root=f_b,
                              bracket=(a,b),
                              f_bracket=(f_a, f_b),
                              iterations=iteration,
                              function_calls=function_calls)
            return Result(bracket=(a,b),
                          f_bracket=(f_a, f_b),
                          iterations=iteration,
                          function_calls=function_calls)
        if ftol is not None and abs(f_b) <= ftol:
            # Bug fix: report the actual iteration count. This branch used to
            # hard-code iterations=0 even when the root was found at an
            # endpoint reached after several interval moves.
            return Result(root=b,
                          f_root=f_b,
                          iterations=iteration,
                          function_calls=function_calls)
        if maxiter is not None and iteration >= maxiter:
            raise IterationLimitReached("failed to converge after {} "
                                        "iterations".format(maxiter),
                                        interval=(a,b),
                                        f_interval=(f_a, f_b),
                                        function_calls=function_calls)
        # Shift toward b and grow the width by growth_factor.
        a, b = b, b + growth_factor*(b-a)
        f_a, f_b = f_b, f(b)
        function_calls += 1
class NotABracketError(ValueError):
    """
    Exception raised by :func:`bisect` when the interval passed as `bracket`
    does not actually contain a root.
    Attributes
    ----------
    f_interval : sequence of two floats
        Values of the `f` at the endpoints of the interval that is not a
        bracket.
    function_calls : int
        Number of calls to `f`.
    """
    def __init__(self, message, f_interval, function_calls):
        # Keep the diagnostic data so callers can see why the interval
        # failed to bracket a root.
        super(NotABracketError, self).__init__(message)
        self.f_interval = f_interval
        self.function_calls = function_calls
def bisect(f, bracket, ftol=1e-12, maxiter=100, f_bracket=(None, None)):
    """
    Find root of a function within a bracket using the bisection method.
    The function must have opposite signs at the endpoints of the bracket.
    Compared to SciPy's :func:`scipy.optimize.bisect` and
    :func:`scipy.optimize.root_scalar` functions, this function tests for a
    root by looking only at the residual (i.e., the value of `f`).
    Parameters
    ----------
    f : callable
        Continuous scalar function.
    bracket: sequence of two floats
        An interval bracketing a root. `f` must have different signs at the two
        endpoints, or a :exc:`NotABracketError` will be raised. The endpoints
        do not need to be listed in order.
    ftol : float, optional
        Absolute tolerance for the value of `f` at the root. Must be
        nonnegative.
    maxiter : int or None, optional
        Maximum number of iterations. Must be nonnegative. An
        :exc:`IterationLimitReached` exception will be raised if the specified
        tolerance is not achieved within this number of iterations. If `None`,
        there is no maximum number of iterations.
    f_bracket : sequence of two of {None, float}, optional
        Values of `f` at the endpoints of `bracket`, if known (use `None` if a
        value is not known). For every known value, one fewer call to `f` will
        be required.
    Returns
    -------
    result : Result
        Contains the root and the final bracket.
    See also
    --------
    bracket_root : Search for a bracket.
    Notes
    -----
    The function starts by testing the endpoints of the bracket. If a root is
    found at one of the endpoints of a valid bracket, no bisection iterations
    are performed and the root is immediately returned. If both endpoints
    qualify as roots, the one where the absolute value of `f` is lower is
    returned.
    """
    if ftol < 0:
        raise ValueError("ftol cannot be negative")
    if maxiter is not None and maxiter < 0:
        raise ValueError("maxiter cannot be negative")
    a, b = bracket
    f_a, f_b = f_bracket
    function_calls = 0
    # Evaluate at endpoints if necessary
    if f_a is None:
        f_a = f(a)
        function_calls += 1
    if f_b is None:
        f_b = f(b)
        function_calls += 1
    # Check that the bracket is valid
    # (a zero product means an endpoint is an exact root, handled below).
    if f_a*f_b > 0:
        raise NotABracketError("f must have different signs at the bracket "
                               "endpoints",
                               f_interval=(f_a, f_b),
                               function_calls=function_calls)
    # Test the endpoints themselves for a root
    if abs(f_a) <= ftol or abs(f_b) <= ftol:
        # Prefer the endpoint with the smaller residual.
        if abs(f_a) <= abs(f_b):
            root, f_root = a, f_a
        else:
            root, f_root = b, f_b
        return Result(root=root,
                      f_root=f_root,
                      bracket=(a,b),
                      f_bracket=(f_a, f_b),
                      iterations=0,
                      function_calls=function_calls)
    # Perform the actual bisection
    for iteration in itertools.count(start=1):
        if maxiter is not None and iteration > maxiter:
            raise IterationLimitReached("failed to converge after {} "
                                        "iterations".format(maxiter),
                                        interval=(a,b),
                                        f_interval=(f_a, f_b),
                                        function_calls=function_calls)
        m = (a + b)/2
        f_m = f(m)
        function_calls += 1
        # Replace the endpoint whose value shares the sign of f_m; an exact
        # zero (f_m == 0) falls into the else branch, so the updated interval
        # keeps m as an endpoint and still brackets the root.
        if f_m*f_a > 0:
            a, f_a = m, f_m
        else:
            b, f_b = m, f_m
        if abs(f_m) <= ftol:
            return Result(root=m,
                          f_root=f_m,
                          bracket=(a,b),
                          f_bracket=(f_a, f_b),
                          iterations=iteration,
                          function_calls=function_calls)
# RootMD 🩺 👩🏼⚕️
Scientific reports/literate programming tool for CERN ROOT and c++. RootMD is a markdown (and other format) processor for markup with ROOT-flavored c++ code. RootMD can execute c++ code and inject the output (from stdout, stderr) and link or embed image outputs. Provides a format for producing code + results for better documentation / notes. This is by design not a jupyter notebook, e.g. not a REPL-like environment. If you like Jupyter notebooks then use that :).
## Installation
```sh
python -m pip install rootmd
```
the you can use it with:
```
python -m rootmd <args>
```
## Features
- execute c++ code blocks via ROOT REPL
- capture stdout, stderr and inject into output
- embed (base64) or link to any image files produced
- output to HTML, Markdown (or obsidian flavored markdown), or as a presentation (via Marp)
- execute html, css, javascript code blocks (for html output) to customize output
- watch files for changes and rerun (good for iterative workflows)
## usage
```sh
usage: rootmd [-h] [--output OUTPUT]
[--format {html,md,obsidian,json,terminal}] [--embed]
[--asset-dir ASSET_DIR] [--verbosity VERBOSITY]
[--watch WATCH] [--run RUN] [--clean] [--no-exec]
input
Convert Markdown with inline c++ code to ROOT output.
positional arguments:
input input Markdown file to execute and convert
optional arguments:
-h, --help show this help message and exit
--output OUTPUT output filename default <input>.<ext> where <ext>
is determined by the chosen format, default html
--format {html,md,obsidian,json,terminal}
output format
--embed embed images as base 64
--asset-dir ASSET_DIR
specify asset output directory, paths are NOTE re-
written to support unless using obsidian format
--verbosity VERBOSITY
specify log verbosity
--watch WATCH watch a file or directory for changes
--run RUN command to run after processing a file. The
filename can be substituted into the command string
with {{file}}. Example: --run="echo {{file}}"
--clean clean artifacts, caution - should only use with
embed or asset copy to <asset-dir>
--no-exec Do not execute any code blocks, just process file
(useful for viewing and testing conversion)
```
## TODO
- [x] make it a python package (so that it can be installed via pip)
- [x] Look into poetry
- [x] start versioning
- [x] support output verbosity levels
- & make output useful
- Add comment for input block identification upon processing
- [x] add watch functionality for reprocessing a file on change
- [ ] Add some additional features
- [ ] Handle watch of multiple files, and how output filename should be named
- [x] Add terminal output using rich pretty output package
- consider iterm image support? [See here](https://iterm2.com/documentation-images.html)
- [x] Basically, markdown output on the terminal : using rich
- [x] Add prismjs for inline code
- test some failure modes when root code has errors leading to hang
- [x] add mathjax to html output for inline mathematics
- [x] "import" source files into output? So that you can write code in a separate file but show it in the output?
- add as a block option
- support for other "languages", e.g. shell
- support for ROOTJS in HTML output
- embed histogram as JSON object in HTML
- [x] clean assets in embed mode or other output path
- download + embed external JS for fully contained, offline ready single file HTML output
- [x] Load HTML / CSS defaults from a file in the package
- [x] Better style HTML output (input / output cells more like Jupyter)
- Consider adding JS for collapsing headings, output blocks etc.
- [ ] add format option to output a ROOT macro with markdown and output converted to comments
- [x] integrate usage of RNUplot for graphical output!
- [x] implement an auto-draw so that you dont have to "Print(...)" every block
- [ ] block option: run code block as a "macro"
- [x] You can already use .L, .x
- [ ] move ROOT executor into member not superclass
- [ ] auto-draw: don't do it for cells without any drawing!
- [x] PDF support
- [ ] Better PDF embed (link to file) (for Firefox especially) using canvas : https://stackoverflow.com/questions/2104608/hiding-the-toolbars-surrounding-an-embedded-pdf
- [ ] Embed PDF base64 in object tag
## Feature wish list
- Cache blocks of code for faster rerun
- server for "sharing" of documents, from command line - use secret links
- Technique for RAW output?
- i.e. write out HTML Table or markup
- inline code replacement, e.g. "We ran `TString::Format("%d",tree->GetEntries())` events."
## Improve ROOT?
- Apply some default style options
-
## Known issues
- watch isn't working, especially when the input is not given
- something is surely broken
### ROOT REPL issues
ROOT REPL struggles with multi-line code:
- function definitions with "{" on the next line do not work, since the REPL does not recognize the multi-line definition
```cpp
void hello() {
// this works
}
void world()
{
// this DOES NOT work
}
```
## Dependencies
RootMD itself is a pure python package which uses:
- [mistletoe](https://github.com/miyuchina/mistletoe) : python markdown parser and processor
- [rich](https://github.com/Textualize/rich) : Rich for pretty terminal output
- pyyaml : for parsing yaml front matter
- Dependencies for HTML output
- [prismjs](https://prismjs.com/) for syntax highlighting in HTML output
- [mathjax](https://www.mathjax.org/) for rendering mathematics
- [ROOT](https://root.cern.ch/) : installed on system and available on PATH
## Example : see [example_in.md](example_in.md) and [example.md](example.md)
This is a simple example of markdown processed by RootMD.
You can include any c++ code that ROOT can handle. For instance (using ROOT6)
this part of the file was processed with
```sh
rootmd example_in.md -f md -o example.md
```
```cpp
cout << "Hello from ROOT (stdout)" << endl;
cerr << "Hello from ROOT (stderr)" << endl;
```
```sh
# Block [0]
Hello from ROOT (stdout)
Hello from ROOT (stderr)
```
```cpp
TH1 * h1 = new TH1F( "hGaus", ";x;counts", 100, -5, 5 );
h1->FillRandom( "gaus", 10000 );
h1->Draw( "pe" );
h1->Draw( "same hist" );
gPad->Print( "h1.svg" );
```
```sh
# Block [1]
Info in <TCanvas::MakeDefCanvas>: created default TCanvas with name c1
Info in <TCanvas::Print>: SVG file h1.svg has been created
```

## Changelog
### v0.5.9
- Add additional executor catch
- Catch "_sigtramp"
- Catch "(no debug info)"
### v0.5.8
- Allow log level to be set from command line arguments.
- Adds parameter "--log" or "--logLevel
### v0.5.7
- Add additional catch conditions on the ROOT executor to try to prevent hanging on code errors
- Catch "Root >"
- Catch "root [N]"
- Catch "*** Break *** segmentation violation" | /rootmd-0.5.9.tar.gz/rootmd-0.5.9/README.md | 0.531453 | 0.818338 | README.md | pypi |
# Example: writing and reading a ROOT TTree with a rootpy TreeModel.
# Demonstrates branch prefixing, STL vector branches, object/collection
# views on read-back, and mixin classes.
print(__doc__)
from rootpy.tree import Tree, TreeModel, FloatCol, IntCol
from rootpy.io import root_open
from rootpy.vector import LorentzVector
from rootpy import stl
from random import gauss, randint
f = root_open("test.root", "recreate")
# define the model
class Point(TreeModel):
    # three scalar float branches
    x = FloatCol()
    y = FloatCol()
    z = FloatCol()
class Event(Point.prefix('a_'), Point.prefix('b_')):
    # a_x, a_y, a_z and b_x, b_y, b_z are implicitly included here
    # define vector branches
    col_x = stl.vector("float")
    col_y = stl.vector("float")
    col_z = stl.vector("float")
    col_n = IntCol()
    # a TLorentzVector
    p = LorentzVector
    i = IntCol()
tree = Tree("test", model=Event)
# fill the tree
for i in range(10):
    tree.a_x = gauss(.5, 1.)
    tree.a_y = gauss(.3, 2.)
    tree.a_z = gauss(13., 42.)
    tree.b_x = gauss(.5, 1.)
    tree.b_y = gauss(.3, 2.)
    tree.b_z = gauss(13., 42.)
    n = randint(1, 10)
    # fill the per-event vector branches with a random number of entries
    for j in range(n):
        tree.col_x.push_back(gauss(.5, 1.))
        tree.col_y.push_back(gauss(.3, 2.))
        tree.col_z.push_back(gauss(13., 42.))
    tree.col_n = n
    tree.p.SetPtEtaPhiM(gauss(.5, 1.),
                        gauss(.5, 1.),
                        gauss(.5, 1.),
                        gauss(.5, 1.))
    tree.i = i
    # reset=True clears the vector branches after each Fill
    tree.fill(reset=True)
tree.write()
f.close()
f = root_open("test.root")
tree = f.test
# define objects by prefix:
tree.define_object(name='a', prefix='a_')
tree.define_object(name='b', prefix='b_')
# define a mixin class to add functionality to a tree object
class Particle(object):
    def who_is_your_daddy(self):
        # mixin method available on each element of the collection below
        print("You are!")
# define collections of objects by prefix
tree.define_collection(name='particles',
                       prefix='col_',
                       size='col_n',
                       mix=Particle)
# loop over "events" in tree
for event in tree:
    print("a.x: {0:f}".format(event.a.x))
    print("b.y: {0:f}".format(event.b.y))
    # loop over "particles" in current event
    for p in event.particles:
        print("p.x: {0:f}".format(p.x))
        p.who_is_your_daddy()
    print(event.p.Eta())
f.close()
# Example: overlay a signal+background HistStack with "data" points,
# rendered once with ROOT and once with matplotlib (ATLAS style).
print(__doc__)
import ROOT
import numpy as np
from rootpy.plotting import Hist, HistStack, Legend, Canvas
from rootpy.plotting.style import get_style, set_style
from rootpy.plotting.utils import draw
from rootpy.interactive import wait
import rootpy.plotting.root2matplotlib as rplt
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator, MultipleLocator
# set the style
style = get_style('ATLAS')
style.SetEndErrorSize(3)
set_style(style)
# set the random seed
ROOT.gRandom.SetSeed(42)
np.random.seed(42)
# signal distribution
signal = 126 + 10 * np.random.randn(100)
signal_obs = 126 + 10 * np.random.randn(100)
# create histograms
h1 = Hist(30, 40, 200, title='Background', markersize=0, legendstyle='F')
h2 = h1.Clone(title='Signal')
h3 = h1.Clone(title='Data', drawstyle='E1 X0', legendstyle='LEP')
h3.markersize = 1.2
# fill the histograms with our distributions
h1.FillRandom('landau', 1000)
# BUG FIX: map() is lazy in Python 3, so `map(h2.Fill, signal)` never
# actually filled the histogram; use explicit loops instead.
for value in signal:
    h2.Fill(value)
h3.FillRandom('landau', 1000)
for value in signal_obs:
    h3.Fill(value)
# set visual attributes
h1.fillstyle = 'solid'
h1.fillcolor = 'green'
h1.linecolor = 'green'
h1.linewidth = 0
h2.fillstyle = 'solid'
h2.fillcolor = 'red'
h2.linecolor = 'red'
h2.linewidth = 0
stack = HistStack([h1, h2], drawstyle='HIST E1 X0')
# plot with ROOT
canvas = Canvas(width=700, height=500)
draw([stack, h3], xtitle='Mass', ytitle='Events', pad=canvas)
# set the number of expected legend entries
legend = Legend([h1, h2, h3], leftmargin=0.45, margin=0.3)
legend.Draw()
label = ROOT.TText(0.3, 0.8, 'ROOT')
label.SetTextFont(43)
label.SetTextSize(25)
label.SetNDC()
label.Draw()
canvas.Modified()
canvas.Update()
# plot with matplotlib
set_style('ATLAS', mpl=True)
fig = plt.figure(figsize=(7, 5), dpi=100)
axes = plt.axes()
axes.xaxis.set_minor_locator(AutoMinorLocator())
axes.yaxis.set_minor_locator(AutoMinorLocator())
axes.yaxis.set_major_locator(MultipleLocator(20))
rplt.bar(stack, stacked=True, axes=axes)
rplt.errorbar(h3, xerr=False, emptybins=False, axes=axes)
plt.xlabel('Mass', position=(1., 0.), va='bottom', ha='right')
plt.ylabel('Events', position=(0., 1.), va='top', ha='right')
axes.xaxis.set_label_coords(1., -0.20)
axes.yaxis.set_label_coords(-0.18, 1.)
leg = plt.legend()
axes.text(0.3, 0.8, 'matplotlib',
          verticalalignment='center', horizontalalignment='center',
          transform=axes.transAxes, fontsize=20)
if not ROOT.gROOT.IsBatch():
    plt.show()
# wait for you to close the ROOT canvas before exiting
wait(True)
# Example: draw the same scatter graph with ROOT and matplotlib,
# comparing default matplotlib styling with the ATLAS style.
print(__doc__)
import ROOT
import numpy as np
from rootpy.plotting import Canvas, Graph
from rootpy.plotting.style import get_style, set_style
from rootpy.interactive import wait
import rootpy.plotting.root2matplotlib as rplt
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator, MultipleLocator
# set the random seed
ROOT.gRandom.SetSeed(42)
np.random.seed(42)
# points
x = np.sort(np.random.random(10)) * 3500
y = np.random.random(10)
# set style for ROOT
set_style('ATLAS')
# create graph
graph = Graph(x.shape[0])
for i, (xx, yy) in enumerate(zip(x, y)):
    graph.SetPoint(i, xx, yy)
# set visual attributes
graph.linecolor = 'blue'
graph.markercolor = 'blue'
graph.xaxis.SetTitle("E_{T} [GeV]")
graph.yaxis.SetTitle("d#sigma_{jet}/dE_{T,jet} [fb/GeV]")
graph.xaxis.SetRangeUser(0, 3500)
graph.yaxis.SetRangeUser(0, 1)
# plot with ROOT
canvas = Canvas()
graph.Draw("APL")
label = ROOT.TText(0.4, 0.8, "ROOT")
label.SetTextFont(43)
label.SetTextSize(25)
label.SetNDC()
label.Draw()
canvas.Modified()
canvas.Update()
# plot with matplotlib
def plot_with_matplotlib():
    # build the equivalent matplotlib figure; appearance follows the
    # currently-active matplotlib style, so call set_style() first
    fig, axes = plt.subplots()
    axes.plot(x, y, 'o-', markeredgewidth=0)
    axes.set_xlabel(r"$E_T$ [GeV]",
                    horizontalalignment="right", x=1, labelpad=20)
    axes.set_ylabel(r"$d\sigma_{jet}/dE_{T,jet}$ [fb/GeV]",
                    horizontalalignment="right", y=1, labelpad=32)
    axes.set_xlim(0, 3500)
    axes.set_ylim(0, 1)
    return fig, axes
# plot without style
fig1, axes1 = plot_with_matplotlib()
axes1.text(0.4, 0.8, 'matplotlib (no style)',
           verticalalignment='center', horizontalalignment='center',
           transform=axes1.transAxes, fontsize=20)
# plot with ATLAS style
set_style('ATLAS', mpl=True)
fig2, axes2 = plot_with_matplotlib()
axes2.text(0.4, 0.8, 'matplotlib',
           verticalalignment='center', horizontalalignment='center',
           transform=axes2.transAxes, fontsize=20)
axes2.xaxis.set_minor_locator(AutoMinorLocator())
axes2.yaxis.set_minor_locator(AutoMinorLocator())
if not ROOT.gROOT.IsBatch():
    plt.show()
# wait for you to close the canvas before exiting
wait(True)
# Example: creating 1D/2D/3D histograms with rootpy, styling them and
# drawing with a legend; shows constant-, variable- and mixed-width bins.
print(__doc__)
from rootpy.extern.six.moves import range
from rootpy.plotting import Hist, Hist2D, Hist3D, HistStack, Legend, Canvas
from rootpy.interactive import wait
import random
# create a simple 1D histogram with 10 constant-width bins between 0 and 1
h_simple = Hist(10, 0, 1)
print(h_simple.name)
# If the name is not specified, a UUID is used so that ROOT never complains
# about two histograms having the same name.
# Alternatively you can specify the name (and the title or any other style
# attributes) in the constructor:
h_simple = Hist(10, -4, 12, name='my hist', title='Some Data',
                drawstyle='hist',
                legendstyle='F',
                fillstyle='/')
# fill the histogram
for i in range(1000):
    # all ROOT CamelCase methods are aliased by equivalent snake_case methods
    # so you can call fill() instead of Fill()
    h_simple.Fill(random.gauss(4, 3))
# easily set visual attributes
h_simple.linecolor = 'blue'
h_simple.fillcolor = 'green'
h_simple.fillstyle = '/'
# attributes may be accessed in the same way
print(h_simple.name)
print(h_simple.title)
print(h_simple.markersize)
# plot
canvas = Canvas(width=700, height=500)
canvas.SetLeftMargin(0.15)
canvas.SetBottomMargin(0.15)
canvas.SetTopMargin(0.10)
canvas.SetRightMargin(0.05)
h_simple.Draw()
# create the legend
legend = Legend([h_simple], pad=canvas,
                header='Header',
                leftmargin=0.05,
                rightmargin=0.5)
legend.Draw()
# 2D and 3D histograms are handled in the same way
# the constructor arguments are repetitions of #bins, left bound, right bound.
h2d = Hist2D(10, 0, 1, 50, -40, 10, name='2d hist')
h3d = Hist3D(3, -1, 4, 10, -1000, -200, 2, 0, 1, name='3d hist')
# variable-width bins may be created by passing the bin edges directly:
h1d_variable = Hist([1, 4, 10, 100])
h2d_variable = Hist2D([2, 4, 7, 100, 200], [-100, -50, 0, 10, 20])
h3d_variable = Hist3D([1, 3, 10], [20, 50, 100], [-10, -5, 10, 20])
# variable-width and constant-width bins can be mixed:
h2d_mixed = Hist2D([2, 10, 30], 10, 1, 5)
# wait for you to close all open canvases before exiting
# wait() will have no effect if ROOT is in batch mode:
# ROOT.gROOT.SetBatch(True)
wait()
import asyncio
import functools
import json
import logging
import math
import multiprocessing
import os
import shlex
import subprocess
import textwrap
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Optional, Tuple

import numpy as np
from PIL import Image, ImageDraw, ImageFont
from pymediainfo import MediaInfo
from pyrogram.errors import FloodWait, MessageNotModified
from pyrogram.types import Message
max_workers = multiprocessing.cpu_count() * 5
exc_ = ThreadPoolExecutor(max_workers=max_workers)
def get_text(message: "Message") -> Optional[str]:
    """Extract Text From Commands.

    Returns everything after the first whitespace-separated word of
    *message* (e.g. "/echo hi there" -> "hi there"), or None when the
    message has no text or carries no argument.
    """
    if message.text is None:
        return None
    if " " not in message.text:
        return None
    try:
        return message.text.split(None, 1)[1]
    except IndexError:
        # text is whitespace only: split(None, 1) yields no fields
        return None
def humanbytes(size):
    """Format a byte count as a human-readable string (e.g. "2.0 KiB").

    Returns "" for falsy input (0 or None). Uses binary (1024-based)
    prefixes up to EiB.
    """
    if not size:
        return ""
    power = 2**10
    raised_to_pow = 0
    dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti", 5: "Pi", 6: "Ei"}
    # BUG FIX: stop at the largest known prefix; sizes above the TiB range
    # previously raised KeyError on the final dict lookup.
    while size > power and raised_to_pow < 6:
        size /= power
        raised_to_pow += 1
    return str(round(size, 2)) + " " + dict_power_n[raised_to_pow] + "B"
def run_in_exc(f):
    """Decorator: run the blocking callable *f* in the shared thread pool
    and await its result from the running event loop."""
    @functools.wraps(f)
    async def wrapper(*args, **kwargs):
        loop = asyncio.get_running_loop()
        call = functools.partial(f, *args, **kwargs)
        return await loop.run_in_executor(exc_, call)
    return wrapper
def time_formatter(milliseconds: int) -> str:
    """Render a millisecond duration as e.g. "1 day(s), 2 hour(s), ...".

    Units with a zero value are omitted; 0 ms yields "".
    """
    seconds, milliseconds = divmod(int(milliseconds), 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    parts = []
    for value, unit in (
        (days, "day"),
        (hours, "hour"),
        (minutes, "minute"),
        (seconds, "second"),
        (milliseconds, "millisecond"),
    ):
        if value:
            parts.append("{} {}(s)".format(value, unit))
    return ", ".join(parts)
async def progress(current, total, message, start, type_of_ps, file_name=None):
    """Progress Bar For Showing Progress While Uploading / Downloading File - Normal.

    Edits *message* with a 10-segment bar, transferred/total sizes and an
    ETA. *start* is the transfer start timestamp; *type_of_ps* is the
    header line shown above the bar.

    NOTE(review): this function uses time.time(), FloodWait and
    MessageNotModified, none of which are imported in the current import
    block — they must be imported or this raises NameError at runtime.
    """
    now = time.time()
    diff = now - start
    # only redraw roughly every 10 seconds, or on completion
    if round(diff % 10.00) == 0 or current == total:
        percentage = current * 100 / total
        speed = current / diff  # bytes per second
        elapsed_time = round(diff) * 1000
        if elapsed_time == 0:
            return
        time_to_completion = round((total - current) / speed) * 1000
        estimated_total_time = elapsed_time + time_to_completion
        # 10-segment bar: filled "▰" vs empty "▱"
        progress_str = "{0}{1} {2}%\n".format(
            "".join(["▰" for i in range(math.floor(percentage / 10))]),
            "".join(["▱" for i in range(10 - math.floor(percentage / 10))]),
            round(percentage, 2),
        )
        tmp = progress_str + "{0} of {1}\nETA: {2}".format(
            humanbytes(current), humanbytes(total), time_formatter(estimated_total_time)
        )
        if file_name:
            try:
                await message.edit(
                    "{}\n**File Name:** `{}`\n{}".format(type_of_ps, file_name, tmp)
                )
            except FloodWait as e:
                # back off for the duration Telegram requests
                await asyncio.sleep(e.x)
            except MessageNotModified:
                pass
        else:
            try:
                await message.edit("{}\n{}".format(type_of_ps, tmp))
            except FloodWait as e:
                await asyncio.sleep(e.x)
            except MessageNotModified:
                pass
async def cb_progress(current, total, cb, start, type_of_ps, file_name=None):
    """Progress Bar For Showing Progress While Uploading / Downloading File - Inline.

    Same as progress(), but edits through the inline callback *cb*
    instead of a Message. Redraws roughly every 10 seconds or on
    completion.
    """
    now = time.time()
    diff = now - start
    if round(diff % 10.00) == 0 or current == total:
        percentage = current * 100 / total
        speed = current / diff  # bytes per second
        elapsed_time = round(diff) * 1000
        if elapsed_time == 0:
            return
        time_to_completion = round((total - current) / speed) * 1000
        estimated_total_time = elapsed_time + time_to_completion
        # 10-segment bar: filled "▰" vs empty "▱"
        progress_str = "{0}{1} {2}%\n".format(
            "".join(["▰" for i in range(math.floor(percentage / 10))]),
            "".join(["▱" for i in range(10 - math.floor(percentage / 10))]),
            round(percentage, 2),
        )
        tmp = progress_str + "{0} of {1}\nETA: {2}".format(
            humanbytes(current), humanbytes(total), time_formatter(estimated_total_time)
        )
        if file_name:
            try:
                await cb.edit_message_text(
                    "{}\n**File Name:** `{}`\n{}".format(type_of_ps, file_name, tmp)
                )
            except FloodWait as e:
                await asyncio.sleep(e.x)
            except MessageNotModified:
                pass
        else:
            try:
                # BUG FIX: previously called the undefined name `message`;
                # the callback object in this scope is `cb`.
                await cb.edit_message_text("{}\n{}".format(type_of_ps, tmp))
            except FloodWait as e:
                await asyncio.sleep(e.x)
            except MessageNotModified:
                pass
async def add_text_img(image_path, text):
    """Draw meme-style top/bottom captions onto the image at *image_path*.

    *text* is split on ";" into an upper and an optional lower caption.
    Saves and returns the annotated image as "memify.webp".
    """
    font_size = 12  # percent of the image height
    stroke_width = 1
    if ";" in text:
        upper_text, lower_text = text.split(";")
    else:
        upper_text = text
        lower_text = ""
    img = Image.open(image_path).convert("RGBA")
    img_info = img.info
    image_width, image_height = img.size
    font = ImageFont.truetype(
        font="naya/resources/fonts/default.ttf",
        size=int(image_height * font_size) // 100,
    )
    draw = ImageDraw.Draw(img)
    # NOTE(review): font.getsize() was removed in Pillow 10 — confirm the
    # pinned Pillow version or migrate to getbbox()/getlength().
    char_width, char_height = font.getsize("A")
    chars_per_line = image_width // char_width
    top_lines = textwrap.wrap(upper_text, width=chars_per_line)
    bottom_lines = textwrap.wrap(lower_text, width=chars_per_line)
    if top_lines:
        y = 10
        for line in top_lines:
            line_width, line_height = font.getsize(line)
            x = (image_width - line_width) / 2  # center horizontally
            draw.text(
                (x, y),
                line,
                fill="white",
                font=font,
                stroke_width=stroke_width,
                stroke_fill="black",
            )
            y += line_height
    if bottom_lines:
        # start high enough that every bottom line fits above the edge
        y = image_height - char_height * len(bottom_lines) - 15
        for line in bottom_lines:
            line_width, line_height = font.getsize(line)
            x = (image_width - line_width) / 2
            draw.text(
                (x, y),
                line,
                fill="white",
                font=font,
                stroke_width=stroke_width,
                stroke_fill="black",
            )
            y += line_height
    final_image = os.path.join("memify.webp")
    img.save(final_image, **img_info)
    return final_image
def get_arg(message: "Message") -> str:
    """Return everything after the command word, or "" when absent.

    Tolerates a single space directly after the prefix character
    (e.g. "/ cmd arg" is treated like "/cmd arg").
    """
    msg = message.text or ""
    # BUG FIX: guard the msg[1] lookup — one-character messages such as
    # "/" previously raised IndexError (and None text raised TypeError).
    if len(msg) > 1 and msg[1] == " ":
        msg = msg.replace(" ", "", 1)
    split = msg[1:].replace("\n", " \n").split(" ")
    if " ".join(split[1:]).strip() == "":
        return ""
    return " ".join(split[1:])
def get_args(message: Message):
    """Return the command arguments as a shlex-split list.

    Accepts either a Message or a plain string. Returns False for empty
    input, [] when only the command word is present, and the raw argument
    string when shlex cannot tokenize it (unbalanced quotes).
    """
    try:
        message = message.text
    except AttributeError:
        # already a plain string
        pass
    if not message:
        return False
    pieces = message.split(maxsplit=1)
    if len(pieces) <= 1:
        return []
    rest = pieces[1]
    try:
        tokens = shlex.split(rest)
    except ValueError:
        # unbalanced quoting: hand back the raw argument string
        return rest
    return [tok for tok in tokens if tok]
async def run_cmd(cmd: str) -> Tuple[str, str, int, int]:
    """Run Commands.

    Executes *cmd* (shlex-split, no shell) and returns a tuple of
    (stdout, stderr, returncode, pid), with both streams decoded and
    stripped.
    """
    proc = await asyncio.create_subprocess_exec(
        *shlex.split(cmd),
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    raw_out, raw_err = await proc.communicate()
    out, err = (s.decode("utf-8", "replace").strip() for s in (raw_out, raw_err))
    return out, err, proc.returncode, proc.pid
async def bash(cmd):
    """Run *cmd* through the shell and return (stdout, stderr) as
    stripped text."""
    proc = await asyncio.create_subprocess_shell(
        cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    raw_out, raw_err = await proc.communicate()
    return raw_out.decode().strip(), raw_err.decode().strip()
def generate_meme(
    image_path,
    top_text,
    bottom_text="",
    font_path="naya/resources/fonts/impact.ttf",
    font_size=11,
):
    """Make Memes Like A Pro.

    Draws upper-cased *top_text* / *bottom_text* captions (white, with a
    faked black outline) on the image and saves it as "memeimg.webp".
    *font_size* is a percentage of the image height.
    """
    im = Image.open(image_path)
    draw = ImageDraw.Draw(im)
    image_width, image_height = im.size
    font = ImageFont.truetype(font=font_path, size=int(image_height * font_size) // 100)
    top_text = top_text.upper()
    bottom_text = bottom_text.upper()
    # NOTE(review): font.getsize() was removed in Pillow 10 — confirm the
    # pinned Pillow version or migrate to getbbox()/getlength().
    char_width, char_height = font.getsize("A")
    chars_per_line = image_width // char_width
    top_lines = textwrap.wrap(top_text, width=chars_per_line)
    bottom_lines = textwrap.wrap(bottom_text, width=chars_per_line)
    y = 9
    for line in top_lines:
        line_width, line_height = font.getsize(line)
        x = (image_width - line_width) / 2  # center horizontally
        # draw the line four times slightly offset in black to fake an outline
        draw.text((x - 2, y - 2), line, font=font, fill="black")
        draw.text((x + 2, y - 2), line, font=font, fill="black")
        draw.text((x + 2, y + 2), line, font=font, fill="black")
        draw.text((x - 2, y + 2), line, font=font, fill="black")
        draw.text((x, y), line, fill="white", font=font)
        y += line_height
    # start high enough that every bottom line fits above the lower edge
    y = image_height - char_height * len(bottom_lines) - 14
    for line in bottom_lines:
        line_width, line_height = font.getsize(line)
        x = (image_width - line_width) / 2
        draw.text((x - 2, y - 2), line, font=font, fill="black")
        draw.text((x + 2, y - 2), line, font=font, fill="black")
        draw.text((x + 2, y + 2), line, font=font, fill="black")
        draw.text((x - 2, y + 2), line, font=font, fill="black")
        draw.text((x, y), line, fill="white", font=font)
        y += line_height
    ok = "memeimg.webp"
    im.save(ok, "WebP")
async def convert_to_image(message, client) -> [None, str]:
    """Convert Most Media Formats To Raw Image.

    Downloads the media of *message*'s reply (photo, sticker, video,
    animation or audio thumbnail) and returns a path to a raster image,
    or None when there is nothing convertible.
    """
    if not message:
        return None
    if not message.reply_to_message:
        return None
    final_path = None
    if not (
        message.reply_to_message.video
        or message.reply_to_message.photo
        or message.reply_to_message.sticker
        or message.reply_to_message.media
        or message.reply_to_message.animation
        or message.reply_to_message.audio
    ):
        return None
    if message.reply_to_message.photo:
        final_path = await message.reply_to_message.download()
    elif message.reply_to_message.sticker:
        if message.reply_to_message.sticker.mime_type == "image/webp":
            # static sticker: re-save the downloaded webp as PNG
            final_path = "webp_to_png_s_proton.png"
            path_s = await message.reply_to_message.download()
            im = Image.open(path_s)
            im.save(final_path, "PNG")
        else:
            # animated sticker: render frame 0 via lottie_convert.py
            path_s = await client.download_media(message.reply_to_message)
            final_path = "lottie_proton.png"
            cmd = (
                f"lottie_convert.py --frame 0 -if lottie -of png {path_s} {final_path}"
            )
            await run_cmd(cmd)
    elif message.reply_to_message.audio:
        # use the audio's embedded album-art thumbnail
        thumb = message.reply_to_message.audio.thumbs[0].file_id
        final_path = await client.download_media(thumb)
    elif message.reply_to_message.video or message.reply_to_message.animation:
        # grab a 500x500 frame with ffmpeg
        final_path = "fetched_thumb.png"
        vid_path = await client.download_media(message.reply_to_message)
        await run_cmd(f"ffmpeg -i {vid_path} -filter:v scale=500:500 -an {final_path}")
    return final_path
def resize_image(image):
    """Scale *image* so its longest side is 512 px, save as "Sticker.png".

    Images smaller than 512 px in both dimensions are scaled up; larger
    ones are shrunk with thumbnail(). The source file is deleted and the
    new file name is returned.
    """
    im = Image.open(image)
    maxsize = (512, 512)
    # BUG FIX: the original test `(im.width and im.height) < 512` only
    # compared the height; upscaling is for images that are small in
    # BOTH dimensions.
    if im.width < 512 and im.height < 512:
        size1 = im.width
        size2 = im.height
        if im.width > im.height:
            scale = 512 / size1
            size1new = 512
            size2new = size2 * scale
        else:
            scale = 512 / size2
            size1new = size1 * scale
            size2new = 512
        size1new = math.floor(size1new)
        size2new = math.floor(size2new)
        sizenew = (size1new, size2new)
        im = im.resize(sizenew)
    else:
        # at least one side is already >= 512: shrink to fit in 512x512
        im.thumbnail(maxsize)
    file_name = "Sticker.png"
    im.save(file_name, "PNG")
    if os.path.exists(image):
        os.remove(image)
    return file_name
class Media_Info:
    """Static helpers for inspecting downloaded media via pymediainfo."""

    @staticmethod
    def data(media: str) -> dict:
        """Get downloaded media's information.

        Returns a dict describing the first video track of *media*
        (type, format, duration, pixel sizes, aspect ratio, frame
        rate/count and file size), or None when no video track exists.
        """
        found = False
        # ROBUSTNESS FIX: initialise so files with no tracks at all
        # return None instead of raising NameError at the return below.
        dict_ = None
        media_info = MediaInfo.parse(media)
        for track in media_info.tracks:
            if track.track_type == "Video":
                found = True
                type_ = track.track_type
                format_ = track.format
                duration_1 = track.duration
                other_duration_ = track.other_duration
                duration_2 = (
                    f"{other_duration_[0]} - ({other_duration_[3]})"
                    if other_duration_
                    else None
                )
                pixel_ratio_ = [track.width, track.height]
                aspect_ratio_1 = track.display_aspect_ratio
                other_aspect_ratio_ = track.other_display_aspect_ratio
                aspect_ratio_2 = other_aspect_ratio_[0] if other_aspect_ratio_ else None
                fps_ = track.frame_rate
                fc_ = track.frame_count
                media_size_1 = track.stream_size
                other_media_size_ = track.other_stream_size
                media_size_2 = (
                    [
                        other_media_size_[1],
                        other_media_size_[2],
                        other_media_size_[3],
                        other_media_size_[4],
                    ]
                    if other_media_size_
                    else None
                )
            # the dict literal is only evaluated when a video track was
            # found, so the names above are guaranteed to be bound
            dict_ = (
                {
                    "media_type": type_,
                    "format": format_,
                    "duration_in_ms": duration_1,
                    "duration": duration_2,
                    "pixel_sizes": pixel_ratio_,
                    "aspect_ratio_in_fraction": aspect_ratio_1,
                    "aspect_ratio": aspect_ratio_2,
                    "frame_rate": fps_,
                    "frame_count": fc_,
                    "file_size_in_bytes": media_size_1,
                    "file_size": media_size_2,
                }
                if found
                else None
            )
        return dict_
async def resize_media(media: str, video: bool, fast_forward: bool) -> str:
    """Resize *media* for use as a Telegram sticker.

    Videos become a <=3 s, <=256 KiB, 512 px-bound VP9 webm (optionally
    sped up to fit within 3 s); photos become a 512 px-bound
    "sticker.png". The input file is deleted; returns the new path.
    """
    if video:
        info_ = Media_Info.data(media)
        width = info_["pixel_sizes"][0]
        height = info_["pixel_sizes"][1]
        sec = info_["duration_in_ms"]
        s = round(float(sec)) / 1000  # duration in seconds
        # keep the aspect ratio; -1 lets ffmpeg derive the other side
        if height == width:
            height, width = 512, 512
        elif height > width:
            height, width = 512, -1
        elif width > height:
            height, width = -1, 512
        resized_video = f"{media}.webm"
        if fast_forward:
            if s > 3:
                # speed the clip up so it fits in the 3 s sticker limit
                fract_ = 3 / s
                ff_f = round(fract_, 2)
                set_pts_ = ff_f - 0.01 if ff_f > fract_ else ff_f
                cmd_f = f"-filter:v 'setpts={set_pts_}*PTS',scale={width}:{height}"
            else:
                cmd_f = f"-filter:v scale={width}:{height}"
        else:
            cmd_f = f"-filter:v scale={width}:{height}"
        fps_ = float(info_["frame_rate"])
        fps_cmd = "-r 30 " if fps_ > 30 else ""  # cap the frame rate at 30
        cmd = f"ffmpeg -i {media} {cmd_f} -ss 00:00:00 -to 00:00:03 -an -c:v libvpx-vp9 {fps_cmd}-fs 256K {resized_video}"
        _, error, __, ___ = await run_cmd(cmd)
        os.remove(media)
        return resized_video
    image = Image.open(media)
    maxsize = 512
    scale = maxsize / max(image.width, image.height)
    new_size = (int(image.width * scale), int(image.height * scale))
    # NOTE(review): Image.LANCZOS is deprecated in Pillow >= 9.1 in favour
    # of Image.Resampling.LANCZOS — confirm the pinned Pillow version.
    image = image.resize(new_size, Image.LANCZOS)
    resized_photo = "sticker.png"
    image.save(resized_photo)
    os.remove(media)
    return resized_photo
async def convert_vid_to_vidnote(input_vid: str, final_path: str):
    """Convert Video To Video Note (Round).

    Crops the video to a square (side = the shorter dimension) when it is
    not square, otherwise just renames it to *final_path*.
    """
    media_info = MediaInfo.parse(input_vid)
    for track in media_info.tracks:
        if track.track_type == "Video":
            aspect_ratio = track.display_aspect_ratio
            height = track.height
            width = track.width
    # NOTE(review): pymediainfo often reports display_aspect_ratio as a
    # string (e.g. "1.000"), in which case `!= 1` is True even for square
    # video — verify against the pymediainfo version in use.
    if aspect_ratio != 1:
        crop_by = min(height, width)
        await run_cmd(
            f'ffmpeg -i {input_vid} -vf "crop={crop_by}:{crop_by}" {final_path}'
        )
        os.remove(input_vid)
    else:
        os.rename(input_vid, final_path)
async def convert_image_to_image_note(input_path):
    """Crop Image To Circle.

    Adds an alpha channel that is opaque only inside an ellipse covering
    the image, saves as "converted_by_FridayUB.webp" and returns the path.
    """
    img = Image.open(input_path).convert("RGB")
    npImage = np.array(img)
    # NOTE(review): PIL's Image.size is (width, height), so these names
    # are swapped; the pieslice box below still spans the whole image.
    h, w = img.size
    alpha = Image.new("L", img.size, 0)
    draw = ImageDraw.Draw(alpha)
    draw.pieslice([0, 0, h, w], 0, 360, fill=255)
    npAlpha = np.array(alpha)
    # stack the ellipse mask as the image's alpha channel
    npImage = np.dstack((npImage, npAlpha))
    img_path = "converted_by_FridayUB.webp"
    Image.fromarray(npImage).save(img_path)
    return img_path
def extract_w_h(file):
    """Extract Video's Width & Height.

    Probes *file* with ffprobe and returns (width, height) of the first
    stream; returns None implicitly when ffprobe fails.
    """
    probe_cmd = [
        "ffprobe",
        "-v",
        "quiet",
        "-print_format",
        "json",
        "-show_format",
        "-show_streams",
        file,
    ]
    try:
        raw = subprocess.check_output(probe_cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as exc:
        logging.error(str(exc))
    else:
        info = json.loads(raw.decode("UTF-8"))
        first_stream = info["streams"][0]
        return int(first_stream["width"]), int(first_stream["height"])
class Weebify:
    """Parallel alphabets for the "weebify" text filter.

    NORMIE_FONT[i] maps to WEEBY_FONT[i]: latin a-z onto visually
    similar CJK glyphs.
    """
    NORMIE_FONT = [
        "a",
        "b",
        "c",
        "d",
        "e",
        "f",
        "g",
        "h",
        "i",
        "j",
        "k",
        "l",
        "m",
        "n",
        "o",
        "p",
        "q",
        "r",
        "s",
        "t",
        "u",
        "v",
        "w",
        "x",
        "y",
        "z",
    ]
    WEEBY_FONT = [
        "卂",
        "乃",
        "匚",
        "刀",
        "乇",
        "下",
        "厶",
        "卄",
        "工",
        "丁",
        "长",
        "乚",
        "从",
        "𠘨",
        "口",
        "尸",
        "㔿",
        "尺",
        "丂",
        "丅",
        "凵",
        "リ",
        "山",
        "乂",
        "丫",
        "乙",
    ]
class Fs:
    @property
    def F(self):
        """Render a large "F" glyph built from rows of the string "FF"."""
        paytext = "FF"
        # each format slot is one row; the multipliers shape the letter F
        pay = "{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}".format(
            paytext * 8,
            paytext * 8,
            paytext * 2,
            paytext * 2,
            paytext * 2,
            paytext * 6,
            paytext * 6,
            paytext * 2,
            paytext * 2,
            paytext * 2,
            paytext * 2,
            paytext * 2,
        )
        return pay
BIG_F = "██████╗\n" "██╔═══╝\n" "█████╗\n" "██╔══╝\n" "██║\n" "╚═╝"
FANCY_F = (
"⠀⠀⠀⢀⡤⢶⣶⣶⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀\n"
"⠀⠀⢀⣠⣤⣤⣤⣿⣧⣀⣀⣀⣀⣀⣀⣀⣀⣤⡄⠀\n"
"⢠⣾⡟⠋⠁⠀⠀⣸⠇⠈⣿⣿⡟⠉⠉⠉⠙⠻⣿⡀\n"
"⢺⣿⡀⠀⠀⢀⡴⠋⠀⠀⣿⣿⡇⠀⠀⠀⠀⠀⠙⠇\n"
"⠈⠛⠿⠶⠚⠋⣀⣤⣤⣤⣿⣿⣇⣀⣀⣴⡆⠀⠀⠀\n"
"⠀⠀⠀⠀⠠⡞⠋⠀⠀⠀⣿⣿⡏⠉⠛⠻⣿⡀⠀⠀\n"
"⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⡇⠀⠀⠀⠈⠁⠀⠀\n"
"⠀⠀⣠⣶⣶⣶⣶⡄⠀⠀⣿⣿⡇⠀⠀⠀⠀⠀⠀⠀\n"
"⠀⢰⣿⠟⠉⠙⢿⡟⠀⠀⣿⣿⡇⠀⠀⠀⠀⠀⠀⠀\n"
"⠀⢸⡟⠀⠀⠀⠘⠀⠀⠀⣿⣿⠃⠀⠀⠀⠀⠀⠀⠀\n"
"⠀⠈⢿⡄⠀⠀⠀⠀⠀⣼⣿⠏⠀⠀⠀⠀⠀⠀⠀⠀\n"
"⠀⠀⠀⠙⠷⠶⠶⠶⠿⠟⠉⠀⠀⠀⠀⠀⠀⠀⠀\n"
)
class Eval:
    """Markdown message templates used by the eval (code-runner) command."""
    RUNNING = "**Expression:**\n```{}```\n\n**Running...**"
    ERROR = "**Expression:**\n```{}```\n\n**Error:**\n```{}```"
    SUCCESS = "**Expression:**\n```{}```\n\n**Success** | `None`"
    RESULT = "**Expression:**\n```{}```\n\n**Result:**\n```{}```"
    RESULT_FILE = "**Expression:**\n```{}```\n\n**Result:**\nView `output.txt` below ⤵"
    # log-channel variants: include a t.me deep link to the source chat
    ERROR_LOG = (
        "**Evaluation Query**\n"
        "```{}```\n"
        'erred in chat "[{}](t.me/c/{}/{})" with error\n'
        "```{}```"
    )
    SUCCESS_LOG = "Evaluation Query\n" "```{}```\n" 'succeeded in "[{}](t.me/c/{}/{})"'
    RESULT_LOG = (
        "Evaluation Query\n" "```{}```\n" 'executed in chat "[{}](t.me/c/{}/{})".'
    )
class WWW:
    """Message templates for network commands (speedtest, nearest DC)."""
    SpeedTest = (
        "Speedtest started at `{start}`\n\n"
        "Ping:\n{ping} ms\n\n"
        "Download:\n{download}\n\n"
        "Upload:\n{upload}\n\n"
        "ISP:\n__{isp}__"
    )
    NearestDC = "Country: `{}`\n" "Nearest Datacenter: `{}`\n" "This Datacenter: `{}`"
class MEMES:
REVERSE = (
"⠐⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠂\n"
"⠄⠄⣰⣾⣿⣿⣿⠿⠿⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣆⠄⠄\n"
"⠄⠄⣿⣿⣿⡿⠋⠄⡀⣿⣿⣿⣿⣿⣿⣿⣿⠿⠛⠋⣉⣉⣉⡉⠙⠻⣿⣿⠄⠄\n"
"⠄⠄⣿⣿⣿⣇⠔⠈⣿⣿⣿⣿⣿⡿⠛⢉⣤⣶⣾⣿⣿⣿⣿⣿⣿⣦⡀⠹⠄⠄\n"
"⠄⠄⣿⣿⠃⠄⢠⣾⣿⣿⣿⠟⢁⣠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡄⠄⠄\n"
"⠄⠄⣿⣿⣿⣿⣿⣿⣿⠟⢁⣴⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⠄⠄\n"
"⠄⠄⣿⣿⣿⣿⣿⡟⠁⣴⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠄⠄\n"
"⠄⠄⣿⣿⣿⣿⠋⢠⣾⣿⣿⣿⣿⣿⣿⡿⠿⠿⠿⠿⣿⣿⣿⣿⣿⣿⣿⣿⠄⠄\n"
"⠄⠄⣿⣿⡿⠁⣰⣿⣿⣿⣿⣿⣿⣿⣿⠗⠄⠄⠄⠄⣿⣿⣿⣿⣿⣿⣿⡟⠄⠄\n"
"⠄⠄⣿⡿⠁⣼⣿⣿⣿⣿⣿⣿⡿⠋⠄⠄⠄⣠⣄⢰⣿⣿⣿⣿⣿⣿⣿⠃⠄⠄\n"
"⠄⠄⡿⠁⣼⣿⣿⣿⣿⣿⣿⣿⡇⠄⢀⡴⠚⢿⣿⣿⣿⣿⣿⣿⣿⣿⡏⢠⠄⠄\n"
"⠄⠄⠃⢰⣿⣿⣿⣿⣿⣿⡿⣿⣿⠴⠋⠄⠄⢸⣿⣿⣿⣿⣿⣿⣿⡟⢀⣾⠄⠄\n"
"⠄⠄⢀⣿⣿⣿⣿⣿⣿⣿⠃⠈⠁⠄⠄⢀⣴⣿⣿⣿⣿⣿⣿⣿⡟⢀⣾⣿⠄⠄\n"
"⠄⠄⢸⣿⣿⣿⣿⣿⣿⣿⠄⠄⠄⠄⢶⣿⣿⣿⣿⣿⣿⣿⣿⠏⢀⣾⣿⣿⠄⠄\n"
"⠄⠄⣿⣿⣿⣿⣿⣿⣿⣷⣶⣶⣶⣶⣶⣿⣿⣿⣿⣿⣿⣿⠋⣠⣿⣿⣿⣿⠄⠄\n"
"⠄⠄⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠟⢁⣼⣿⣿⣿⣿⣿⠄⠄\n"
"⠄⠄⢻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠟⢁⣴⣿⣿⣿⣿⣿⣿⣿⠄⠄\n"
"⠄⠄⠈⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠟⢁⣴⣿⣿⣿⣿⠗⠄⠄⣿⣿⠄⠄\n"
"⠄⠄⣆⠈⠻⢿⣿⣿⣿⣿⣿⣿⠿⠛⣉⣤⣾⣿⣿⣿⣿⣿⣇⠠⠺⣷⣿⣿⠄⠄\n"
"⠄⠄⣿⣿⣦⣄⣈⣉⣉⣉⣡⣤⣶⣿⣿⣿⣿⣿⣿⣿⣿⠉⠁⣀⣼⣿⣿⣿⠄⠄\n"
"⠄⠄⠻⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣶⣶⣾⣿⣿⡿⠟⠄⠄\n"
"⠠⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄\n"
)
SLAP_TEMPLATES = [
"{hits} {victim} with a {item}.",
"{hits} {victim} in the face with a {item}.",
"{hits} {victim} around a bit with a {item}.",
"{throws} a {item} at {victim}.",
"grabs a {item} and {throws} it at {victim}'s face.",
"{hits} a {item} at {victim}.",
"{throws} a few {item} at {victim}.",
"grabs a {item} and {throws} it in {victim}'s face.",
"launches a {item} in {victim}'s general direction.",
"sits on {victim}'s face while slamming a {item} {where}.",
"starts slapping {victim} silly with a {item}.",
"pins {victim} down and repeatedly {hits} them with a {item}.",
"grabs up a {item} and {hits} {victim} with it.",
"starts slapping {victim} silly with a {item}.",
"holds {victim} down and repeatedly {hits} them with a {item}.",
"prods {victim} with a {item}.",
"picks up a {item} and {hits} {victim} with it.",
"ties {victim} to a chair and {throws} a {item} at them.",
"{hits} {victim} {where} with a {item}.",
"ties {victim} to a pole and whips them {where} with a {item}."
"gave a friendly push to help {victim} learn to swim in lava.",
"sent {victim} to /dev/null.",
"sent {victim} down the memory hole.",
"beheaded {victim}.",
"threw {victim} off a building.",
"replaced all of {victim}'s music with Nickelback.",
"spammed {victim}'s email.",
"made {victim} a knuckle sandwich.",
"slapped {victim} with pure nothing.",
"hit {victim} with a small, interstellar spaceship.",
"quickscoped {victim}.",
"put {victim} in check-mate.",
"RSA-encrypted {victim} and deleted the private key.",
"put {victim} in the friendzone.",
"slaps {victim} with a DMCA takedown request!",
]
ITEMS = [
"cast iron skillet",
"large trout",
"baseball bat",
"cricket bat",
"wooden cane",
"nail",
"printer",
"shovel",
"pair of trousers",
"CRT monitor",
"diamond sword",
"baguette",
"physics textbook",
"toaster",
"portrait of Richard Stallman",
"television",
"mau5head",
"five ton truck",
"roll of duct tape",
"book",
"laptop",
"old television",
"sack of rocks",
"rainbow trout",
"cobblestone block",
"lava bucket",
"rubber chicken",
"spiked bat",
"gold block",
"fire extinguisher",
"heavy rock",
"chunk of dirt",
"beehive",
"piece of rotten meat",
"bear",
"ton of bricks",
]
THROW = [
"throws",
"flings",
"chucks",
"hurls",
]
HIT = [
"hits",
"whacks",
"slaps",
"smacks",
"bashes",
]
WHERE = ["in the chest", "on the head", "on the butt", "on the crotch"]
REPLACEMENT_MAP = {
"a": "ɐ",
"b": "q",
"c": "ɔ",
"d": "p",
"e": "ǝ",
"f": "ɟ",
"g": "ƃ",
"h": "ɥ",
"i": "ᴉ",
"j": "ɾ",
"k": "ʞ",
"l": "l",
"m": "ɯ",
"n": "u",
"o": "o",
"p": "d",
"q": "b",
"r": "ɹ",
"s": "s",
"t": "ʇ",
"u": "n",
"v": "ʌ",
"w": "ʍ",
"x": "x",
"y": "ʎ",
"z": "z",
"A": "∀",
"B": "B",
"C": "Ɔ",
"D": "D",
"E": "Ǝ",
"F": "Ⅎ",
"G": "פ",
"H": "H",
"I": "I",
"J": "ſ",
"K": "K",
"L": "˥",
"M": "W",
"N": "N",
"O": "O",
"P": "Ԁ",
"Q": "Q",
"R": "R",
"S": "S",
"T": "┴",
"U": "∩",
"V": "Λ",
"W": "M",
"X": "X",
"Y": "⅄",
"Z": "Z",
"0": "0",
"1": "Ɩ",
"2": "ᄅ",
"3": "Ɛ",
"4": "ㄣ",
"5": "ϛ",
"6": "9",
"7": "ㄥ",
"8": "8",
"9": "6",
",": "'",
".": "˙",
"?": "¿",
"!": "¡",
'"': ",,",
"'": ",",
"(": ")",
")": "(",
"[": "]",
"]": "[",
"{": "}",
"}": "{",
"<": ">",
">": "<",
"&": "⅋",
"_": "‾",
}
SHRUGS = [
"┐(´д`)┌",
"┐(´~`)┌",
"┐(´ー`)┌",
"┐( ̄ヘ ̄)┌",
"╮(╯∀╰)╭",
"╮(╯_╰)╭",
"┐(´д`)┌",
"┐(´∀`)┌",
"ʅ(́◡◝)ʃ",
"┐(゚~゚)┌",
"┐('д')┌",
"┐(‘~`;)┌",
"ヘ(´-`;)ヘ",
"┐( -“-)┌",
"ʅ(´◔౪◔)ʃ",
"ヽ(゜~゜o)ノ",
"ヽ(~~~ )ノ",
"┐(~ー~;)┌",
"┐(-。ー;)┌",
r"¯\_(ツ)_/¯",
r"¯\_(⊙_ʖ⊙)_/¯",
r"¯\_༼ ಥ ‿ ಥ ༽_/¯",
"乁( ⁰͡ Ĺ̯ ⁰͡ ) ㄏ",
r"¯\_༼ •́ ͜ʖ •̀ ༽_/¯",
r"¯\_( ͡° ͜ʖ ͡°)_/¯",
r"¯\(°_o)/¯",
"┐( ∵ )┌",
r"¯\_༼ᴼل͜ᴼ༽_/¯",
"╮(. ❛ ᴗ ❛.)╭",
"乁༼◉‿◉✿༽ㄏ",
r"¯\(◉‿◉)/¯",
r"¯\_ʘ‿ʘ_/¯",
r"¯\_༼ ಥ ‿ ಥ ༽_/¯",
"╮(^▽^)╭",
"乁[ ◕ ᴥ ◕ ]ㄏ",
"乁[ᓀ˵▾˵ᓂ]ㄏ",
"┐(´(エ)`)┌",
"乁( ⁰͡ Ĺ̯ ⁰͡ ) ㄏ",
r"¯\_( ͠° ͟ʖ °͠ )_/¯",
"乁( •_• )ㄏ",
"乁| ・ 〰 ・ |ㄏ",
"┐(‘~;)┌",
"┐( ̄ヘ ̄)┌",
"┐(´д)┌",
"乁( . ര ʖ̯ ര . )ㄏ",
"乁 ˘ o ˘ ㄏ",
"乁ʕ •̀ •́ ʔㄏ",
r"¯\_(◕෴◕)_/¯",
r"¯\_〳 •̀ o •́ 〵_/¯",
"乁║ ˙ 益 ˙ ║ㄏ",
]
BRAIN = [
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n🧠 <(^_^ <)🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n🧠 <(^_^ <) 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n🧠 <(^_^ <) 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n🧠 <(^_^ <) 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n🧠 <(^_^ <) 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n🧠<(^_^ <) 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n(> ^_^)>🧠 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n (> ^_^)>🧠 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n (> ^_^)>🧠 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n (> ^_^)>🧠 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n (> ^_^)>🧠 🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n (> ^_^)>🧠🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n (> ^_^)>🗑",
"YOᑌᖇ ᗷᖇᗩIᑎ ➡️ 🧠\n\n <(^_^ <)🗑",
]
COCK = (
"⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠿⠿⢿⣿⣿⠿⠛⠿⣿⣿⣿⣿⣿\n"
"⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠿⠟⠉⠄⣀⡤⢤⣤⣈⠁⣠⡔⠶⣾⣿⣿⣿\n"
"⣿⣿⣿⣿⣿⣿⣿⡿⠛⠋⠁⠄⠄⠄⣼⣿⠁⡀⢹⣿⣷⢹⡇⠄⠎⣿⣿⣿\n"
"⣿⣿⣿⠿⠛⠉⠁⠄⠄⠄⠄⠄⠄⠄⠹⣇⣀⣡⣾⣿⡿⠉⠛⠒⠒⠋⠉⢸\n"
"⡿⠋⠁⠄⠄⢀⣤⣤⡀⠄⠄⠄⠄⠄⠄⠈⠙⠛⠛⠉⠄⠄⠄⠄⠄⠄⠄⠈\n"
"⠄⠄⠄⠄⠄⢹⣧⡈⠿⣷⣄⣀⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⢀⣠⢄⣾\n"
"⠄⠄⠄⠄⠄⠈⠻⢿⣶⣌⣙⡛⠛⠿⠶⠶⠶⠶⠶⠖⣒⣒⣚⣋⡩⢱⣾⣿\n"
"⠄⠄⠄⠄⠄⠄⠄⠄⠈⠉⠛⠛⠛⠻⠿⠿⠟⠛⠛⠛⠉⢉⣥⣶⣾⣿⣿⣿\n"
"⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠒⠶⣿⣿⣿⣿⣿⣿⣿⣿\n"
"⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠈⠻⣿⣿⣿⣿⣿⣿\n"
"⣿⡿⠛⠛⠛⢻⣿⠿⠛⠛⠛⢿⣿⣿⡿⠛⠛⠛⢻⡟⠛⣿⡿⠛⣻⣿⣿⣿\n"
"⡟⠄⣼⣿⣿⣿⡇⠄⣾⣿⣧⠄⢻⡏⠄⣼⣿⣿⣿⡇⠄⡟⢀⣴⣿⣿⣿⣿\n"
"⡇⠄⣿⣿⣿⣿⡄⠄⣿⣿⣿⠄⢸⡇⠄⣿⣿⣿⣿⡇⠄⣀⠈⢻⣿⣿⣿⣿\n"
"⣿⣄⠈⠙⠛⢻⣧⡄⠙⠛⠉⣠⣿⣷⣄⠈⠙⠛⢹⡇⠄⣿⣧⠄⠻⣿⣿⣿\n"
)
TABLE_FLIPS = [
"(╯°Д°)╯︵/(.□ . \)",
"(˚Õ˚)ر ~~~~╚╩╩╝",
"(ノಠ益ಠ)ノ彡┻━┻",
"(╯°□°)╯︵ ┻━┻",
"(┛◉Д◉)┛彡┻━┻",
"┻━┻︵ \(°□°)/ ︵ ┻━┻",
"(┛ಠ_ಠ)┛彡┻━┻",
"(╯°□°)╯︵ ʞooqǝɔɐℲ",
]
GAMBAR_KONTOL = """
⣠⡶⠚⠛⠲⢄⡀
⣼⠁ ⠀⠀⠀ ⠳⢤⣄
⢿⠀⢧⡀⠀⠀⠀⠀⠀⢈⡇
⠈⠳⣼⡙⠒⠶⠶⠖⠚⠉⠳⣄
⠀⠀⠈⣇⠀⠀⠀⠀⠀⠀⠀⠈⠳⣄
⠀⠀⠀⠘⣆ ⠀⠀⠀⠀ ⠀⠈⠓⢦⣀
⠀⠀⠀⠀⠈⢳⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠙⠲⢤
⠀⠀⠀⠀⠀⠀⠙⢦⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⢧
⠀⠀⠀⠀⠀⠀⠀⡴⠋⠓⠦⣤⡀⠀⠀⠀⠀⠀⠀⠀⠈⣇
⠀⠀⠀⠀⠀⠀⣸⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⡄
⠀⠀⠀⠀⠀⠀⣿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⡇
⠀⠀⠀⠀⠀⠀⢹⡄⠀⠀⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⠃
⠀⠀⠀⠀⠀⠀⠀⠙⢦⣀⣳⡀⠀⠀⠀⠀⠀⠀⠀⠀⣰⠏
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠙⠛⢦⣀⣀⣀⣀⣠⡴⠚⠉
"""
GAMBAR_TITIT = """
😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋😋
😋😋😋😋😋😋
😋😋😋 😋😋😋
😋😋 😋😋
""" | /rootsuja-1.9.2.tar.gz/rootsuja-1.9.2/kynaylibs/nan/utils/constants.py | 0.58059 | 0.522507 | constants.py | pypi |
from math import ceil
from pyrogram.types import InlineKeyboardButton
class EqInlineKeyboardButton(InlineKeyboardButton):
    """Inline keyboard button that compares and sorts by its ``text`` label.

    Lets lists of buttons be passed to ``sorted()`` (see
    ``paginate_modules``) and compared for equality.

    NOTE(review): defining ``__eq__`` sets ``__hash__`` to ``None``, so
    instances are unhashable — fine for sorting, but do not put them in
    sets or use them as dict keys.
    """
    def __eq__(self, other):
        return self.text == other.text
    def __lt__(self, other):
        return self.text < other.text
    def __gt__(self, other):
        return self.text > other.text
def paginate_modules(page_n, module_dict, prefix, chat=None):
    """Build a paginated inline keyboard listing help modules.

    :param page_n: requested page number (wraps around modulo page count)
    :param module_dict: mapping whose values expose a ``__MODULE__`` name
    :param prefix: callback-data prefix, e.g. ``"help"``
    :param chat: optional chat id embedded in the callback data
    :return: list of button rows (tuples) suitable for InlineKeyboardMarkup

    Fixes over the previous version: the dead ``len(modules) - i == 2``
    branch is gone (pairing two-by-two always leaves a remainder of 0 or
    1), and an empty ``module_dict`` no longer raises ZeroDivisionError
    from ``page_n % 0``.
    """
    def _button(module_name):
        # Callback data encodes the module name (and optionally the chat).
        target = module_name.replace(" ", "_").lower()
        if not chat:
            data = "{}_module({})".format(prefix, target)
        else:
            data = "{}_module({},{})".format(prefix, chat, target)
        return EqInlineKeyboardButton(module_name, callback_data=data)

    modules = sorted(_button(x.__MODULE__) for x in module_dict.values())

    # Two buttons per row; a trailing odd module gets its own row.
    pairs = list(zip(modules[::2], modules[1::2]))
    if len(modules) % 2 == 1:
        pairs.append((modules[-1],))

    line = 4  # rows of module buttons shown per page
    # `or 1` guards the empty case so the modulo below never divides by 0.
    max_num_pages = ceil(len(pairs) / line) or 1
    modulo_page = page_n % max_num_pages

    if len(pairs) > line:
        # Slice out this page's rows and append the prev/next nav row.
        pairs = pairs[modulo_page * line : line * (modulo_page + 1)] + [
            (
                EqInlineKeyboardButton(
                    "❮",
                    callback_data="{}_prev({})".format(prefix, modulo_page),
                ),
                EqInlineKeyboardButton(
                    "❯",
                    callback_data="{}_next({})".format(prefix, modulo_page),
                ),
            )
        ]
    return pairs
import asyncio
import re
import traceback
from io import BytesIO
from typing import Dict
from PIL import Image
from pyrogram import Client, errors, types
META_COMMENTS = re.compile(r"^ *# *meta +(\S+) *: *(.*?)\s*$", re.MULTILINE)
interact_with_to_delete = []
def text(message: "types.Message") -> str:
    """Return the message's text, falling back to its caption."""
    return message.text or message.caption
async def aexec(code, c, m):
    """Wrap *code* in a throwaway async function and await it with (c, m)."""
    body = "\n".join("    " + src_line for src_line in code.split("\n"))
    exec(f"async def __aexec(c, m):\n{body}")
    return await locals()["__aexec"](c, m)
async def shell_exec(code, treat=True):
    """Run *code* in a subshell and return ``(stdout, process)``.

    stderr is merged into stdout.  When *treat* is true the output is
    decoded and stripped; otherwise the raw bytes are returned.
    """
    process = await asyncio.create_subprocess_shell(
        code,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.STDOUT,
    )
    raw_output, _ = await process.communicate()
    return (raw_output.decode().strip() if treat else raw_output), process
def format_exc(e: Exception, suffix="") -> str:
    """Render an exception as an HTML snippet for a Telegram message.

    Pyrogram ``RPCError``s get a dedicated "Telegram API error" layout;
    any other exception falls back to a generic "Error!" layout.  The
    optional *suffix* is appended in bold to either variant.
    """
    # Side effect: dump the currently-handled traceback to stderr
    # (assumes we are called from inside an ``except`` block).
    traceback.print_exc()
    if isinstance(e, errors.RPCError):
        return (
            f"<b>Telegram API error!</b>\n"
            f"<code>[{e.CODE} {e.ID or e.NAME}] — {e.MESSAGE.format(value=e.value)}</code>\n\n<b>{suffix}</b>"
        )
    return (
        f"<b>Error!</b>\n"
        f"<code>{e.__class__.__name__}: {e}</code>\n\n<b>{suffix}</b>"
    )
def with_reply(func):
    """Decorator: run *func* only when the message is a reply.

    If the incoming message does not reply to another message, the message
    is edited with an error notice instead of calling *func*.

    Fix: ``functools.wraps`` now preserves the wrapped handler's
    ``__name__``/``__doc__``, which the bare wrapper used to clobber.
    """
    from functools import wraps

    @wraps(func)
    async def wrapped(client: "Client", message: "types.Message"):
        if not message.reply_to_message:
            await message.edit("<b>Reply to message is required</b>")
        else:
            return await func(client, message)

    return wrapped
async def interact_with(message: types.Message) -> types.Message:
    """
    Check history with bot and return bot's response.

    Polls the chat's most recent message once per second until it is no
    longer one of our own messages; both our message id and the response
    id are queued in the module-level ``interact_with_to_delete`` list so
    the caller can clean the conversation up later.

    Example:

    .. code-block:: python

        bot_msg = await interact_with(await bot.send_message("@BotFather", "/start"))

    :param message: already sent message to bot
    :return: bot's response
    :raises RuntimeError: if the bot does not answer within ~5 polls
    """
    # Give the bot a moment to answer before the first history check.
    await asyncio.sleep(1)
    # noinspection PyProtectedMember
    response = [
        msg async for msg in message._client.get_chat_history(message.chat.id, limit=1)
    ]
    seconds_waiting = 0
    # While the newest message in the chat is still ours, the bot has not
    # replied yet — keep polling, one second per attempt.
    while response[0].from_user.is_self:
        seconds_waiting += 1
        if seconds_waiting >= 5:
            raise RuntimeError("bot didn't answer in 5 seconds")
        await asyncio.sleep(1)
        # noinspection PyProtectedMember
        response = [
            msg
            async for msg in message._client.get_chat_history(message.chat.id, limit=1)
        ]
    # Remember both sides of the exchange for later bulk deletion.
    interact_with_to_delete.append(message.id)
    interact_with_to_delete.append(response[0].id)
    return response[0]
def resize_image(
    input_img, output=None, img_type="PNG", size: int = 512, size2: int = None
):
    """Resize an image so its longer side is exactly *size* pixels.

    :param input_img: path or file-like object accepted by ``PIL.Image.open``
    :param output: writable buffer; when ``None`` a fresh ``BytesIO`` named
        ``sticker.<ext>`` is created (Telegram sticker convention)
    :param img_type: PIL output format name, e.g. ``"PNG"`` or ``"WEBP"``
    :param size: target length of the longer side (aspect ratio preserved)
    :param size2: when given, force an exact ``(size, size2)`` output size
        instead of preserving the aspect ratio
    :return: the buffer the resized image was saved into
    """
    if output is None:
        output = BytesIO()
        output.name = f"sticker.{img_type.lower()}"
    with Image.open(input_img) as img:
        # We used to use thumbnail(size) here, but it returns with a *max* dimension of 512,512
        # rather than making one side exactly 512, so we have to calculate dimensions manually :(
        if size2 is not None:
            size = (size, size2)
        elif img.width == img.height:
            size = (size, size)
        elif img.width < img.height:
            # max(..., 1) keeps extremely thin images from collapsing to 0px.
            size = (max(size * img.width // img.height, 1), size)
        else:
            size = (size, max(size * img.height // img.width, 1))
        img.resize(size).save(output, img_type)
    return output
def parse_meta_comments(code: str) -> Dict[str, str]:
    """Parse every ``# meta key: value`` comment in *code* into a dict.

    Returns an empty dict when no meta comments are present.

    Fix: the previous implementation used ``search().groups()``, which
    only ever returned the FIRST meta comment even though the regex is
    MULTILINE and the function name is plural.  ``findall`` yields one
    ``(key, value)`` tuple per match, so all meta comments are collected.
    """
    return dict(META_COMMENTS.findall(code))
async def rm_markdown(text: str):
    """Strip basic markdown markers (``*``, `` ` ``, ``_``) from *text*.

    (Kept ``async`` for interface compatibility with existing callers,
    although the body does no awaiting.  The original return line had a
    dataset metadata row fused onto it, which broke parsing — removed.)
    """
    return re.sub("[*`_]", "", text)
import requests
import json
import argparse
import argparse_actions
class Roomba(object):
    """
    This class abstracts a roowifi and gives attributes for telemetry data,
    as well as methods to command the robot.

    On construction it fetches one telemetry snapshot and derives
    ``charge``, ``capacity`` and ``battery`` (fraction 0-1, or ``None``
    when the reported capacity is zero).
    """
    def __init__(self, ip, user='admin', passwd='roombawifi'):
        self.ip = ip
        self.user = user
        self.passwd = passwd
        self.auth = (self.user, self.passwd)
        try:
            self._telemetry = self.telemetry()
        except Exception as error:
            # Was a bare ``except:`` — that also swallowed
            # KeyboardInterrupt/SystemExit and hid the real cause.
            # ``from error`` keeps the original failure in the traceback.
            raise Exception('Robot cannot be contacted at {%s}' % self.ip) from error
        # r18 = current charge, r19 = battery capacity (roowifi registers).
        self.charge = float(self._telemetry['response']['r18']['value'])
        self.capacity = float(self._telemetry['response']['r19']['value'])
        try:
            self.battery = self.charge / self.capacity
        except ZeroDivisionError:
            self.battery = None
    def telemetry(self):
        """
        Roomba method which fetches telemetry data about the robot. Returns
        a dictionary.
        """
        r = requests.get('http://' + self.ip + '/roomba.json', auth=self.auth)
        return json.loads(r.text)
    def _command(self, exec_code):
        """
        Send a roowifi 'exec' command (button-press emulation). Returns
        the requests response object.
        """
        return requests.get(
            'http://' + self.ip + '/rwr.cgi',
            params={'exec': exec_code},
            auth=self.auth,
        )
    def clean(self):
        """
        Roomba method which commands the robot to emulate the 'clean' button press. Returns
        requests response object.
        """
        return self._command('4')
    def spot(self):
        """
        Roomba method which commands the robot to emulate the 'spot clean' button press. Returns
        requests response object.
        """
        return self._command('5')
    def dock(self):
        """
        Roomba method which commands the robot to emulate the 'dock' button press. Returns
        requests response object.
        """
        return self._command('6')
    def idle(self):
        """
        Roomba method which commands the robot to idle. Returns
        requests response object.
        """
        return self._command('1')
def main():
    """CLI entry point: parse arguments and dispatch the chosen command.

    The command names are exactly the ``Roomba`` method names, so the
    if/elif ladder is replaced by attribute dispatch; ``choices=`` in
    argparse already guarantees the attribute exists.
    """
    parser = argparse.ArgumentParser(description='A commandline utility for controlling a Roomba via a RooWifi device.')
    parser.add_argument('ip_address', action=argparse_actions.ProperIpFormatAction, help='ip address of target robot')
    parser.add_argument('command', choices=('clean', 'spot', 'dock', 'idle'), help='command to be issued')
    parser.add_argument('-u', '--user', help='username')
    parser.add_argument('-p', '--passwd', help='password')
    try:
        args = parser.parse_args()
    except argparse_actions.InvalidIp as e:
        raise Exception('IP is invalid: {0}'.format(e.ip))
    # Only forward credentials the user actually supplied so the Roomba
    # class defaults ('admin' / 'roombawifi') still apply otherwise.
    kwargs = {}
    if args.user:
        kwargs['user'] = args.user
    if args.passwd:
        kwargs['passwd'] = args.passwd
    roomba = Roomba(args.ip_address, **kwargs)
    getattr(roomba, args.command)()
if __name__ == '__main__':
    main()
import unittest
from textwrap import dedent
import rope.base.taskhandle
import rope.refactor.introduce_parameter
import ropetest.refactor.extracttest
import ropetest.refactor.importutilstest
import ropetest.refactor.inlinetest
import ropetest.refactor.movetest
import ropetest.refactor.multiprojecttest
import ropetest.refactor.patchedasttest
import ropetest.refactor.renametest
import ropetest.refactor.restructuretest
import ropetest.refactor.suitestest
import ropetest.refactor.usefunctiontest
from rope.base.exceptions import InterruptedTaskError, RefactoringError
from rope.refactor.encapsulate_field import EncapsulateField
from rope.refactor.introduce_factory import IntroduceFactory
from rope.refactor.localtofield import LocalToField
from rope.refactor.method_object import MethodObject
from ropetest import testutils
from ropetest.refactor import change_signature_test, similarfindertest
class MethodObjectTest(unittest.TestCase):
    """Tests for rope's MethodObject refactoring (function -> callable class).

    Each test writes source into a scratch module, runs the refactoring at
    a given offset, and asserts against an exact expected source string —
    whitespace in the fixtures is therefore significant.
    """
    def setUp(self):
        super().setUp()
        self.project = testutils.sample_project()
        self.pycore = self.project.pycore
        self.mod = testutils.create_module(self.project, "mod")
    def tearDown(self):
        testutils.remove_project(self.project)
        super().tearDown()
    def test_empty_method(self):
        code = dedent("""\
            def func():
                pass
        """)
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index("func"))
        expected = dedent("""\
            class _New(object):
                def __call__(self):
                    pass
        """)
        self.assertEqual(
            expected,
            replacer.get_new_class("_New"),
        )
    def test_trivial_return(self):
        code = dedent("""\
            def func():
                return 1
        """)
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index("func"))
        expected = dedent("""\
            class _New(object):
                def __call__(self):
                    return 1
        """)
        self.assertEqual(
            expected,
            replacer.get_new_class("_New"),
        )
    def test_multi_line_header(self):
        code = dedent("""\
            def func(
                    ):
                return 1
        """)
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index("func"))
        expected = dedent("""\
            class _New(object):
                def __call__(self):
                    return 1
        """)
        self.assertEqual(
            expected,
            replacer.get_new_class("_New"),
        )
    def test_a_single_parameter(self):
        code = dedent("""\
            def func(param):
                return 1
        """)
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index("func"))
        expected = dedent("""\
            class _New(object):
                def __init__(self, param):
                    self.param = param
                def __call__(self):
                    return 1
        """)
        self.assertEqual(
            expected,
            replacer.get_new_class("_New"),
        )
    def test_self_parameter(self):
        code = dedent("""\
            def func(self):
                return 1
        """)
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index("func"))
        # ``self`` cannot be reused as an attribute initializer name, so the
        # generated __init__ takes it as ``host``.
        expected = dedent("""\
            class _New(object):
                def __init__(self, host):
                    self.self = host
                def __call__(self):
                    return 1
        """)
        self.assertEqual(
            expected,
            replacer.get_new_class("_New"),
        )
    def test_simple_using_passed_parameters(self):
        code = dedent("""\
            def func(param):
                return param
        """)
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index("func"))
        expected = dedent("""\
            class _New(object):
                def __init__(self, param):
                    self.param = param
                def __call__(self):
                    return self.param
        """)
        self.assertEqual(
            expected,
            replacer.get_new_class("_New"),
        )
    def test_self_keywords_and_args_parameters(self):
        code = dedent("""\
            def func(arg, *args, **kwds):
                result = arg + args[0] + kwds[arg]
                return result
        """)
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index("func"))
        expected = dedent("""\
            class _New(object):
                def __init__(self, arg, args, kwds):
                    self.arg = arg
                    self.args = args
                    self.kwds = kwds
                def __call__(self):
                    result = self.arg + self.args[0] + self.kwds[self.arg]
                    return result
        """)
        self.assertEqual(expected, replacer.get_new_class("_New"))
    def test_performing_on_not_a_function(self):
        code = dedent("""\
            my_var = 10
        """)
        self.mod.write(code)
        with self.assertRaises(RefactoringError):
            MethodObject(self.project, self.mod, code.index("my_var"))
    def test_changing_the_module(self):
        code = dedent("""\
            def func():
                return 1
        """)
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index("func"))
        self.project.do(replacer.get_changes("_New"))
        expected = dedent("""\
            def func():
                return _New()()
            class _New(object):
                def __call__(self):
                    return 1
        """)
        self.assertEqual(expected, self.mod.read())
    def test_changing_the_module_and_class_methods(self):
        code = dedent("""\
            class C(object):
                def a_func(self):
                    return 1
                def another_func(self):
                    pass
        """)
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index("func"))
        self.project.do(replacer.get_changes("_New"))
        expected = dedent("""\
            class C(object):
                def a_func(self):
                    return _New(self)()
                def another_func(self):
                    pass
            class _New(object):
                def __init__(self, host):
                    self.self = host
                def __call__(self):
                    return 1
        """)
        self.assertEqual(expected, self.mod.read())
class IntroduceFactoryTest(unittest.TestCase):
    """Tests for rope's IntroduceFactory refactoring.

    Covers static and global factory generation, occurrence rewriting in
    the same and other modules, undo, and the module-to-package transform.
    Expected sources are compared byte-for-byte.
    """
    def setUp(self):
        super().setUp()
        self.project = testutils.sample_project()
        self.pycore = self.project.pycore
    def tearDown(self):
        testutils.remove_project(self.project)
        super().tearDown()
    def _introduce_factory(self, resource, offset, *args, **kwds):
        """Run IntroduceFactory at *offset* in *resource* and apply it."""
        factory_introducer = IntroduceFactory(self.project, resource, offset)
        changes = factory_introducer.get_changes(*args, **kwds)
        self.project.do(changes)
    def test_adding_the_method(self):
        code = dedent("""\
            class AClass(object):
                an_attr = 10
        """)
        mod = testutils.create_module(self.project, "mod")
        mod.write(code)
        expected = dedent("""\
            class AClass(object):
                an_attr = 10

                @staticmethod
                def create(*args, **kwds):
                    return AClass(*args, **kwds)
        """)
        self._introduce_factory(mod, mod.read().index("AClass") + 1, "create")
        self.assertEqual(expected, mod.read())
    def test_changing_occurrences_in_the_main_module(self):
        code = dedent("""\
            class AClass(object):
                an_attr = 10
            a_var = AClass()""")
        mod = testutils.create_module(self.project, "mod")
        mod.write(code)
        expected = dedent("""\
            class AClass(object):
                an_attr = 10

                @staticmethod
                def create(*args, **kwds):
                    return AClass(*args, **kwds)
            a_var = AClass.create()""")
        self._introduce_factory(mod, mod.read().index("AClass") + 1, "create")
        self.assertEqual(expected, mod.read())
    def test_changing_occurrences_with_arguments(self):
        code = dedent("""\
            class AClass(object):
                def __init__(self, arg):
                    pass
            a_var = AClass(10)
        """)
        mod = testutils.create_module(self.project, "mod")
        mod.write(code)
        expected = dedent("""\
            class AClass(object):
                def __init__(self, arg):
                    pass

                @staticmethod
                def create(*args, **kwds):
                    return AClass(*args, **kwds)
            a_var = AClass.create(10)
        """)
        self._introduce_factory(mod, mod.read().index("AClass") + 1, "create")
        self.assertEqual(expected, mod.read())
    def test_changing_occurrences_in_other_modules(self):
        mod1 = testutils.create_module(self.project, "mod1")
        mod2 = testutils.create_module(self.project, "mod2")
        mod1.write("class AClass(object):\n    an_attr = 10\n")
        mod2.write("import mod1\na_var = mod1.AClass()\n")
        self._introduce_factory(mod1, mod1.read().index("AClass") + 1, "create")
        expected1 = dedent("""\
            class AClass(object):
                an_attr = 10

                @staticmethod
                def create(*args, **kwds):
                    return AClass(*args, **kwds)
        """)
        expected2 = dedent("""\
            import mod1
            a_var = mod1.AClass.create()
        """)
        self.assertEqual(expected1, mod1.read())
        self.assertEqual(expected2, mod2.read())
    def test_raising_exception_for_non_classes(self):
        mod = testutils.create_module(self.project, "mod")
        mod.write("def a_func():\n    pass\n")
        with self.assertRaises(RefactoringError):
            self._introduce_factory(mod, mod.read().index("a_func") + 1, "create")
    def test_undoing_introduce_factory(self):
        mod1 = testutils.create_module(self.project, "mod1")
        mod2 = testutils.create_module(self.project, "mod2")
        code1 = dedent("""\
            class AClass(object):
                an_attr = 10
        """)
        mod1.write(code1)
        code2 = dedent("""\
            from mod1 import AClass
            a_var = AClass()
        """)
        mod2.write(code2)
        self._introduce_factory(mod1, mod1.read().index("AClass") + 1, "create")
        self.project.history.undo()
        self.assertEqual(code1, mod1.read())
        self.assertEqual(code2, mod2.read())
    def test_using_on_an_occurrence_outside_the_main_module(self):
        mod1 = testutils.create_module(self.project, "mod1")
        mod2 = testutils.create_module(self.project, "mod2")
        mod1.write("class AClass(object):\n    an_attr = 10\n")
        mod2.write("import mod1\na_var = mod1.AClass()\n")
        self._introduce_factory(mod2, mod2.read().index("AClass") + 1, "create")
        expected1 = dedent("""\
            class AClass(object):
                an_attr = 10

                @staticmethod
                def create(*args, **kwds):
                    return AClass(*args, **kwds)
        """)
        expected2 = "import mod1\n" "a_var = mod1.AClass.create()\n"
        self.assertEqual(expected1, mod1.read())
        self.assertEqual(expected2, mod2.read())
    def test_introduce_factory_in_nested_scopes(self):
        code = dedent("""\
            def create_var():
                class AClass(object):
                    an_attr = 10
                return AClass()
        """)
        mod = testutils.create_module(self.project, "mod")
        mod.write(code)
        expected = dedent("""\
            def create_var():
                class AClass(object):
                    an_attr = 10

                    @staticmethod
                    def create(*args, **kwds):
                        return AClass(*args, **kwds)
                return AClass.create()
        """)
        self._introduce_factory(mod, mod.read().index("AClass") + 1, "create")
        self.assertEqual(expected, mod.read())
    def test_adding_factory_for_global_factories(self):
        code = dedent("""\
            class AClass(object):
                an_attr = 10
        """)
        mod = testutils.create_module(self.project, "mod")
        mod.write(code)
        expected = dedent("""\
            class AClass(object):
                an_attr = 10

            def create(*args, **kwds):
                return AClass(*args, **kwds)
        """)
        self._introduce_factory(
            mod, mod.read().index("AClass") + 1, "create", global_factory=True
        )
        self.assertEqual(expected, mod.read())
    def test_get_name_for_factories(self):
        code = dedent("""\
            class C(object):
                pass
        """)
        mod = testutils.create_module(self.project, "mod")
        mod.write(code)
        factory = IntroduceFactory(self.project, mod, mod.read().index("C") + 1)
        self.assertEqual("C", factory.get_name())
    def test_raising_exception_for_global_factory_for_nested_classes(self):
        code = dedent("""\
            def create_var():
                class AClass(object):
                    an_attr = 10
                return AClass()
        """)
        mod = testutils.create_module(self.project, "mod")
        mod.write(code)
        with self.assertRaises(RefactoringError):
            self._introduce_factory(
                mod, mod.read().index("AClass") + 1, "create", global_factory=True
            )
    def test_changing_occurrences_in_the_main_module_for_global_factories(self):
        code = dedent("""\
            class AClass(object):
                an_attr = 10
            a_var = AClass()""")
        mod = testutils.create_module(self.project, "mod")
        mod.write(code)
        expected = dedent("""\
            class AClass(object):
                an_attr = 10

            def create(*args, **kwds):
                return AClass(*args, **kwds)
            a_var = create()""")
        self._introduce_factory(
            mod, mod.read().index("AClass") + 1, "create", global_factory=True
        )
        self.assertEqual(expected, mod.read())
    def test_changing_occurrences_in_other_modules_for_global_factories(self):
        mod1 = testutils.create_module(self.project, "mod1")
        mod2 = testutils.create_module(self.project, "mod2")
        mod1.write("class AClass(object):\n    an_attr = 10\n")
        mod2.write("import mod1\na_var = mod1.AClass()\n")
        self._introduce_factory(
            mod1, mod1.read().index("AClass") + 1, "create", global_factory=True
        )
        expected1 = dedent("""\
            class AClass(object):
                an_attr = 10

            def create(*args, **kwds):
                return AClass(*args, **kwds)
        """)
        expected2 = "import mod1\n" "a_var = mod1.create()\n"
        self.assertEqual(expected1, mod1.read())
        self.assertEqual(expected2, mod2.read())
    def test_import_if_necessary_in_other_mods_for_global_factories(self):
        mod1 = testutils.create_module(self.project, "mod1")
        mod2 = testutils.create_module(self.project, "mod2")
        mod1.write("class AClass(object):\n    an_attr = 10\n")
        mod2.write("from mod1 import AClass\npair = AClass(), AClass\n")
        self._introduce_factory(
            mod1, mod1.read().index("AClass") + 1, "create", global_factory=True
        )
        expected1 = dedent("""\
            class AClass(object):
                an_attr = 10

            def create(*args, **kwds):
                return AClass(*args, **kwds)
        """)
        expected2 = dedent("""\
            from mod1 import AClass, create
            pair = create(), AClass
        """)
        self.assertEqual(expected1, mod1.read())
        self.assertEqual(expected2, mod2.read())
    def test_changing_occurrences_for_renamed_classes(self):
        code = dedent("""\
            class AClass(object):
                an_attr = 10
            a_class = AClass
            a_var = a_class()""")
        mod = testutils.create_module(self.project, "mod")
        mod.write(code)
        # Calls through the alias must NOT be rewritten to use the factory.
        expected = dedent("""\
            class AClass(object):
                an_attr = 10

                @staticmethod
                def create(*args, **kwds):
                    return AClass(*args, **kwds)
            a_class = AClass
            a_var = a_class()""")
        self._introduce_factory(mod, mod.read().index("a_class") + 1, "create")
        self.assertEqual(expected, mod.read())
    def test_changing_occurs_in_the_same_module_with_conflict_ranges(self):
        mod = testutils.create_module(self.project, "mod")
        code = dedent("""\
            class C(object):
                def create(self):
                    return C()
        """)
        mod.write(code)
        self._introduce_factory(mod, mod.read().index("C"), "create_c", True)
        expected = dedent("""\
            class C(object):
                def create(self):
                    return create_c()
        """)
        self.assertTrue(mod.read().startswith(expected))
    def _transform_module_to_package(self, resource):
        """Apply rope's ModuleToPackage refactoring to *resource*."""
        self.project.do(
            rope.refactor.ModuleToPackage(self.project, resource).get_changes()
        )
    def test_transform_module_to_package(self):
        mod1 = testutils.create_module(self.project, "mod1")
        mod1.write("import mod2\nfrom mod2 import AClass\n")
        mod2 = testutils.create_module(self.project, "mod2")
        mod2.write("class AClass(object):\n    pass\n")
        self._transform_module_to_package(mod2)
        mod2 = self.project.get_resource("mod2")
        root_folder = self.project.root
        self.assertFalse(root_folder.has_child("mod2.py"))
        self.assertEqual(
            "class AClass(object):\n    pass\n",
            root_folder.get_child("mod2").get_child("__init__.py").read(),
        )
    def test_transform_module_to_package_undoing(self):
        pkg = testutils.create_package(self.project, "pkg")
        mod = testutils.create_module(self.project, "mod", pkg)
        self._transform_module_to_package(mod)
        self.assertFalse(pkg.has_child("mod.py"))
        self.assertTrue(pkg.get_child("mod").has_child("__init__.py"))
        self.project.history.undo()
        self.assertTrue(pkg.has_child("mod.py"))
        self.assertFalse(pkg.has_child("mod"))
    def test_transform_module_to_package_with_relative_imports(self):
        pkg = testutils.create_package(self.project, "pkg")
        mod1 = testutils.create_module(self.project, "mod1", pkg)
        mod1.write("import mod2\nfrom mod2 import AClass\n")
        mod2 = testutils.create_module(self.project, "mod2", pkg)
        mod2.write("class AClass(object):\n    pass\n")
        self._transform_module_to_package(mod1)
        new_init = self.project.get_resource("pkg/mod1/__init__.py")
        self.assertEqual(
            "import pkg.mod2\nfrom pkg.mod2 import AClass\n", new_init.read()
        )
    def test_resources_parameter(self):
        code = dedent("""\
            class A(object):
                an_attr = 10
        """)
        code1 = dedent("""\
            import mod
            a = mod.A()
        """)
        mod = testutils.create_module(self.project, "mod")
        mod1 = testutils.create_module(self.project, "mod1")
        mod.write(code)
        mod1.write(code1)
        # Restricting ``resources`` must leave mod1 untouched.
        expected = dedent("""\
            class A(object):
                an_attr = 10

                @staticmethod
                def create(*args, **kwds):
                    return A(*args, **kwds)
        """)
        self._introduce_factory(
            mod, mod.read().index("A") + 1, "create", resources=[mod]
        )
        self.assertEqual(expected, mod.read())
        self.assertEqual(code1, mod1.read())
class EncapsulateFieldTest(unittest.TestCase):
    """Tests for rope's EncapsulateField refactoring (attr -> get/set).

    ``self.a_class`` is the fixture class source; ``self.added_methods``
    is the accessor pair the refactoring is expected to append, and
    ``self.encapsulated`` is the combination of both.
    """
    def setUp(self):
        super().setUp()
        self.project = testutils.sample_project()
        self.pycore = self.project.pycore
        self.mod = testutils.create_module(self.project, "mod")
        self.mod1 = testutils.create_module(self.project, "mod1")
        self.a_class = dedent("""\
            class A(object):
                def __init__(self):
                    self.attr = 1
        """)
        self.added_methods = (
            "\n"
            "    def get_attr(self):\n"
            "        return self.attr\n\n"
            "    def set_attr(self, value):\n"
            "        self.attr = value\n"
        )
        self.encapsulated = self.a_class + self.added_methods
    def tearDown(self):
        testutils.remove_project(self.project)
        super().tearDown()
    def _encapsulate(self, resource, offset, **args):
        """Run EncapsulateField at *offset* in *resource* and apply it."""
        changes = EncapsulateField(self.project, resource, offset).get_changes(**args)
        self.project.do(changes)
    def test_adding_getters_and_setters(self):
        code = self.a_class
        self.mod.write(code)
        self._encapsulate(self.mod, code.index("attr") + 1)
        self.assertEqual(self.encapsulated, self.mod.read())
    def test_changing_getters_in_other_modules(self):
        code = dedent("""\
            import mod
            a_var = mod.A()
            range(a_var.attr)
        """)
        self.mod1.write(code)
        self.mod.write(self.a_class)
        self._encapsulate(self.mod, self.mod.read().index("attr") + 1)
        expected = dedent("""\
            import mod
            a_var = mod.A()
            range(a_var.get_attr())
        """)
        self.assertEqual(expected, self.mod1.read())
    def test_changing_setters_in_other_modules(self):
        code = dedent("""\
            import mod
            a_var = mod.A()
            a_var.attr = 1
        """)
        self.mod1.write(code)
        self.mod.write(self.a_class)
        self._encapsulate(self.mod, self.mod.read().index("attr") + 1)
        expected = dedent("""\
            import mod
            a_var = mod.A()
            a_var.set_attr(1)
        """)
        self.assertEqual(expected, self.mod1.read())
    def test_changing_getters_in_setters(self):
        code = dedent("""\
            import mod
            a_var = mod.A()
            a_var.attr = 1 + a_var.attr
        """)
        self.mod1.write(code)
        self.mod.write(self.a_class)
        self._encapsulate(self.mod, self.mod.read().index("attr") + 1)
        expected = dedent("""\
            import mod
            a_var = mod.A()
            a_var.set_attr(1 + a_var.get_attr())
        """)
        self.assertEqual(expected, self.mod1.read())
    def test_appending_to_class_end(self):
        self.mod1.write(self.a_class + "a_var = A()\n")
        self._encapsulate(self.mod1, self.mod1.read().index("attr") + 1)
        self.assertEqual(self.encapsulated + "a_var = A()\n", self.mod1.read())
    def test_performing_in_other_modules(self):
        code = dedent("""\
            import mod
            a_var = mod.A()
            range(a_var.attr)
        """)
        self.mod1.write(code)
        self.mod.write(self.a_class)
        self._encapsulate(self.mod1, self.mod1.read().index("attr") + 1)
        self.assertEqual(self.encapsulated, self.mod.read())
        expected = dedent("""\
            import mod
            a_var = mod.A()
            range(a_var.get_attr())
        """)
        self.assertEqual(expected, self.mod1.read())
    def test_changing_main_module_occurrences(self):
        code = self.a_class + "a_var = A()\n" "a_var.attr = a_var.attr * 2\n"
        self.mod1.write(code)
        self._encapsulate(self.mod1, self.mod1.read().index("attr") + 1)
        expected = (
            self.encapsulated + "a_var = A()\n" "a_var.set_attr(a_var.get_attr() * 2)\n"
        )
        self.assertEqual(expected, self.mod1.read())
    def test_raising_exception_when_performed_on_non_attributes(self):
        self.mod1.write("attr = 10")
        with self.assertRaises(RefactoringError):
            self._encapsulate(self.mod1, self.mod1.read().index("attr") + 1)
    def test_raising_exception_on_tuple_assignments(self):
        self.mod.write(self.a_class)
        code = dedent("""\
            import mod
            a_var = mod.A()
            a_var.attr = 1
            a_var.attr, b = 1, 2
        """)
        self.mod1.write(code)
        with self.assertRaises(RefactoringError):
            self._encapsulate(self.mod1, self.mod1.read().index("attr") + 1)
    def test_raising_exception_on_tuple_assignments2(self):
        self.mod.write(self.a_class)
        code = dedent("""\
            import mod
            a_var = mod.A()
            a_var.attr = 1
            b, a_var.attr = 1, 2
        """)
        self.mod1.write(code)
        with self.assertRaises(RefactoringError):
            self._encapsulate(self.mod1, self.mod1.read().index("attr") + 1)
    def test_tuple_assignments_and_function_calls(self):
        code = dedent("""\
            import mod
            def func(a1=0, a2=0):
                pass
            a_var = mod.A()
            func(a_var.attr, a2=2)
        """)
        self.mod1.write(code)
        self.mod.write(self.a_class)
        self._encapsulate(self.mod, self.mod.read().index("attr") + 1)
        expected = dedent("""\
            import mod
            def func(a1=0, a2=0):
                pass
            a_var = mod.A()
            func(a_var.get_attr(), a2=2)
        """)
        self.assertEqual(expected, self.mod1.read())
    def test_tuple_assignments(self):
        code = dedent("""\
            import mod
            a_var = mod.A()
            a, b = a_var.attr, 1
        """)
        self.mod1.write(code)
        self.mod.write(self.a_class)
        self._encapsulate(self.mod, self.mod.read().index("attr") + 1)
        expected = dedent("""\
            import mod
            a_var = mod.A()
            a, b = a_var.get_attr(), 1
        """)
        self.assertEqual(expected, self.mod1.read())
    def test_changing_augmented_assignments(self):
        code = "import mod\n" "a_var = mod.A()\n" "a_var.attr += 1\n"
        self.mod1.write(code)
        self.mod.write(self.a_class)
        self._encapsulate(self.mod, self.mod.read().index("attr") + 1)
        expected = dedent("""\
            import mod
            a_var = mod.A()
            a_var.set_attr(a_var.get_attr() + 1)
        """)
        self.assertEqual(expected, self.mod1.read())
    def test_changing_augmented_assignments2(self):
        code = dedent("""\
            import mod
            a_var = mod.A()
            a_var.attr <<= 1
        """)
        self.mod1.write(code)
        self.mod.write(self.a_class)
        self._encapsulate(self.mod, self.mod.read().index("attr") + 1)
        expected = dedent("""\
            import mod
            a_var = mod.A()
            a_var.set_attr(a_var.get_attr() << 1)
        """)
        self.assertEqual(expected, self.mod1.read())
    def test_changing_occurrences_inside_the_class(self):
        new_class = (
            self.a_class + "\n" "    def a_func(self):\n" "        self.attr = 1\n"
        )
        self.mod.write(new_class)
        self._encapsulate(self.mod, self.mod.read().index("attr") + 1)
        expected = (
            self.a_class + "\n"
            "    def a_func(self):\n"
            "        self.set_attr(1)\n" + self.added_methods
        )
        self.assertEqual(expected, self.mod.read())
    def test_getter_and_setter_parameters(self):
        self.mod.write(self.a_class)
        self._encapsulate(
            self.mod,
            self.mod.read().index("attr") + 1,
            getter="getAttr",
            setter="setAttr",
        )
        new_methods = self.added_methods.replace("get_attr", "getAttr").replace(
            "set_attr", "setAttr"
        )
        expected = self.a_class + new_methods
        self.assertEqual(expected, self.mod.read())
    def test_using_resources_parameter(self):
        self.mod1.write("import mod\na = mod.A()\nvar = a.attr\n")
        self.mod.write(self.a_class)
        self._encapsulate(
            self.mod, self.mod.read().index("attr") + 1, resources=[self.mod]
        )
        self.assertEqual("import mod\na = mod.A()\nvar = a.attr\n", self.mod1.read())
        expected = self.a_class + self.added_methods
        self.assertEqual(expected, self.mod.read())
class LocalToFieldTest(unittest.TestCase):
    """Tests for rope's LocalToField refactoring (local var -> self.attr)."""
    def setUp(self):
        super().setUp()
        self.project = testutils.sample_project()
        self.pycore = self.project.pycore
        self.mod = testutils.create_module(self.project, "mod")
    def tearDown(self):
        testutils.remove_project(self.project)
        super().tearDown()
    def _perform_convert_local_variable_to_field(self, resource, offset):
        """Run LocalToField at *offset* in *resource* and apply it."""
        changes = LocalToField(self.project, resource, offset).get_changes()
        self.project.do(changes)
    def test_simple_local_to_field(self):
        code = dedent("""\
            class A(object):
                def a_func(self):
                    var = 10
        """)
        self.mod.write(code)
        self._perform_convert_local_variable_to_field(self.mod, code.index("var") + 1)
        expected = dedent("""\
            class A(object):
                def a_func(self):
                    self.var = 10
        """)
        self.assertEqual(expected, self.mod.read())
    def test_raising_exception_when_performed_on_a_global_var(self):
        self.mod.write("var = 10\n")
        with self.assertRaises(RefactoringError):
            self._perform_convert_local_variable_to_field(
                self.mod, self.mod.read().index("var") + 1
            )
    def test_raising_exception_when_performed_on_field(self):
        code = dedent("""\
            class A(object):
                def a_func(self):
                    self.var = 10
        """)
        self.mod.write(code)
        with self.assertRaises(RefactoringError):
            self._perform_convert_local_variable_to_field(
                self.mod, self.mod.read().index("var") + 1
            )
    def test_raising_exception_when_performed_on_a_parameter(self):
        code = dedent("""\
            class A(object):
                def a_func(self, var):
                    a = var
        """)
        self.mod.write(code)
        with self.assertRaises(RefactoringError):
            self._perform_convert_local_variable_to_field(
                self.mod, self.mod.read().index("var") + 1
            )
    # NOTE: This situation happens a lot and is normally not an error
    # @testutils.assert_raises(RefactoringError)
    def test_not_raise_exception_when_there_is_a_field_with_the_same_name(self):
        code = dedent("""\
            class A(object):
                def __init__(self):
                    self.var = 1
                def a_func(self):
                    var = 10
        """)
        self.mod.write(code)
        self._perform_convert_local_variable_to_field(
            self.mod, self.mod.read().rindex("var") + 1
        )
    def test_local_to_field_with_self_renamed(self):
        code = dedent("""\
            class A(object):
                def a_func(myself):
                    var = 10
        """)
        self.mod.write(code)
        self._perform_convert_local_variable_to_field(self.mod, code.index("var") + 1)
        expected = dedent("""\
            class A(object):
                def a_func(myself):
                    myself.var = 10
        """)
        self.assertEqual(expected, self.mod.read())
class IntroduceParameterTest(unittest.TestCase):
    """Tests for rope's IntroduceParameter refactoring.

    The selected expression becomes a keyword parameter whose default is
    the original expression, and uses inside the body are rewritten.
    """
    def setUp(self):
        super().setUp()
        self.project = testutils.sample_project()
        self.pycore = self.project.pycore
        self.mod = testutils.create_module(self.project, "mod")
    def tearDown(self):
        testutils.remove_project(self.project)
        super().tearDown()
    def _introduce_parameter(self, offset, name):
        """Run IntroduceParameter at *offset*, naming the new parameter *name*."""
        rope.refactor.introduce_parameter.IntroduceParameter(
            self.project, self.mod, offset
        ).get_changes(name).do()
    def test_simple_case(self):
        code = dedent("""\
            var = 1
            def f():
                b = var
        """)
        self.mod.write(code)
        offset = self.mod.read().rindex("var")
        self._introduce_parameter(offset, "var")
        expected = dedent("""\
            var = 1
            def f(var=var):
                b = var
        """)
        self.assertEqual(expected, self.mod.read())
    def test_changing_function_body(self):
        code = dedent("""\
            var = 1
            def f():
                b = var
        """)
        self.mod.write(code)
        offset = self.mod.read().rindex("var")
        self._introduce_parameter(offset, "p1")
        expected = dedent("""\
            var = 1
            def f(p1=var):
                b = p1
        """)
        self.assertEqual(expected, self.mod.read())
    def test_unknown_variables(self):
        self.mod.write("def f():\n    b = var + c\n")
        offset = self.mod.read().rindex("var")
        with self.assertRaises(RefactoringError):
            self._introduce_parameter(offset, "p1")
            self.assertEqual("def f(p1=var):\n    b = p1 + c\n", self.mod.read())
    def test_failing_when_not_inside(self):
        self.mod.write("var = 10\nb = var\n")
        offset = self.mod.read().rindex("var")
        with self.assertRaises(RefactoringError):
            self._introduce_parameter(offset, "p1")
    def test_attribute_accesses(self):
        code = dedent("""\
            class C(object):
                a = 10
            c = C()
            def f():
                b = c.a
        """)
        self.mod.write(code)
        offset = self.mod.read().rindex("a")
        self._introduce_parameter(offset, "p1")
        expected = dedent("""\
            class C(object):
                a = 10
            c = C()
            def f(p1=c.a):
                b = p1
        """)
        self.assertEqual(expected, self.mod.read())
    def test_introducing_parameters_for_methods(self):
        code = dedent("""\
            var = 1
            class C(object):
                def f(self):
                    b = var
        """)
        self.mod.write(code)
        offset = self.mod.read().rindex("var")
        self._introduce_parameter(offset, "p1")
        expected = dedent("""\
            var = 1
            class C(object):
                def f(self, p1=var):
                    b = p1
        """)
        self.assertEqual(expected, self.mod.read())
class _MockTaskObserver:
def __init__(self):
self.called = 0
def __call__(self):
self.called += 1
class TaskHandleTest(unittest.TestCase):
    """Tests for rope.base.taskhandle.TaskHandle, job sets and observers.

    The final line of this class carried fused extraction residue
    (a stray ``| path | ... |`` metadata row); it has been stripped.
    """

    def test_trivial_case(self):
        handle = rope.base.taskhandle.TaskHandle()
        self.assertFalse(handle.is_stopped())

    def test_stopping(self):
        handle = rope.base.taskhandle.TaskHandle()
        handle.stop()
        self.assertTrue(handle.is_stopped())

    def test_job_sets(self):
        handle = rope.base.taskhandle.TaskHandle()
        jobs = handle.create_jobset()
        self.assertEqual([jobs], handle.get_jobsets())

    def test_starting_and_finishing_jobs(self):
        handle = rope.base.taskhandle.TaskHandle()
        jobs = handle.create_jobset(name="test job set", count=1)
        jobs.started_job("job1")
        jobs.finished_job()

    def test_test_checking_status(self):
        handle = rope.base.taskhandle.TaskHandle()
        jobs = handle.create_jobset()
        handle.stop()
        with self.assertRaises(InterruptedTaskError):
            jobs.check_status()

    def test_test_checking_status_when_starting(self):
        handle = rope.base.taskhandle.TaskHandle()
        jobs = handle.create_jobset()
        handle.stop()
        with self.assertRaises(InterruptedTaskError):
            jobs.started_job("job1")

    def test_calling_the_observer_after_stopping(self):
        handle = rope.base.taskhandle.TaskHandle()
        observer = _MockTaskObserver()
        handle.add_observer(observer)
        handle.stop()
        self.assertEqual(1, observer.called)

    def test_calling_the_observer_after_creating_job_sets(self):
        handle = rope.base.taskhandle.TaskHandle()
        observer = _MockTaskObserver()
        handle.add_observer(observer)
        jobs = handle.create_jobset()  # noqa
        self.assertEqual(1, observer.called)

    def test_calling_the_observer_when_starting_and_finishing_jobs(self):
        handle = rope.base.taskhandle.TaskHandle()
        observer = _MockTaskObserver()
        handle.add_observer(observer)
        jobs = handle.create_jobset(name="test job set", count=1)
        jobs.started_job("job1")
        jobs.finished_job()
        # One notification per event: create, start, finish.
        self.assertEqual(3, observer.called)

    def test_job_set_get_percent_done(self):
        handle = rope.base.taskhandle.TaskHandle()
        jobs = handle.create_jobset(name="test job set", count=2)
        self.assertEqual(0, jobs.get_percent_done())
        jobs.started_job("job1")
        jobs.finished_job()
        self.assertEqual(50, jobs.get_percent_done())
        jobs.started_job("job2")
        jobs.finished_job()
        self.assertEqual(100, jobs.get_percent_done())

    def test_getting_job_name(self):
        handle = rope.base.taskhandle.TaskHandle()
        jobs = handle.create_jobset(name="test job set", count=1)
        # recommended name/job_name attribute
        self.assertEqual("test job set", jobs.name)
        self.assertEqual(None, jobs.job_name)
        # deprecated getters
        self.assertEqual("test job set", jobs.get_name())
        self.assertEqual(None, jobs.get_active_job_name())
        jobs.started_job("job1")
        # recommended name/job_name attribute
        self.assertEqual("test job set", jobs.get_name())
        # deprecated getters
        self.assertEqual("job1", jobs.get_active_job_name())
import unittest
import rope.base.taskhandle
import rope.refactor.introduce_parameter
import ropetest.refactor.extracttest
import ropetest.refactor.importutilstest
import ropetest.refactor.inlinetest
import ropetest.refactor.movetest
import ropetest.refactor.multiprojecttest
import ropetest.refactor.patchedasttest
import ropetest.refactor.renametest
import ropetest.refactor.restructuretest
import ropetest.refactor.suitestest
import ropetest.refactor.usefunctiontest
from rope.base.exceptions import RefactoringError, InterruptedTaskError
from rope.refactor.encapsulate_field import EncapsulateField
from rope.refactor.introduce_factory import IntroduceFactory
from rope.refactor.localtofield import LocalToField
from rope.refactor.method_object import MethodObject
from ropetest import testutils
from ropetest.refactor import change_signature_test, similarfindertest
class MethodObjectTest(unittest.TestCase):
    """Tests for rope's method-object refactoring (MethodObject).

    Uses ``assertEqual`` instead of the deprecated ``assertEquals``
    alias (removed in Python 3.12); no behavioral change otherwise.
    """

    def setUp(self):
        super(MethodObjectTest, self).setUp()
        self.project = testutils.sample_project()
        self.pycore = self.project.pycore
        self.mod = testutils.create_module(self.project, 'mod')

    def tearDown(self):
        testutils.remove_project(self.project)
        super(MethodObjectTest, self).tearDown()

    def test_empty_method(self):
        code = 'def func():\n pass\n'
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index('func'))
        self.assertEqual(
            'class _New(object):\n\n def __call__(self):\n pass\n',
            replacer.get_new_class('_New'))

    def test_trivial_return(self):
        code = 'def func():\n return 1\n'
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index('func'))
        self.assertEqual(
            'class _New(object):\n\n def __call__(self):\n return 1\n',
            replacer.get_new_class('_New'))

    def test_multi_line_header(self):
        code = 'def func(\n ):\n return 1\n'
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index('func'))
        self.assertEqual(
            'class _New(object):\n\n def __call__(self):\n return 1\n',
            replacer.get_new_class('_New'))

    def test_a_single_parameter(self):
        code = 'def func(param):\n return 1\n'
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index('func'))
        self.assertEqual(
            'class _New(object):\n\n'
            ' def __init__(self, param):\n self.param = param\n\n'
            ' def __call__(self):\n return 1\n',
            replacer.get_new_class('_New'))

    def test_self_parameter(self):
        code = 'def func(self):\n return 1\n'
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index('func'))
        self.assertEqual(
            'class _New(object):\n\n'
            ' def __init__(self, host):\n self.self = host\n\n'
            ' def __call__(self):\n return 1\n',
            replacer.get_new_class('_New'))

    def test_simple_using_passed_parameters(self):
        code = 'def func(param):\n return param\n'
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index('func'))
        self.assertEqual(
            'class _New(object):\n\n'
            ' def __init__(self, param):\n self.param = param\n\n'
            ' def __call__(self):\n return self.param\n',
            replacer.get_new_class('_New'))

    def test_self_keywords_and_args_parameters(self):
        code = 'def func(arg, *args, **kwds):\n' \
               ' result = arg + args[0] + kwds[arg]\n' \
               ' return result\n'
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index('func'))
        expected = 'class _New(object):\n\n' \
                   ' def __init__(self, arg, args, kwds):\n' \
                   ' self.arg = arg\n' \
                   ' self.args = args\n' \
                   ' self.kwds = kwds\n\n' \
                   ' def __call__(self):\n' \
                   ' result = self.arg + self.args[0] + self.kwds[self.arg]\n' \
                   ' return result\n'
        self.assertEqual(expected, replacer.get_new_class('_New'))

    @testutils.assert_raises(RefactoringError)
    def test_performing_on_not_a_function(self):
        code = 'my_var = 10\n'
        self.mod.write(code)
        # Constructing the refactoring on a non-function is expected to raise.
        replacer = MethodObject(self.project, self.mod, code.index('my_var'))

    def test_changing_the_module(self):
        code = 'def func():\n return 1\n'
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index('func'))
        self.project.do(replacer.get_changes('_New'))
        expected = 'def func():\n' \
                   ' return _New()()\n\n\n' \
                   'class _New(object):\n\n' \
                   ' def __call__(self):\n' \
                   ' return 1\n'
        self.assertEqual(expected, self.mod.read())

    def test_changing_the_module_and_class_methods(self):
        code = 'class C(object):\n\n' \
               ' def a_func(self):\n' \
               ' return 1\n\n' \
               ' def another_func(self):\n' \
               ' pass\n'
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index('func'))
        self.project.do(replacer.get_changes('_New'))
        expected = 'class C(object):\n\n' \
                   ' def a_func(self):\n' \
                   ' return _New(self)()\n\n' \
                   ' def another_func(self):\n' \
                   ' pass\n\n\n' \
                   'class _New(object):\n\n' \
                   ' def __init__(self, host):\n' \
                   ' self.self = host\n\n' \
                   ' def __call__(self):\n' \
                   ' return 1\n'
        self.assertEqual(expected, self.mod.read())
class IntroduceFactoryTest(unittest.TestCase):
    """Tests for introduce-factory and module-to-package refactorings.

    Uses ``assertEqual`` instead of the deprecated ``assertEquals``
    alias (removed in Python 3.12); no behavioral change otherwise.
    """

    def setUp(self):
        super(IntroduceFactoryTest, self).setUp()
        self.project = testutils.sample_project()
        self.pycore = self.project.pycore

    def tearDown(self):
        testutils.remove_project(self.project)
        super(IntroduceFactoryTest, self).tearDown()

    def _introduce_factory(self, resource, offset, *args, **kwds):
        # Helper: build the refactoring and apply its changes in one step.
        factory_introducer = IntroduceFactory(self.project,
                                              resource, offset)
        changes = factory_introducer.get_changes(*args, **kwds)
        self.project.do(changes)

    def test_adding_the_method(self):
        code = 'class AClass(object):\n an_attr = 10\n'
        mod = testutils.create_module(self.project, 'mod')
        mod.write(code)
        expected = 'class AClass(object):\n' \
                   ' an_attr = 10\n\n' \
                   ' @staticmethod\n' \
                   ' def create(*args, **kwds):\n' \
                   ' return AClass(*args, **kwds)\n'
        self._introduce_factory(mod, mod.read().index('AClass') + 1, 'create')
        self.assertEqual(expected, mod.read())

    def test_changing_occurances_in_the_main_module(self):
        code = 'class AClass(object):\n' \
               ' an_attr = 10\n' \
               'a_var = AClass()'
        mod = testutils.create_module(self.project, 'mod')
        mod.write(code)
        expected = 'class AClass(object):\n' \
                   ' an_attr = 10\n\n' \
                   ' @staticmethod\n' \
                   ' def create(*args, **kwds):\n' \
                   ' return AClass(*args, **kwds)\n'\
                   'a_var = AClass.create()'
        self._introduce_factory(mod, mod.read().index('AClass') + 1, 'create')
        self.assertEqual(expected, mod.read())

    def test_changing_occurances_with_arguments(self):
        code = 'class AClass(object):\n' \
               ' def __init__(self, arg):\n' \
               ' pass\n' \
               'a_var = AClass(10)\n'
        mod = testutils.create_module(self.project, 'mod')
        mod.write(code)
        expected = 'class AClass(object):\n' \
                   ' def __init__(self, arg):\n' \
                   ' pass\n\n' \
                   ' @staticmethod\n' \
                   ' def create(*args, **kwds):\n' \
                   ' return AClass(*args, **kwds)\n' \
                   'a_var = AClass.create(10)\n'
        self._introduce_factory(mod, mod.read().index('AClass') + 1, 'create')
        self.assertEqual(expected, mod.read())

    def test_changing_occurances_in_other_modules(self):
        mod1 = testutils.create_module(self.project, 'mod1')
        mod2 = testutils.create_module(self.project, 'mod2')
        mod1.write('class AClass(object):\n an_attr = 10\n')
        mod2.write('import mod1\na_var = mod1.AClass()\n')
        self._introduce_factory(mod1, mod1.read().index('AClass') + 1, 'create')
        expected1 = 'class AClass(object):\n' \
                    ' an_attr = 10\n\n' \
                    ' @staticmethod\n' \
                    ' def create(*args, **kwds):\n' \
                    ' return AClass(*args, **kwds)\n'
        expected2 = 'import mod1\n' \
                    'a_var = mod1.AClass.create()\n'
        self.assertEqual(expected1, mod1.read())
        self.assertEqual(expected2, mod2.read())

    @testutils.assert_raises(RefactoringError)
    def test_raising_exception_for_non_classes(self):
        mod = testutils.create_module(self.project, 'mod')
        mod.write('def a_func():\n pass\n')
        self._introduce_factory(mod, mod.read().index('a_func') + 1, 'create')

    def test_undoing_introduce_factory(self):
        mod1 = testutils.create_module(self.project, 'mod1')
        mod2 = testutils.create_module(self.project, 'mod2')
        code1 = 'class AClass(object):\n an_attr = 10\n'
        mod1.write(code1)
        code2 = 'from mod1 import AClass\na_var = AClass()\n'
        mod2.write(code2)
        self._introduce_factory(mod1, mod1.read().index('AClass') + 1, 'create')
        self.project.history.undo()
        # Undo must restore both modules to their original text.
        self.assertEqual(code1, mod1.read())
        self.assertEqual(code2, mod2.read())

    def test_using_on_an_occurance_outside_the_main_module(self):
        mod1 = testutils.create_module(self.project, 'mod1')
        mod2 = testutils.create_module(self.project, 'mod2')
        mod1.write('class AClass(object):\n an_attr = 10\n')
        mod2.write('import mod1\na_var = mod1.AClass()\n')
        self._introduce_factory(mod2, mod2.read().index('AClass') + 1, 'create')
        expected1 = 'class AClass(object):\n' \
                    ' an_attr = 10\n\n' \
                    ' @staticmethod\n' \
                    ' def create(*args, **kwds):\n' \
                    ' return AClass(*args, **kwds)\n'
        expected2 = 'import mod1\n' \
                    'a_var = mod1.AClass.create()\n'
        self.assertEqual(expected1, mod1.read())
        self.assertEqual(expected2, mod2.read())

    def test_introduce_factory_in_nested_scopes(self):
        code = 'def create_var():\n'\
               ' class AClass(object):\n'\
               ' an_attr = 10\n'\
               ' return AClass()\n'
        mod = testutils.create_module(self.project, 'mod')
        mod.write(code)
        expected = 'def create_var():\n'\
                   ' class AClass(object):\n'\
                   ' an_attr = 10\n\n'\
                   ' @staticmethod\n def create(*args, **kwds):\n'\
                   ' return AClass(*args, **kwds)\n'\
                   ' return AClass.create()\n'
        self._introduce_factory(mod, mod.read().index('AClass') + 1, 'create')
        self.assertEqual(expected, mod.read())

    def test_adding_factory_for_global_factories(self):
        code = 'class AClass(object):\n an_attr = 10\n'
        mod = testutils.create_module(self.project, 'mod')
        mod.write(code)
        expected = 'class AClass(object):\n' \
                   ' an_attr = 10\n\n' \
                   'def create(*args, **kwds):\n' \
                   ' return AClass(*args, **kwds)\n'
        self._introduce_factory(mod, mod.read().index('AClass') + 1,
                                'create', global_factory=True)
        self.assertEqual(expected, mod.read())

    def test_get_name_for_factories(self):
        code = 'class C(object):\n pass\n'
        mod = testutils.create_module(self.project, 'mod')
        mod.write(code)
        factory = IntroduceFactory(self.project, mod,
                                   mod.read().index('C') + 1)
        self.assertEqual('C', factory.get_name())

    @testutils.assert_raises(RefactoringError)
    def test_raising_exception_for_global_factory_for_nested_classes(self):
        code = 'def create_var():\n'\
               ' class AClass(object):\n'\
               ' an_attr = 10\n'\
               ' return AClass()\n'
        mod = testutils.create_module(self.project, 'mod')
        mod.write(code)
        self._introduce_factory(mod, mod.read().index('AClass') + 1,
                                'create', global_factory=True)

    def test_changing_occurances_in_the_main_module_for_global_factories(self):
        code = 'class AClass(object):\n' \
               ' an_attr = 10\n' \
               'a_var = AClass()'
        mod = testutils.create_module(self.project, 'mod')
        mod.write(code)
        expected = 'class AClass(object):\n an_attr = 10\n\n' \
                   'def create(*args, **kwds):\n' \
                   ' return AClass(*args, **kwds)\n'\
                   'a_var = create()'
        self._introduce_factory(mod, mod.read().index('AClass') + 1,
                                'create', global_factory=True)
        self.assertEqual(expected, mod.read())

    def test_changing_occurances_in_other_modules_for_global_factories(self):
        mod1 = testutils.create_module(self.project, 'mod1')
        mod2 = testutils.create_module(self.project, 'mod2')
        mod1.write('class AClass(object):\n an_attr = 10\n')
        mod2.write('import mod1\na_var = mod1.AClass()\n')
        self._introduce_factory(mod1, mod1.read().index('AClass') + 1,
                                'create', global_factory=True)
        expected1 = 'class AClass(object):\n' \
                    ' an_attr = 10\n\n' \
                    'def create(*args, **kwds):\n' \
                    ' return AClass(*args, **kwds)\n'
        expected2 = 'import mod1\n' \
                    'a_var = mod1.create()\n'
        self.assertEqual(expected1, mod1.read())
        self.assertEqual(expected2, mod2.read())

    def test_importing_if_necessary_in_other_modules_for_global_factories(self):
        mod1 = testutils.create_module(self.project, 'mod1')
        mod2 = testutils.create_module(self.project, 'mod2')
        mod1.write('class AClass(object):\n an_attr = 10\n')
        mod2.write('from mod1 import AClass\npair = AClass(), AClass\n')
        self._introduce_factory(mod1, mod1.read().index('AClass') + 1,
                                'create', global_factory=True)
        expected1 = 'class AClass(object):\n' \
                    ' an_attr = 10\n\n' \
                    'def create(*args, **kwds):\n' \
                    ' return AClass(*args, **kwds)\n'
        expected2 = 'from mod1 import AClass, create\n' \
                    'pair = create(), AClass\n'
        self.assertEqual(expected1, mod1.read())
        self.assertEqual(expected2, mod2.read())

    def test_changing_occurances_for_renamed_classes(self):
        code = 'class AClass(object):\n an_attr = 10\na_class = AClass\na_var = a_class()'
        mod = testutils.create_module(self.project, 'mod')
        mod.write(code)
        expected = 'class AClass(object):\n' \
                   ' an_attr = 10\n\n' \
                   ' @staticmethod\n' \
                   ' def create(*args, **kwds):\n' \
                   ' return AClass(*args, **kwds)\n' \
                   'a_class = AClass\n' \
                   'a_var = a_class()'
        self._introduce_factory(mod, mod.read().index('a_class') + 1, 'create')
        self.assertEqual(expected, mod.read())

    def test_changing_occurrences_in_the_same_module_with_conflicting_ranges(self):
        mod = testutils.create_module(self.project, 'mod')
        code = 'class C(object):\n' \
               ' def create(self):\n' \
               ' return C()\n'
        mod.write(code)
        self._introduce_factory(mod, mod.read().index('C'), 'create_c', True)
        expected = 'class C(object):\n' \
                   ' def create(self):\n' \
                   ' return create_c()\n'
        self.assertTrue(mod.read().startswith(expected))

    def _transform_module_to_package(self, resource):
        self.project.do(rope.refactor.ModuleToPackage(
            self.project, resource).get_changes())

    def test_transform_module_to_package(self):
        mod1 = testutils.create_module(self.project, 'mod1')
        mod1.write('import mod2\nfrom mod2 import AClass\n')
        mod2 = testutils.create_module(self.project, 'mod2')
        mod2.write('class AClass(object):\n pass\n')
        self._transform_module_to_package(mod2)
        mod2 = self.project.get_resource('mod2')
        root_folder = self.project.root
        self.assertFalse(root_folder.has_child('mod2.py'))
        self.assertEqual('class AClass(object):\n pass\n',
                         root_folder.get_child('mod2').
                         get_child('__init__.py').read())

    def test_transform_module_to_package_undoing(self):
        pkg = testutils.create_package(self.project, 'pkg')
        mod = testutils.create_module(self.project, 'mod', pkg)
        self._transform_module_to_package(mod)
        self.assertFalse(pkg.has_child('mod.py'))
        self.assertTrue(pkg.get_child('mod').has_child('__init__.py'))
        self.project.history.undo()
        self.assertTrue(pkg.has_child('mod.py'))
        self.assertFalse(pkg.has_child('mod'))

    def test_transform_module_to_package_with_relative_imports(self):
        pkg = testutils.create_package(self.project, 'pkg')
        mod1 = testutils.create_module(self.project, 'mod1', pkg)
        mod1.write('import mod2\nfrom mod2 import AClass\n')
        mod2 = testutils.create_module(self.project, 'mod2', pkg)
        mod2.write('class AClass(object):\n pass\n')
        self._transform_module_to_package(mod1)
        new_init = self.project.get_resource('pkg/mod1/__init__.py')
        self.assertEqual('import pkg.mod2\nfrom pkg.mod2 import AClass\n',
                         new_init.read())

    def test_resources_parameter(self):
        code = 'class A(object):\n an_attr = 10\n'
        code1 = 'import mod\na = mod.A()\n'
        mod = testutils.create_module(self.project, 'mod')
        mod1 = testutils.create_module(self.project, 'mod1')
        mod.write(code)
        mod1.write(code1)
        expected = 'class A(object):\n' \
                   ' an_attr = 10\n\n' \
                   ' @staticmethod\n' \
                   ' def create(*args, **kwds):\n' \
                   ' return A(*args, **kwds)\n'
        self._introduce_factory(mod, mod.read().index('A') + 1,
                                'create', resources=[mod])
        # Only *mod* is in `resources`, so mod1 must be untouched.
        self.assertEqual(expected, mod.read())
        self.assertEqual(code1, mod1.read())
class EncapsulateFieldTest(unittest.TestCase):
    """Tests for the encapsulate-field refactoring (EncapsulateField).

    Uses ``assertEqual`` instead of the deprecated ``assertEquals``
    alias (removed in Python 3.12); no behavioral change otherwise.
    """

    def setUp(self):
        super(EncapsulateFieldTest, self).setUp()
        self.project = testutils.sample_project()
        self.pycore = self.project.pycore
        self.mod = testutils.create_module(self.project, 'mod')
        self.mod1 = testutils.create_module(self.project, 'mod1')
        # Fixture class plus the getter/setter pair the refactoring adds.
        self.a_class = 'class A(object):\n' \
                       ' def __init__(self):\n' \
                       ' self.attr = 1\n'
        self.added_methods = '\n' \
                             ' def get_attr(self):\n' \
                             ' return self.attr\n\n' \
                             ' def set_attr(self, value):\n' \
                             ' self.attr = value\n'
        self.encapsulated = self.a_class + self.added_methods

    def tearDown(self):
        testutils.remove_project(self.project)
        super(EncapsulateFieldTest, self).tearDown()

    def _encapsulate(self, resource, offset, **args):
        changes = EncapsulateField(self.project, resource, offset).\
            get_changes(**args)
        self.project.do(changes)

    def test_adding_getters_and_setters(self):
        code = self.a_class
        self.mod.write(code)
        self._encapsulate(self.mod, code.index('attr') + 1)
        self.assertEqual(self.encapsulated, self.mod.read())

    def test_changing_getters_in_other_modules(self):
        code = 'import mod\n' \
               'a_var = mod.A()\n' \
               'range(a_var.attr)\n'
        self.mod1.write(code)
        self.mod.write(self.a_class)
        self._encapsulate(self.mod, self.mod.read().index('attr') + 1)
        expected = 'import mod\n' \
                   'a_var = mod.A()\n' \
                   'range(a_var.get_attr())\n'
        self.assertEqual(expected, self.mod1.read())

    def test_changing_setters_in_other_modules(self):
        code = 'import mod\n' \
               'a_var = mod.A()\n' \
               'a_var.attr = 1\n'
        self.mod1.write(code)
        self.mod.write(self.a_class)
        self._encapsulate(self.mod, self.mod.read().index('attr') + 1)
        expected = 'import mod\n' \
                   'a_var = mod.A()\n' \
                   'a_var.set_attr(1)\n'
        self.assertEqual(expected, self.mod1.read())

    def test_changing_getters_in_setters(self):
        code = 'import mod\n' \
               'a_var = mod.A()\n' \
               'a_var.attr = 1 + a_var.attr\n'
        self.mod1.write(code)
        self.mod.write(self.a_class)
        self._encapsulate(self.mod, self.mod.read().index('attr') + 1)
        expected = 'import mod\n' \
                   'a_var = mod.A()\n' \
                   'a_var.set_attr(1 + a_var.get_attr())\n'
        self.assertEqual(expected, self.mod1.read())

    def test_appending_to_class_end(self):
        self.mod1.write(self.a_class + 'a_var = A()\n')
        self._encapsulate(self.mod1, self.mod1.read().index('attr') + 1)
        self.assertEqual(self.encapsulated + 'a_var = A()\n',
                         self.mod1.read())

    def test_performing_in_other_modules(self):
        code = 'import mod\n' \
               'a_var = mod.A()\n' \
               'range(a_var.attr)\n'
        self.mod1.write(code)
        self.mod.write(self.a_class)
        self._encapsulate(self.mod1, self.mod1.read().index('attr') + 1)
        self.assertEqual(self.encapsulated, self.mod.read())
        expected = 'import mod\n' \
                   'a_var = mod.A()\n' \
                   'range(a_var.get_attr())\n'
        self.assertEqual(expected, self.mod1.read())

    def test_changing_main_module_occurances(self):
        code = self.a_class + \
            'a_var = A()\n' \
            'a_var.attr = a_var.attr * 2\n'
        self.mod1.write(code)
        self._encapsulate(self.mod1, self.mod1.read().index('attr') + 1)
        expected = self.encapsulated + \
            'a_var = A()\n' \
            'a_var.set_attr(a_var.get_attr() * 2)\n'
        self.assertEqual(expected, self.mod1.read())

    @testutils.assert_raises(RefactoringError)
    def test_raising_exception_when_performed_on_non_attributes(self):
        self.mod1.write('attr = 10')
        self._encapsulate(self.mod1, self.mod1.read().index('attr') + 1)

    @testutils.assert_raises(RefactoringError)
    def test_raising_exception_on_tuple_assignments(self):
        self.mod.write(self.a_class)
        code = 'import mod\n' \
               'a_var = mod.A()\n' \
               'a_var.attr = 1\n' \
               'a_var.attr, b = 1, 2\n'
        self.mod1.write(code)
        self._encapsulate(self.mod1, self.mod1.read().index('attr') + 1)

    @testutils.assert_raises(RefactoringError)
    def test_raising_exception_on_tuple_assignments2(self):
        self.mod.write(self.a_class)
        code = 'import mod\n' \
               'a_var = mod.A()\n' \
               'a_var.attr = 1\n' \
               'b, a_var.attr = 1, 2\n'
        self.mod1.write(code)
        self._encapsulate(self.mod1, self.mod1.read().index('attr') + 1)

    def test_tuple_assignments_and_function_calls(self):
        code = 'import mod\n' \
               'def func(a1=0, a2=0):\n' \
               ' pass\n' \
               'a_var = mod.A()\n' \
               'func(a_var.attr, a2=2)\n'
        self.mod1.write(code)
        self.mod.write(self.a_class)
        self._encapsulate(self.mod, self.mod.read().index('attr') + 1)
        expected = 'import mod\n' \
                   'def func(a1=0, a2=0):\n' \
                   ' pass\n' \
                   'a_var = mod.A()\n' \
                   'func(a_var.get_attr(), a2=2)\n'
        self.assertEqual(expected, self.mod1.read())

    def test_tuple_assignments(self):
        code = 'import mod\n' \
               'a_var = mod.A()\n' \
               'a, b = a_var.attr, 1\n'
        self.mod1.write(code)
        self.mod.write(self.a_class)
        self._encapsulate(self.mod, self.mod.read().index('attr') + 1)
        expected = 'import mod\n' \
                   'a_var = mod.A()\n' \
                   'a, b = a_var.get_attr(), 1\n'
        self.assertEqual(expected, self.mod1.read())

    def test_changing_augmented_assignments(self):
        code = 'import mod\n' \
               'a_var = mod.A()\n' \
               'a_var.attr += 1\n'
        self.mod1.write(code)
        self.mod.write(self.a_class)
        self._encapsulate(self.mod, self.mod.read().index('attr') + 1)
        expected = 'import mod\n' \
                   'a_var = mod.A()\n' \
                   'a_var.set_attr(a_var.get_attr() + 1)\n'
        self.assertEqual(expected, self.mod1.read())

    def test_changing_augmented_assignments2(self):
        code = 'import mod\n' \
               'a_var = mod.A()\n' \
               'a_var.attr <<= 1\n'
        self.mod1.write(code)
        self.mod.write(self.a_class)
        self._encapsulate(self.mod, self.mod.read().index('attr') + 1)
        expected = 'import mod\n' \
                   'a_var = mod.A()\n' \
                   'a_var.set_attr(a_var.get_attr() << 1)\n'
        self.assertEqual(expected, self.mod1.read())

    def test_changing_occurrences_inside_the_class(self):
        new_class = self.a_class + '\n' \
            ' def a_func(self):\n' \
            ' self.attr = 1\n'
        self.mod.write(new_class)
        self._encapsulate(self.mod, self.mod.read().index('attr') + 1)
        expected = self.a_class + '\n' \
            ' def a_func(self):\n' \
            ' self.set_attr(1)\n' + \
            self.added_methods
        self.assertEqual(expected, self.mod.read())

    def test_getter_and_setter_parameters(self):
        self.mod.write(self.a_class)
        self._encapsulate(self.mod, self.mod.read().index('attr') + 1,
                          getter='getAttr', setter='setAttr')
        new_methods = self.added_methods.replace('get_attr', 'getAttr').\
            replace('set_attr', 'setAttr')
        expected = self.a_class + new_methods
        self.assertEqual(expected, self.mod.read())

    def test_using_resources_parameter(self):
        self.mod1.write('import mod\na = mod.A()\nvar = a.attr\n')
        self.mod.write(self.a_class)
        self._encapsulate(self.mod, self.mod.read().index('attr') + 1,
                          resources=[self.mod])
        # mod1 is outside `resources`, so it must stay unchanged.
        self.assertEqual('import mod\na = mod.A()\nvar = a.attr\n',
                         self.mod1.read())
        expected = self.a_class + self.added_methods
        self.assertEqual(expected, self.mod.read())
class LocalToFieldTest(unittest.TestCase):
    """Tests for the local-variable-to-field refactoring (LocalToField).

    Uses ``assertEqual`` instead of the deprecated ``assertEquals``
    alias (removed in Python 3.12); no behavioral change otherwise.
    """

    def setUp(self):
        super(LocalToFieldTest, self).setUp()
        self.project = testutils.sample_project()
        self.pycore = self.project.pycore
        self.mod = testutils.create_module(self.project, 'mod')

    def tearDown(self):
        testutils.remove_project(self.project)
        super(LocalToFieldTest, self).tearDown()

    def _perform_convert_local_variable_to_field(self, resource, offset):
        changes = LocalToField(
            self.project, resource, offset).get_changes()
        self.project.do(changes)

    def test_simple_local_to_field(self):
        code = 'class A(object):\n' \
               ' def a_func(self):\n' \
               ' var = 10\n'
        self.mod.write(code)
        self._perform_convert_local_variable_to_field(self.mod,
                                                      code.index('var') + 1)
        expected = 'class A(object):\n' \
                   ' def a_func(self):\n' \
                   ' self.var = 10\n'
        self.assertEqual(expected, self.mod.read())

    @testutils.assert_raises(RefactoringError)
    def test_raising_exception_when_performed_on_a_global_var(self):
        self.mod.write('var = 10\n')
        self._perform_convert_local_variable_to_field(
            self.mod, self.mod.read().index('var') + 1)

    @testutils.assert_raises(RefactoringError)
    def test_raising_exception_when_performed_on_field(self):
        code = 'class A(object):\n' \
               ' def a_func(self):\n' \
               ' self.var = 10\n'
        self.mod.write(code)
        self._perform_convert_local_variable_to_field(
            self.mod, self.mod.read().index('var') + 1)

    @testutils.assert_raises(RefactoringError)
    def test_raising_exception_when_performed_on_a_parameter(self):
        code = 'class A(object):\n' \
               ' def a_func(self, var):\n' \
               ' a = var\n'
        self.mod.write(code)
        self._perform_convert_local_variable_to_field(
            self.mod, self.mod.read().index('var') + 1)

    # NOTE: This situation happens a lot and is normally not an error
    #@testutils.assert_raises(RefactoringError)
    def test_not_raising_exception_when_there_is_a_field_with_the_same_name(self):
        code = 'class A(object):\n' \
               ' def __init__(self):\n' \
               ' self.var = 1\n' \
               ' def a_func(self):\n var = 10\n'
        self.mod.write(code)
        self._perform_convert_local_variable_to_field(
            self.mod, self.mod.read().rindex('var') + 1)

    def test_local_to_field_with_self_renamed(self):
        code = 'class A(object):\n' \
               ' def a_func(myself):\n' \
               ' var = 10\n'
        self.mod.write(code)
        self._perform_convert_local_variable_to_field(self.mod,
                                                      code.index('var') + 1)
        expected = 'class A(object):\n' \
                   ' def a_func(myself):\n' \
                   ' myself.var = 10\n'
        self.assertEqual(expected, self.mod.read())
class IntroduceParameterTest(unittest.TestCase):
    """Tests for the introduce-parameter refactoring (IntroduceParameter).

    Uses ``assertEqual`` instead of the deprecated ``assertEquals``
    alias (removed in Python 3.12); no behavioral change otherwise.
    """

    def setUp(self):
        super(IntroduceParameterTest, self).setUp()
        self.project = testutils.sample_project()
        self.pycore = self.project.pycore
        self.mod = testutils.create_module(self.project, 'mod')

    def tearDown(self):
        testutils.remove_project(self.project)
        super(IntroduceParameterTest, self).tearDown()

    def _introduce_parameter(self, offset, name):
        rope.refactor.introduce_parameter.IntroduceParameter(
            self.project, self.mod, offset).get_changes(name).do()

    def test_simple_case(self):
        code = 'var = 1\n' \
               'def f():\n' \
               ' b = var\n'
        self.mod.write(code)
        offset = self.mod.read().rindex('var')
        self._introduce_parameter(offset, 'var')
        expected = 'var = 1\n' \
                   'def f(var=var):\n' \
                   ' b = var\n'
        self.assertEqual(expected, self.mod.read())

    def test_changing_function_body(self):
        code = 'var = 1\n' \
               'def f():\n' \
               ' b = var\n'
        self.mod.write(code)
        offset = self.mod.read().rindex('var')
        self._introduce_parameter(offset, 'p1')
        expected = 'var = 1\n' \
                   'def f(p1=var):\n' \
                   ' b = p1\n'
        self.assertEqual(expected, self.mod.read())

    @testutils.assert_raises(RefactoringError)
    def test_unknown_variables(self):
        self.mod.write('def f():\n b = var + c\n')
        offset = self.mod.read().rindex('var')
        self._introduce_parameter(offset, 'p1')
        # Unreachable when the expected error is raised above.
        self.assertEqual('def f(p1=var):\n b = p1 + c\n',
                         self.mod.read())

    @testutils.assert_raises(RefactoringError)
    def test_failing_when_not_inside(self):
        self.mod.write('var = 10\nb = var\n')
        offset = self.mod.read().rindex('var')
        self._introduce_parameter(offset, 'p1')

    def test_attribute_accesses(self):
        code = 'class C(object):\n' \
               ' a = 10\nc = C()\n' \
               'def f():\n' \
               ' b = c.a\n'
        self.mod.write(code)
        offset = self.mod.read().rindex('a')
        self._introduce_parameter(offset, 'p1')
        expected = 'class C(object):\n' \
                   ' a = 10\n' \
                   'c = C()\n' \
                   'def f(p1=c.a):\n' \
                   ' b = p1\n'
        self.assertEqual(expected, self.mod.read())

    def test_introducing_parameters_for_methods(self):
        code = 'var = 1\n' \
               'class C(object):\n' \
               ' def f(self):\n' \
               ' b = var\n'
        self.mod.write(code)
        offset = self.mod.read().rindex('var')
        self._introduce_parameter(offset, 'p1')
        expected = 'var = 1\n' \
                   'class C(object):\n' \
                   ' def f(self, p1=var):\n' \
                   ' b = p1\n'
        self.assertEqual(expected, self.mod.read())
class _MockTaskObserver(object):
def __init__(self):
self.called = 0
def __call__(self):
self.called += 1
class TaskHandleTest(unittest.TestCase):
    """Tests for rope.base.taskhandle.TaskHandle, job sets and observers.

    Uses ``assertEqual`` instead of the deprecated ``assertEquals``
    alias (removed in Python 3.12); no behavioral change otherwise.
    """

    def test_trivial_case(self):
        handle = rope.base.taskhandle.TaskHandle()
        self.assertFalse(handle.is_stopped())

    def test_stopping(self):
        handle = rope.base.taskhandle.TaskHandle()
        handle.stop()
        self.assertTrue(handle.is_stopped())

    def test_job_sets(self):
        handle = rope.base.taskhandle.TaskHandle()
        jobs = handle.create_jobset()
        self.assertEqual([jobs], handle.get_jobsets())

    def test_starting_and_finishing_jobs(self):
        handle = rope.base.taskhandle.TaskHandle()
        jobs = handle.create_jobset(name='test job set', count=1)
        jobs.started_job('job1')
        jobs.finished_job()

    @testutils.assert_raises(InterruptedTaskError)
    def test_test_checking_status(self):
        handle = rope.base.taskhandle.TaskHandle()
        jobs = handle.create_jobset()
        handle.stop()
        jobs.check_status()

    @testutils.assert_raises(InterruptedTaskError)
    def test_test_checking_status_when_starting(self):
        handle = rope.base.taskhandle.TaskHandle()
        jobs = handle.create_jobset()
        handle.stop()
        jobs.started_job('job1')

    def test_calling_the_observer_after_stopping(self):
        handle = rope.base.taskhandle.TaskHandle()
        observer = _MockTaskObserver()
        handle.add_observer(observer)
        handle.stop()
        self.assertEqual(1, observer.called)

    def test_calling_the_observer_after_creating_job_sets(self):
        handle = rope.base.taskhandle.TaskHandle()
        observer = _MockTaskObserver()
        handle.add_observer(observer)
        jobs = handle.create_jobset()
        self.assertEqual(1, observer.called)

    def test_calling_the_observer_when_starting_and_finishing_jobs(self):
        handle = rope.base.taskhandle.TaskHandle()
        observer = _MockTaskObserver()
        handle.add_observer(observer)
        jobs = handle.create_jobset(name='test job set', count=1)
        jobs.started_job('job1')
        jobs.finished_job()
        # One notification per event: create, start, finish.
        self.assertEqual(3, observer.called)

    def test_job_set_get_percent_done(self):
        handle = rope.base.taskhandle.TaskHandle()
        jobs = handle.create_jobset(name='test job set', count=2)
        self.assertEqual(0, jobs.get_percent_done())
        jobs.started_job('job1')
        jobs.finished_job()
        self.assertEqual(50, jobs.get_percent_done())
        jobs.started_job('job2')
        jobs.finished_job()
        self.assertEqual(100, jobs.get_percent_done())

    def test_getting_job_name(self):
        handle = rope.base.taskhandle.TaskHandle()
        jobs = handle.create_jobset(name='test job set', count=1)
        self.assertEqual('test job set', jobs.get_name())
        self.assertEqual(None, jobs.get_active_job_name())
        jobs.started_job('job1')
        self.assertEqual('job1', jobs.get_active_job_name())
def suite():
    """Build the combined test suite of all refactoring tests.

    The order of the sub-suites matches the historical registration
    order.
    """
    result = unittest.TestSuite()
    subsuites = [
        ropetest.refactor.renametest.suite(),
        unittest.makeSuite(ropetest.refactor.extracttest.ExtractMethodTest),
        unittest.makeSuite(IntroduceFactoryTest),
        unittest.makeSuite(ropetest.refactor.movetest.MoveRefactoringTest),
        ropetest.refactor.inlinetest.suite(),
        unittest.makeSuite(ropetest.refactor.patchedasttest.PatchedASTTest),
        unittest.makeSuite(EncapsulateFieldTest),
        unittest.makeSuite(LocalToFieldTest),
        unittest.makeSuite(change_signature_test.ChangeSignatureTest),
        unittest.makeSuite(IntroduceParameterTest),
        ropetest.refactor.importutilstest.suite(),
        similarfindertest.suite(),
        unittest.makeSuite(TaskHandleTest),
        unittest.makeSuite(ropetest.refactor.restructuretest.RestructureTest),
        unittest.makeSuite(ropetest.refactor.suitestest.SuiteTest),
        unittest.makeSuite(
            ropetest.refactor.multiprojecttest.MultiProjectRefactoringTest),
        unittest.makeSuite(ropetest.refactor.usefunctiontest.UseFunctionTest),
    ]
    for subsuite in subsuites:
        result.addTests(subsuite)
    return result
if __name__ == '__main__':
    import sys
    # With extra command-line arguments, let unittest select what to
    # run; otherwise run the whole suite() and exit non-zero on failure.
    # (A stray dataset-extraction artifact appended to the last line has
    # been removed; it made this block a syntax error.)
    if len(sys.argv) > 1:
        unittest.main()
    else:
        runner = unittest.TextTestRunner()
        result = runner.run(suite())
        sys.exit(not result.wasSuccessful())
import rope.base.codeanalyze
import rope.base.evaluate
from rope.base import worder, exceptions, utils
from rope.base.codeanalyze import ArrayLinesAdapter, LogicalLineFinder
class FixSyntax(object):
    """Build a `PyModule` from `code` even when it has syntax errors.

    Broken logical lines are commented out (via `_Commenter`) up to
    `maxfixes` times until the module parses; offsets into the original
    code can then be mapped onto the fixed code.
    """

    def __init__(self, pycore, code, resource, maxfixes=1):
        self.pycore = pycore
        self.code = code
        self.resource = resource
        # Maximum number of syntax errors to work around before
        # re-raising `ModuleSyntaxError`.
        self.maxfixes = maxfixes

    @utils.saveit
    def get_pymodule(self):
        """Get a `PyModule`"""
        errors = []
        code = self.code
        tries = 0
        while True:
            try:
                # On the first try, if the resource's saved contents equal
                # `code`, reuse the project's module object for it.
                if tries == 0 and self.resource is not None and \
                   self.resource.read() == code:
                    return self.pycore.resource_to_pyobject(self.resource,
                                                            force_errors=True)
                return self.pycore.get_string_module(
                    code, resource=self.resource, force_errors=True)
            except exceptions.ModuleSyntaxError as e:
                if tries < self.maxfixes:
                    # Comment out the offending logical line and retry.
                    tries += 1
                    self.commenter.comment(e.lineno)
                    code = '\n'.join(self.commenter.lines)
                    errors.append(' * line %s: %s ... fixed' % (e.lineno,
                                                                e.message_))
                else:
                    # Out of fixes: report everything we tried, then raise.
                    errors.append(' * line %s: %s ... raised!' % (e.lineno,
                                                                  e.message_))
                    new_message = ('\nSyntax errors in file %s:\n' % e.filename) \
                        + '\n'.join(errors)
                    raise exceptions.ModuleSyntaxError(e.filename, e.lineno,
                                                       new_message)

    @property
    @utils.saveit
    def commenter(self):
        # Lazily created and cached; accumulates all fixes applied so far.
        return _Commenter(self.code)

    def pyname_at(self, offset):
        """Return the pyname at `offset`, evaluated against the fixed module."""
        pymodule = self.get_pymodule()
        def old_pyname():
            # Fall back to textually evaluating the primary expression
            # from the *original* (unfixed) code.
            word_finder = worder.Worder(self.code, True)
            expression = word_finder.get_primary_at(offset)
            expression = expression.replace('\\\n', ' ').replace('\n', ' ')
            lineno = self.code.count('\n', 0, offset)
            scope = pymodule.get_scope().get_inner_scope_for_line(lineno)
            return rope.base.evaluate.eval_str(scope, expression)
        new_code = pymodule.source_code
        def new_pyname():
            # Evaluate at the offset translated into the fixed code.
            newoffset = self.commenter.transfered_offset(offset)
            return rope.base.evaluate.eval_location(pymodule, newoffset)
        if new_code.startswith(self.code[:offset + 1]):
            # Everything up to the offset survived the fixes unchanged.
            return new_pyname()
        result = old_pyname()
        if result is None:
            return new_pyname()
        return result
class _Commenter(object):
    """Comment out broken logical lines, tracking offset changes.

    `origs` maps current line indices back to original ones, and
    `diffs` records per original line how many characters the applied
    fixes added or removed, so `transfered_offset` can map offsets in
    the original code onto the fixed code.
    """

    def __init__(self, code):
        self.code = code
        self.lines = self.code.split('\n')
        self.lines.append('\n')
        self.origs = list(range(len(self.lines) + 1))
        self.diffs = [0] * (len(self.lines) + 1)

    def comment(self, lineno):
        """Replace the logical line around `lineno` with an equivalent ``pass``."""
        start = _logical_start(self.lines, lineno, check_prev=True) - 1
        # using self._get_stmt_end() instead of self._get_block_end()
        # to lower commented lines
        end = self._get_stmt_end(start)
        indents = _get_line_indents(self.lines[start])
        if 0 < start:
            last_lineno = self._last_non_blank(start - 1)
            last_line = self.lines[last_lineno]
            if last_line.rstrip().endswith(':'):
                # The previous statement opens a block; indent the
                # `pass` so that header stays syntactically valid.
                indents = _get_line_indents(last_line) + 4
        self._set(start, ' ' * indents + 'pass')
        # Overwrite the remaining physical lines of the statement with
        # copies of the `pass` line so line counts are preserved.
        for line in range(start + 1, end + 1):
            self._set(line, self.lines[start])
        self._fix_incomplete_try_blocks(lineno, indents)

    def transfered_offset(self, offset):
        """Map `offset` in the original code onto the fixed code."""
        lineno = self.code.count('\n', 0, offset)
        diff = sum(self.diffs[:lineno])
        return offset + diff

    def _last_non_blank(self, start):
        # Index of the closest non-blank line at or above `start`.
        while start > 0 and self.lines[start].strip() == '':
            start -= 1
        return start

    def _get_block_end(self, lineno):
        # Last following line still indented at least as deep as `lineno`.
        end_line = lineno
        base_indents = _get_line_indents(self.lines[lineno])
        for i in range(lineno + 1, len(self.lines)):
            if _get_line_indents(self.lines[i]) >= base_indents:
                end_line = i
            else:
                break
        return end_line

    def _get_stmt_end(self, lineno):
        # The statement ends just before the first following line at the
        # same or a shallower indentation level.
        end_line = lineno
        base_indents = _get_line_indents(self.lines[lineno])
        for i in range(lineno + 1, len(self.lines)):
            if _get_line_indents(self.lines[i]) <= base_indents:
                return i - 1
        return lineno

    def _fix_incomplete_try_blocks(self, lineno, indents):
        # Walk enclosing blocks outward from `lineno`; any `try:` that
        # lacks a handler clause gets a synthetic `finally: pass` so the
        # module can parse.
        block_start = lineno
        last_indents = current_indents = indents
        while block_start > 0:
            block_start = rope.base.codeanalyze.get_block_start(
                ArrayLinesAdapter(self.lines), block_start) - 1
            if self.lines[block_start].strip().startswith('try:'):
                indents = _get_line_indents(self.lines[block_start])
                if indents > last_indents:
                    continue
                last_indents = indents
                block_end = self._find_matching_deindent(block_start)
                line = self.lines[block_end].strip()
                if not (line.startswith('finally:') or
                        line.startswith('except ') or
                        line.startswith('except:')):
                    self._insert(block_end, ' ' * indents + 'finally:')
                    self._insert(block_end + 1, ' ' * indents + '    pass')

    def _find_matching_deindent(self, line_number):
        # First non-comment, non-blank line at the same or a shallower
        # indentation than `line_number`.
        indents = _get_line_indents(self.lines[line_number])
        current_line = line_number + 1
        while current_line < len(self.lines):
            line = self.lines[current_line]
            if not line.strip().startswith('#') and not line.strip() == '':
                # HACK: We should have used logical lines here
                if _get_line_indents(self.lines[current_line]) <= indents:
                    return current_line
            current_line += 1
        return len(self.lines) - 1

    def _set(self, lineno, line):
        # Record the length change against the *original* line index so
        # transfered_offset() can translate offsets later.
        self.diffs[self.origs[lineno]] += len(line) - len(self.lines[lineno])
        self.lines[lineno] = line

    def _insert(self, lineno, line):
        # +1 accounts for the newline the inserted line introduces.
        self.diffs[self.origs[lineno]] += len(line) + 1
        self.origs.insert(lineno, self.origs[lineno])
        self.lines.insert(lineno, line)
def _logical_start(lines, lineno, check_prev=False):
    """Return the first line number of the logical line containing `lineno`.

    With `check_prev`, earlier physical lines are examined first, in
    case `lineno` is a continuation of a logical line started above.
    """
    logical_finder = LogicalLineFinder(ArrayLinesAdapter(lines))
    if check_prev:
        prev = lineno - 1
        while prev > 0:
            start, end = logical_finder.logical_line_in(prev)
            if end is None or start <= lineno < end:
                return start
            if start <= prev:
                break
            prev -= 1
    return logical_finder.logical_line_in(lineno)[0]
def _get_line_indents(line):
    """Return the number of indentation characters at the start of `line`.

    Thin wrapper over `rope.base.codeanalyze.count_line_indents`.  (A
    stray dataset-extraction artifact appended to the return line has
    been removed; it made the line a syntax error.)
    """
    return rope.base.codeanalyze.count_line_indents(line)
from rope.base import ast, evaluate, pyobjects
def find_errors(project, resource):
    """Find possible bad name and attribute accesses.

    Returns a list of `Error` objects describing the suspicious
    accesses found in `resource`.
    """
    module = project.pycore.resource_to_pyobject(resource)
    visitor = _BadAccessFinder(module)
    ast.walk(module.get_ast(), visitor)
    return visitor.errors
class _BadAccessFinder(object):
    """AST visitor that collects unresolved or ill-ordered name accesses."""

    def __init__(self, pymodule):
        self.pymodule = pymodule
        self.scope = pymodule.get_scope()
        # Accumulated `Error` objects, in visit order.
        self.errors = []

    def _Name(self, node):
        # Only loads can be "unresolved"; stores and parameters are
        # definitions themselves.
        if isinstance(node.ctx, (ast.Store, ast.Param)):
            return
        scope = self.scope.get_inner_scope_for_line(node.lineno)
        pyname = scope.lookup(node.id)
        if pyname is None:
            self._add_error(node, 'Unresolved variable')
        elif self._is_defined_after(scope, pyname, node.lineno):
            self._add_error(node, 'Defined later')

    def _Attribute(self, node):
        if not isinstance(node.ctx, ast.Store):
            scope = self.scope.get_inner_scope_for_line(node.lineno)
            pyname = evaluate.eval_node(scope, node.value)
            # Only complain when the owner object is actually known;
            # unknown objects would produce false positives.
            if pyname is not None and \
               pyname.get_object() != pyobjects.get_unknown():
                if node.attr not in pyname.get_object():
                    self._add_error(node, 'Unresolved attribute')
        # Keep walking the value expression for nested accesses.
        ast.walk(node.value, self)

    def _add_error(self, node, msg):
        if isinstance(node, ast.Attribute):
            name = node.attr
        else:
            name = node.id
        # `None` lookups are expected to fail; don't report them.
        if name != 'None':
            error = Error(node.lineno, msg + ' ' + name)
            self.errors.append(error)

    def _is_defined_after(self, scope, pyname, lineno):
        # True only when the definition lies in this same module, after
        # `lineno` but still inside `scope`; otherwise returns None
        # (falsy), which the caller treats as "not defined later".
        location = pyname.get_definition_location()
        if location is not None and location[1] is not None:
            if location[0] == self.pymodule and \
               lineno <= location[1] <= scope.get_end():
                return True
class Error(object):
    """A suspicious access: a line number plus a human-readable message.

    (A stray dataset-extraction artifact appended to the final line has
    been removed; it made `__str__` a syntax error.)
    """

    def __init__(self, lineno, error):
        self.lineno = lineno
        self.error = error

    def __str__(self):
        return '%s: %s' % (self.lineno, self.error)
import re
from rope.base import (exceptions, pynames, resourceobserver,
taskhandle, pyobjects, builtins, resources)
from rope.refactor import importutils
class AutoImport(object):
    """A class for finding the module that provides a name

    This class maintains a cache of global names in python modules.
    Note that this cache is not accurate and might be out of date.
    """

    def __init__(self, project, observe=True, underlined=False):
        """Construct an AutoImport object

        If `observe` is `True`, listen for project changes and update
        the cache.

        If `underlined` is `True`, underlined names are cached, too.
        """
        self.project = project
        self.underlined = underlined
        # Mapping of module name -> list of global names it defines;
        # persisted through the project's data files.
        self.names = project.data_files.read_data('globalnames')
        if self.names is None:
            self.names = {}
        project.data_files.add_write_hook(self._write)
        # XXX: using a filtered observer
        observer = resourceobserver.ResourceObserver(
            changed=self._changed, moved=self._moved, removed=self._removed)
        if observe:
            project.add_observer(observer)

    def import_assist(self, starting):
        """Return a list of ``(name, module)`` tuples

        This function tries to find modules that have a global name
        that starts with `starting`.
        """
        # XXX: break if we gave up!  use generators
        result = []
        for module, global_names in self.names.items():
            for global_name in global_names:
                if global_name.startswith(starting):
                    result.append((global_name, module))
        return result

    def get_modules(self, name):
        """Return the list of modules that have global `name`"""
        return [module for module in self.names
                if name in self.names[module]]

    def get_all_names(self):
        """Return the set of all cached global names"""
        result = set()
        for global_names in self.names.values():
            result.update(global_names)
        return result

    def get_name_locations(self, name):
        """Return a list of ``(resource, lineno)`` tuples for `name`"""
        result = []
        pycore = self.project.pycore
        for modname in self.names:
            if name in self.names[modname]:
                try:
                    pymodule = pycore.get_module(modname)
                    if name in pymodule:
                        pyname = pymodule[name]
                        module, lineno = pyname.get_definition_location()
                        if module is not None:
                            resource = module.get_module().get_resource()
                            if resource is not None and lineno is not None:
                                result.append((resource, lineno))
                except exceptions.ModuleNotFoundError:
                    # Stale cache entry; skip silently.
                    pass
        return result

    def generate_cache(self, resources=None, underlined=None,
                       task_handle=taskhandle.NullTaskHandle()):
        """Generate global name cache for project files

        If `resources` is a list of `rope.base.resource.File` objects,
        only those files are searched; otherwise all python modules in
        the project are cached.
        """
        if resources is None:
            resources = self.project.pycore.get_python_files()
        job_set = task_handle.create_jobset(
            'Generating autoimport cache', len(resources))
        for resource in resources:
            job_set.started_job('Working on <%s>' % resource.path)
            self.update_resource(resource, underlined)
            job_set.finished_job()

    def generate_modules_cache(self, modules, underlined=None,
                               task_handle=taskhandle.NullTaskHandle()):
        """Generate global name cache for modules listed in `modules`"""
        job_set = task_handle.create_jobset(
            'Generating autoimport cache for modules', len(modules))
        for modname in modules:
            job_set.started_job('Working on <%s>' % modname)
            if modname.endswith('.*'):
                # A trailing `.*` means "this package and everything
                # below it".
                mod = self.project.pycore.find_module(modname[:-2])
                if mod:
                    for sub in submodules(mod):
                        self.update_resource(sub, underlined)
            else:
                self.update_module(modname, underlined)
            job_set.finished_job()

    def clear_cache(self):
        """Clear all entries in global-name cache

        It might be a good idea to use this function before
        regenerating global names.
        """
        self.names.clear()

    def find_insertion_line(self, code):
        """Guess at what line the new import should be inserted"""
        # NOTE(review): without re.M this only matches a def/class at
        # the very start of `code` -- confirm that is intended.
        match = re.search(r'^(def|class)\s+', code)
        if match is not None:
            code = code[:match.start()]
        try:
            pymodule = self.project.pycore.get_string_module(code)
        except exceptions.ModuleSyntaxError:
            return 1
        # Add a dummy import and see where rope's import machinery
        # decides to place it.
        testmodname = '__rope_testmodule_rope'
        importinfo = importutils.NormalImport(((testmodname, None),))
        module_imports = importutils.get_module_imports(
            self.project.pycore, pymodule)
        module_imports.add_import(importinfo)
        code = module_imports.get_changed_source()
        offset = code.index(testmodname)
        lineno = code.count('\n', 0, offset) + 1
        return lineno

    def update_resource(self, resource, underlined=None):
        """Update the cache for global names in `resource`"""
        try:
            pymodule = self.project.pycore.resource_to_pyobject(resource)
            modname = self._module_name(resource)
            self._add_names(pymodule, modname, underlined)
        except exceptions.ModuleSyntaxError:
            pass

    def update_module(self, modname, underlined=None):
        """Update the cache for global names in `modname` module

        `modname` is the name of a module.
        """
        try:
            pymodule = self.project.pycore.get_module(modname)
            self._add_names(pymodule, modname, underlined)
        except exceptions.ModuleNotFoundError:
            pass

    def _module_name(self, resource):
        return self.project.pycore.modname(resource)

    def _add_names(self, pymodule, modname, underlined):
        if underlined is None:
            underlined = self.underlined
        # Renamed from `globals` to avoid shadowing the builtin.
        global_names = []
        if isinstance(pymodule, pyobjects.PyDefinedObject):
            attributes = pymodule._get_structural_attributes()
        else:
            attributes = pymodule.get_attributes()
        for name, pyname in attributes.items():
            if not underlined and name.startswith('_'):
                continue
            if isinstance(pyname, (pynames.AssignedName, pynames.DefinedName)):
                global_names.append(name)
            if isinstance(pymodule, builtins.BuiltinModule):
                global_names.append(name)
        self.names[modname] = global_names

    def _write(self):
        self.project.data_files.write_data('globalnames', self.names)

    def _changed(self, resource):
        if not resource.is_folder():
            self.update_resource(resource)

    def _moved(self, resource, newresource):
        if not resource.is_folder():
            modname = self._module_name(resource)
            if modname in self.names:
                del self.names[modname]
            self.update_resource(newresource)

    def _removed(self, resource):
        if not resource.is_folder():
            modname = self._module_name(resource)
            if modname in self.names:
                del self.names[modname]
def submodules(mod):
    """Return the set of python modules at or below `mod`.

    `mod` may be a source file or a package folder; folders without an
    `__init__.py` are not packages and contribute nothing.  (A stray
    dataset-extraction artifact appended to the final line has been
    removed; it made the return a syntax error.)
    """
    if isinstance(mod, resources.File):
        if mod.name.endswith('.py') and mod.name != '__init__.py':
            return set([mod])
        return set()
    if not mod.has_child('__init__.py'):
        return set()
    result = set([mod])
    for child in mod.get_children():
        result |= submodules(child)
    return result
import rope.base.codeanalyze
import rope.base.evaluate
import rope.base.pyobjects
from rope.base import taskhandle, exceptions, worder
from rope.contrib import fixsyntax
from rope.refactor import occurrences
def find_occurrences(project, resource, offset, unsure=False, resources=None,
                     in_hierarchy=False, task_handle=taskhandle.NullTaskHandle()):
    """Return a list of `Location` objects for the name at `offset`.

    If `unsure` is `True`, possible matches are returned, too; use
    `Location.unsure` to see which occurrences are unsure.

    `resources` can be a list of `rope.base.resource.File` objects to
    search; if `None`, all python files in the project are searched.
    """
    target_name = worder.get_name_at(resource, offset)
    pymodule = project.pycore.resource_to_pyobject(resource)
    primary, pyname = rope.base.evaluate.eval_location2(pymodule, offset)
    def unsure_filter(occurrence):
        # When `unsure` is true, every possible match is accepted.
        return unsure
    finder = occurrences.create_finder(
        project.pycore, target_name, pyname, unsure=unsure_filter,
        in_hierarchy=in_hierarchy, instance=primary)
    searched = resources
    if searched is None:
        searched = project.pycore.get_python_files()
    job_set = task_handle.create_jobset('Finding Occurrences',
                                        count=len(searched))
    return _find_locations(finder, searched, job_set)
def find_implementations(project, resource, offset, resources=None,
                         task_handle=taskhandle.NullTaskHandle()):
    """Find the places a given method is overridden.

    Finds the places a method is implemented.  Returns a list of
    `Location` objects.
    """
    name = worder.get_name_at(resource, offset)
    this_pymodule = project.pycore.resource_to_pyobject(resource)
    pyname = rope.base.evaluate.eval_location(this_pymodule, offset)
    if pyname is not None:
        pyobject = pyname.get_object()
        if not isinstance(pyobject, rope.base.pyobjects.PyFunction) or \
           pyobject.get_kind() != 'method':
            raise exceptions.BadIdentifierError('Not a method!')
    else:
        raise exceptions.BadIdentifierError('Cannot resolve the identifier!')
    def is_defined(occurrence):
        # False rejects the occurrence; returning None (implicitly)
        # leaves the decision to the remaining filters.
        if not occurrence.is_defined():
            return False
    def not_self(occurrence):
        # Skip the very definition the search started from.
        if occurrence.get_pyname().get_object() == pyname.get_object():
            return False
    filters = [is_defined, not_self,
               occurrences.InHierarchyFilter(pyname, True)]
    finder = occurrences.Finder(project.pycore, name, filters=filters)
    if resources is None:
        resources = project.pycore.get_python_files()
    job_set = task_handle.create_jobset('Finding Implementations',
                                        count=len(resources))
    return _find_locations(finder, resources, job_set)
def find_definition(project, code, offset, resource=None, maxfixes=1):
    """Return the definition location of the python name at `offset`

    A `Location` object is returned if the definition location can be
    determined, otherwise ``None`` is returned.
    """
    fixer = fixsyntax.FixSyntax(project.pycore, code, resource, maxfixes)
    # Builds (and caches) the possibly syntax-fixed module.
    main_module = fixer.get_pymodule()
    pyname = fixer.pyname_at(offset)
    if pyname is not None:
        module, lineno = pyname.get_definition_location()
        name = rope.base.worder.Worder(code).get_word_at(offset)
        if lineno is not None:
            start = module.lines.get_line_start(lineno)
            def check_offset(occurrence):
                # Reject occurrences before the definition line; the
                # implicit None defers to the next filter.
                if occurrence.offset < start:
                    return False
            pyname_filter = occurrences.PyNameFilter(pyname)
            finder = occurrences.Finder(project.pycore, name,
                                        [check_offset, pyname_filter])
            # Return the first occurrence on/after the definition line.
            for occurrence in finder.find_occurrences(pymodule=module):
                return Location(occurrence)
class Location(object):
    """A resolved occurrence: its resource, word range and line number."""

    def __init__(self, occurrence):
        start, end = occurrence.get_word_range()
        self.resource = occurrence.resource
        self.region = (start, end)
        self.offset = start
        self.unsure = occurrence.is_unsure()
        self.lineno = occurrence.lineno
def _find_locations(finder, resources, job_set):
    """Run `finder` over each resource, reporting progress to `job_set`.

    Returns the found occurrences wrapped as `Location` objects.  (A
    stray dataset-extraction artifact appended to the final line has
    been removed; it made the return a syntax error.)
    """
    result = []
    for resource in resources:
        job_set.started_job(resource.path)
        for occurrence in finder.find_occurrences(resource):
            result.append(Location(occurrence))
        job_set.finished_job()
    return result
import keyword
import sys
import warnings
import rope.base.codeanalyze
import rope.base.evaluate
from rope.base import pyobjects, pyobjectsdef, pynames, builtins, exceptions, worder
from rope.base.codeanalyze import SourceLinesAdapter
from rope.contrib import fixsyntax
from rope.refactor import functionutils
def code_assist(project, source_code, offset, resource=None,
                templates=None, maxfixes=1, later_locals=True):
    """Return python code completions as a list of `CodeAssistProposal` objects.

    `resource` is a `rope.base.resources.Resource` object; when given,
    relative imports can be resolved.

    `maxfixes` is the maximum number of syntax errors to work around
    when the code does not parse.

    When `later_locals` is `False`, names defined in this scope after
    the completion line are ignored.
    """
    if templates is not None:
        warnings.warn('Codeassist no longer supports templates',
                      DeprecationWarning, stacklevel=2)
    assistant = _PythonCodeAssist(project, source_code, offset,
                                  resource=resource, maxfixes=maxfixes,
                                  later_locals=later_locals)
    return assistant()
def starting_offset(source_code, offset):
    """Return the offset at which a completion should be inserted.

    Usually code assist proposals should be inserted like::

        completion = proposal.name
        result = (source_code[:starting_offset] +
                  completion + source_code[offset:])

    where ``starting_offset`` is the offset returned by this function.
    """
    finder = worder.Worder(source_code, True)
    _expression, _starting, start = finder.get_splitted_primary_before(offset)
    return start
def get_doc(project, source_code, offset, resource=None, maxfixes=1):
    """Return the pydoc of the object at `offset`, or None."""
    fixer = fixsyntax.FixSyntax(project.pycore, source_code,
                                resource, maxfixes)
    fixer.get_pymodule()  # build (and cache) the possibly syntax-fixed module
    pyname = fixer.pyname_at(offset)
    if pyname is None:
        return None
    return PyDocExtractor().get_doc(pyname.get_object())
def get_calltip(project, source_code, offset, resource=None,
                maxfixes=1, ignore_unknown=False, remove_self=False):
    """Get the calltip of a function.

    Returns a string of the form
    ``module_name.holding_scope_names.function_name(arguments)``.  For
    classes the `__init__()` method and for plain objects the
    `__call__()` method is used.

    Note that `offset` should be on the function name itself, *not*
    after its open parenthesis.  A simple search such as::

        offset = source_code.rindex('(', 0, offset) - 1

    handles the common case.

    If `ignore_unknown` is `True`, `None` is returned for functions
    without source code, such as builtins and extensions.

    If `remove_self` is `True`, the first parameter is dropped for
    methods when it is named ``self``.
    """
    fixer = fixsyntax.FixSyntax(project.pycore, source_code,
                                resource, maxfixes)
    fixer.get_pymodule()  # build (and cache) the possibly syntax-fixed module
    pyname = fixer.pyname_at(offset)
    if pyname is None:
        return None
    return PyDocExtractor().get_calltip(pyname.get_object(),
                                        ignore_unknown, remove_self)
def get_definition_location(project, source_code, offset,
                            resource=None, maxfixes=1):
    """Return the definition location of the python name at `offset`.

    Returns a ``(rope.base.resources.Resource, lineno)`` tuple.  When
    no `resource` is given and the definition is inside the same
    module, the first element is `None`.  When the location cannot be
    determined, ``(None, None)`` is returned.
    """
    fixer = fixsyntax.FixSyntax(project.pycore, source_code,
                                resource, maxfixes)
    fixer.get_pymodule()  # build (and cache) the possibly syntax-fixed module
    pyname = fixer.pyname_at(offset)
    if pyname is None:
        return (None, None)
    module, lineno = pyname.get_definition_location()
    if module is None:
        return (None, None)
    return module.get_module().get_resource(), lineno
def find_occurrences(*args, **kwds):
    """Deprecated: delegates to `rope.contrib.findit.find_occurrences()`."""
    import rope.contrib.findit
    warnings.warn('Use `rope.contrib.findit.find_occurrences()` instead',
                  DeprecationWarning, stacklevel=2)
    return rope.contrib.findit.find_occurrences(*args, **kwds)
class CompletionProposal(object):
    """A completion proposal.

    The `scope` instance variable shows where the proposed name came
    from and can be 'global', 'local', 'builtin', 'attribute',
    'keyword', 'imported', 'parameter_keyword'.

    The `type` instance variable shows the approximate type of the
    proposed object and can be 'instance', 'class', 'function',
    'module', and `None`.

    All possible relations between a proposal's `scope` and `type`
    (different scopes in rows and types in columns):

                          | instance | class | function | module | None
        local             |    +     |   +   |    +     |   +    |
        global            |    +     |   +   |    +     |   +    |
        builtin           |    +     |   +   |    +     |        |
        attribute         |    +     |   +   |    +     |   +    |
        imported          |    +     |   +   |    +     |   +    |
        keyword           |          |       |          |        |  +
        parameter_keyword |          |       |          |        |  +
    """

    def __init__(self, name, scope, pyname=None):
        self.name = name
        self.pyname = pyname
        self.scope = self._get_scope(scope)

    def __str__(self):
        return '%s (%s, %s)' % (self.name, self.scope, self.type)

    def __repr__(self):
        return str(self)

    @property
    def parameters(self):
        """The names of the parameters the function takes.

        Returns None if this completion is not a function.
        """
        pyname = self.pyname
        if isinstance(pyname, pynames.ImportedName):
            pyname = pyname._get_imported_pyname()
        if isinstance(pyname, pynames.DefinedName):
            pyobject = pyname.get_object()
            if isinstance(pyobject, pyobjects.AbstractFunction):
                return pyobject.get_param_names()

    @property
    def type(self):
        """Approximate type of the proposed object (see class docstring)."""
        pyname = self.pyname
        if isinstance(pyname, builtins.BuiltinName):
            pyobject = pyname.get_object()
            if isinstance(pyobject, builtins.BuiltinFunction):
                return 'function'
            elif isinstance(pyobject, builtins.BuiltinClass):
                # (removed an unused `clsobj = pyobject.builtin` local)
                return 'class'
            elif isinstance(pyobject, (builtins.BuiltinObject,
                                       builtins.BuiltinName)):
                return 'instance'
        elif isinstance(pyname, pynames.ImportedModule):
            return 'module'
        elif isinstance(pyname, (pynames.ImportedName,
                                 pynames.DefinedName)):
            pyobject = pyname.get_object()
            if isinstance(pyobject, pyobjects.AbstractFunction):
                return 'function'
            if isinstance(pyobject, pyobjects.AbstractClass):
                return 'class'
        # Anything not classified above is treated as an instance.
        return 'instance'

    def _get_scope(self, scope):
        # Builtin and imported pynames override the caller-provided scope.
        if isinstance(self.pyname, builtins.BuiltinName):
            return 'builtin'
        if isinstance(self.pyname, (pynames.ImportedModule,
                                    pynames.ImportedName)):
            return 'imported'
        return scope

    def get_doc(self):
        """Get the proposed object's docstring.

        Returns None if it can not be get.
        """
        if not self.pyname:
            return None
        pyobject = self.pyname.get_object()
        if not hasattr(pyobject, 'get_doc'):
            return None
        # Reuse the object fetched above instead of calling
        # get_object() a second time.
        return pyobject.get_doc()

    @property
    def kind(self):
        warnings.warn("the proposal's `kind` property is deprecated, "
                      "use `scope` instead")
        return self.scope
# Kept for backward compatibility with older clients.
CodeAssistProposal = CompletionProposal
class NamedParamProposal(CompletionProposal):
    """A ``name=`` keyword-argument completion proposal.

    Keeps a reference to ``_function`` -- the function the parameter
    ``name`` belongs to -- so the parameter's default value can be
    looked up later.
    """

    def __init__(self, name, function):
        self.argname = name
        super(NamedParamProposal, self).__init__('%s=' % name,
                                                 'parameter_keyword')
        self._function = function

    def get_default(self):
        """Return the source text of this parameter's default value.

        Returns None when the parameter has no default.
        """
        definfo = functionutils.DefinitionInfo.read(self._function)
        return next((default
                     for arg, default in definfo.args_with_defaults
                     if arg == self.argname), None)
def sorted_proposals(proposals, scopepref=None, typepref=None):
    """Sort a list of `CodeAssistProposal` objects.

    `scopepref` can be a list of proposal scopes.  Defaults to
    ``['parameter_keyword', 'local', 'global', 'imported',
    'attribute', 'builtin', 'keyword']``.

    `typepref` can be a list of proposal types.  Defaults to
    ``['class', 'function', 'instance', 'module', None]``.
    (`None` stands for completions with no type, such as keywords.)
    """
    return _ProposalSorter(proposals, scopepref,
                           typepref).get_sorted_proposal_list()
def starting_expression(source_code, offset):
    """Return the primary expression being completed at `offset`."""
    finder = worder.Worder(source_code, True)
    expression, starting, _ = finder.get_splitted_primary_before(offset)
    if not expression:
        return starting
    return expression + '.' + starting
def default_templates():
    """Deprecated; templates are no longer supported.  Returns an empty dict."""
    warnings.warn('default_templates() is deprecated.',
                  DeprecationWarning, stacklevel=2)
    return dict()
class _PythonCodeAssist(object):
def __init__(self, project, source_code, offset, resource=None,
maxfixes=1, later_locals=True):
self.project = project
self.pycore = self.project.pycore
self.code = source_code
self.resource = resource
self.maxfixes = maxfixes
self.later_locals = later_locals
self.word_finder = worder.Worder(source_code, True)
self.expression, self.starting, self.offset = \
self.word_finder.get_splitted_primary_before(offset)
keywords = keyword.kwlist
def _find_starting_offset(self, source_code, offset):
current_offset = offset - 1
while current_offset >= 0 and (source_code[current_offset].isalnum() or
source_code[current_offset] in '_'):
current_offset -= 1;
return current_offset + 1
def _matching_keywords(self, starting):
result = []
for kw in self.keywords:
if kw.startswith(starting):
result.append(CompletionProposal(kw, 'keyword'))
return result
def __call__(self):
if self.offset > len(self.code):
return []
completions = list(self._code_completions().values())
if self.expression.strip() == '' and self.starting.strip() != '':
completions.extend(self._matching_keywords(self.starting))
return completions
def _dotted_completions(self, module_scope, holding_scope):
result = {}
found_pyname = rope.base.evaluate.eval_str(holding_scope,
self.expression)
if found_pyname is not None:
element = found_pyname.get_object()
compl_scope = 'attribute'
if isinstance(element, (pyobjectsdef.PyModule,
pyobjectsdef.PyPackage)):
compl_scope = 'imported'
for name, pyname in element.get_attributes().items():
if name.startswith(self.starting):
result[name] = CompletionProposal(name, compl_scope, pyname)
return result
def _undotted_completions(self, scope, result, lineno=None):
if scope.parent != None:
self._undotted_completions(scope.parent, result)
if lineno is None:
names = scope.get_propagated_names()
else:
names = scope.get_names()
for name, pyname in names.items():
if name.startswith(self.starting):
compl_scope = 'local'
if scope.get_kind() == 'Module':
compl_scope = 'global'
if lineno is None or self.later_locals or \
not self._is_defined_after(scope, pyname, lineno):
result[name] = CompletionProposal(name, compl_scope,
pyname)
def _from_import_completions(self, pymodule):
module_name = self.word_finder.get_from_module(self.offset)
if module_name is None:
return {}
pymodule = self._find_module(pymodule, module_name)
result = {}
for name in pymodule:
if name.startswith(self.starting):
result[name] = CompletionProposal(name, scope='global',
pyname=pymodule[name])
return result
def _find_module(self, pymodule, module_name):
dots = 0
while module_name[dots] == '.':
dots += 1
pyname = pynames.ImportedModule(pymodule,
module_name[dots:], dots)
return pyname.get_object()
def _is_defined_after(self, scope, pyname, lineno):
location = pyname.get_definition_location()
if location is not None and location[1] is not None:
if location[0] == scope.pyobject.get_module() and \
lineno <= location[1] <= scope.get_end():
return True
def _code_completions(self):
lineno = self.code.count('\n', 0, self.offset) + 1
fixer = fixsyntax.FixSyntax(self.pycore, self.code,
self.resource, self.maxfixes)
pymodule = fixer.get_pymodule()
module_scope = pymodule.get_scope()
code = pymodule.source_code
lines = code.split('\n')
result = {}
start = fixsyntax._logical_start(lines, lineno)
indents = fixsyntax._get_line_indents(lines[start - 1])
inner_scope = module_scope.get_inner_scope_for_line(start, indents)
if self.word_finder.is_a_name_after_from_import(self.offset):
return self._from_import_completions(pymodule)
if self.expression.strip() != '':
result.update(self._dotted_completions(module_scope, inner_scope))
else:
result.update(self._keyword_parameters(module_scope.pyobject,
inner_scope))
self._undotted_completions(inner_scope, result, lineno=lineno)
return result
def _keyword_parameters(self, pymodule, scope):
offset = self.offset
if offset == 0:
return {}
word_finder = worder.Worder(self.code, True)
lines = SourceLinesAdapter(self.code)
lineno = lines.get_line_number(offset)
if word_finder.is_on_function_call_keyword(offset - 1):
name_finder = rope.base.evaluate.ScopeNameFinder(pymodule)
function_parens = word_finder.\
find_parens_start_from_inside(offset - 1)
primary = word_finder.get_primary_at(function_parens - 1)
try:
function_pyname = rope.base.evaluate.\
eval_str(scope, primary)
except exceptions.BadIdentifierError as e:
return {}
if function_pyname is not None:
pyobject = function_pyname.get_object()
if isinstance(pyobject, pyobjects.AbstractFunction):
pass
elif isinstance(pyobject, pyobjects.AbstractClass) and \
'__init__' in pyobject:
pyobject = pyobject['__init__'].get_object()
elif '__call__' in pyobject:
pyobject = pyobject['__call__'].get_object()
if isinstance(pyobject, pyobjects.AbstractFunction):
param_names = []
param_names.extend(
pyobject.get_param_names(special_args=False))
result = {}
for name in param_names:
if name.startswith(self.starting):
result[name + '='] = NamedParamProposal(
name, pyobject
)
return result
return {}
class _ProposalSorter(object):
"""Sort a list of code assist proposals"""
def __init__(self, code_assist_proposals, scopepref=None, typepref=None):
self.proposals = code_assist_proposals
if scopepref is None:
scopepref = ['parameter_keyword', 'local', 'global', 'imported',
'attribute', 'builtin', 'keyword']
self.scopepref = scopepref
if typepref is None:
typepref = ['class', 'function', 'instance', 'module', None]
self.typerank = dict((type, index)
for index, type in enumerate(typepref))
def get_sorted_proposal_list(self):
"""Return a list of `CodeAssistProposal`"""
proposals = {}
for proposal in self.proposals:
proposals.setdefault(proposal.scope, []).append(proposal)
result = []
for scope in self.scopepref:
scope_proposals = proposals.get(scope, [])
scope_proposals = [proposal for proposal in scope_proposals
if proposal.type in self.typerank]
scope_proposals.sort(key = self._proposal_cmp)
result.extend(scope_proposals)
return result
def _proposal_cmp(self, proposal):
def underline_count(name):
result = 0
while result < len(name) and name[result] == '_':
result += 1
return result
return (self.typerank.get(proposal.type, 100), underline_count(proposal.name), proposal.name)
def _compare_underlined_names(self, name1, name2):
def underline_count(name):
result = 0
while result < len(name) and name[result] == '_':
result += 1
return result
underline_count1 = underline_count(name1)
underline_count2 = underline_count(name2)
if underline_count1 != underline_count2:
return cmp(underline_count1, underline_count2)
return cmp(name1, name2)
class PyDocExtractor(object):
    """Extract formatted docstrings and calltips from pyobjects."""

    def get_doc(self, pyobject):
        """Return a formatted docstring for `pyobject`, or `None`."""
        if isinstance(pyobject, pyobjects.AbstractFunction):
            return self._get_function_docstring(pyobject)
        elif isinstance(pyobject, pyobjects.AbstractClass):
            return self._get_class_docstring(pyobject)
        elif isinstance(pyobject, pyobjects.AbstractModule):
            return self._trim_docstring(pyobject.get_doc())
        return None

    def get_calltip(self, pyobject, ignore_unknown=False, remove_self=False):
        """Return the call signature of `pyobject`, or `None`.

        Classes are resolved to ``__init__`` and other callables to
        ``__call__`` before building the signature.
        """
        try:
            if isinstance(pyobject, pyobjects.AbstractClass):
                pyobject = pyobject['__init__'].get_object()
            if not isinstance(pyobject, pyobjects.AbstractFunction):
                pyobject = pyobject['__call__'].get_object()
        except exceptions.AttributeNotFoundError:
            return None
        if ignore_unknown and not isinstance(pyobject, pyobjects.PyFunction):
            return
        if isinstance(pyobject, pyobjects.AbstractFunction):
            result = self._get_function_signature(pyobject, add_module=True)
            if remove_self and self._is_method(pyobject):
                # Hide the `self` parameter for bound-call display.
                return result.replace('(self)', '()').replace('(self, ', '(')
            return result

    def _get_class_docstring(self, pyclass):
        contents = self._trim_docstring(pyclass.get_doc(), 2)
        supers = [super.get_name() for super in pyclass.get_superclasses()]
        doc = 'class %s(%s):\n\n' % (pyclass.get_name(),
                                     ', '.join(supers)) + contents
        # Append the constructor's docstring when one is available.
        if '__init__' in pyclass:
            init = pyclass['__init__'].get_object()
            if isinstance(init, pyobjects.AbstractFunction):
                doc += '\n\n' + self._get_single_function_docstring(init)
        return doc

    def _get_function_docstring(self, pyfunction):
        functions = [pyfunction]
        if self._is_method(pyfunction):
            # Include docs of overridden methods from superclasses.
            functions.extend(self._get_super_methods(pyfunction.parent,
                                                     pyfunction.get_name()))
        return '\n\n'.join([self._get_single_function_docstring(function)
                            for function in functions])

    def _is_method(self, pyfunction):
        return isinstance(pyfunction, pyobjects.PyFunction) and \
            isinstance(pyfunction.parent, pyobjects.PyClass)

    def _get_single_function_docstring(self, pyfunction):
        signature = self._get_function_signature(pyfunction)
        docs = self._trim_docstring(pyfunction.get_doc(), indents=2)
        return signature + ':\n\n' + docs

    def _get_super_methods(self, pyclass, name):
        """Collect `name` overrides found anywhere up the superclass tree."""
        result = []
        for super_class in pyclass.get_superclasses():
            if name in super_class:
                function = super_class[name].get_object()
                if isinstance(function, pyobjects.AbstractFunction):
                    result.append(function)
            result.extend(self._get_super_methods(super_class, name))
        return result

    def _get_function_signature(self, pyfunction, add_module=False):
        location = self._location(pyfunction, add_module)
        if isinstance(pyfunction, pyobjects.PyFunction):
            info = functionutils.DefinitionInfo.read(pyfunction)
            return location + info.to_string()
        else:
            return '%s(%s)' % (location + pyfunction.get_name(),
                               ', '.join(pyfunction.get_param_names()))

    def _location(self, pyobject, add_module=False):
        """Build the dotted prefix (``module.Class.``) for `pyobject`."""
        location = []
        parent = pyobject.parent
        while parent and not isinstance(parent, pyobjects.AbstractModule):
            location.append(parent.get_name())
            location.append('.')
            parent = parent.parent
        if add_module:
            if isinstance(pyobject, pyobjects.PyFunction):
                # Fixed: removed a dead `module = pyobject.get_module()`
                # assignment that the original never used.
                location.insert(0, self._get_module(pyobject))
            if isinstance(parent, builtins.BuiltinModule):
                location.insert(0, parent.get_name() + '.')
        return ''.join(location)

    def _get_module(self, pyfunction):
        module = pyfunction.get_module()
        if module is not None:
            resource = module.get_resource()
            if resource is not None:
                return pyfunction.pycore.modname(resource) + '.'
        return ''

    def _trim_docstring(self, docstring, indents=0):
        """The sample code from :PEP:`257`"""
        if not docstring:
            return ''
        # Convert tabs to spaces (following normal Python rules)
        # and split into a list of lines:
        lines = docstring.expandtabs().splitlines()
        # Determine minimum indentation (first line doesn't count):
        indent = sys.maxsize
        for line in lines[1:]:
            stripped = line.lstrip()
            if stripped:
                indent = min(indent, len(line) - len(stripped))
        # Remove indentation (first line is special):
        trimmed = [lines[0].strip()]
        if indent < sys.maxsize:
            for line in lines[1:]:
                trimmed.append(line[indent:].rstrip())
        # Strip off trailing and leading blank lines:
        while trimmed and not trimmed[-1]:
            trimmed.pop()
        while trimmed and not trimmed[0]:
            trimmed.pop(0)
        # Return a single string:
        return '\n'.join((' ' * indents + line for line in trimmed))
# Deprecated classes
class TemplateProposal(CodeAssistProposal):
    """Deprecated proposal kind; kept only for backward compatibility."""

    def __init__(self, name, template):
        # stacklevel=2 attributes the warning to the caller, not this line.
        warnings.warn('TemplateProposal is deprecated.',
                      DeprecationWarning, stacklevel=2)
        super(TemplateProposal, self).__init__(name, 'template')
        self.template = template
class Template(object):
    """Deprecated no-op template: substitution returns the text unchanged."""

    def __init__(self, template):
        # Warn the constructing caller; the object still works as a no-op.
        warnings.warn('Template is deprecated.',
                      DeprecationWarning, stacklevel=2)
        self.template = template

    def variables(self):
        """Template variables are no longer supported; always empty."""
        return []

    def substitute(self, mapping):
        # Substitution is a no-op: the raw template text comes back.
        return self.template

    def get_cursor_location(self, mapping):
        # The cursor goes to the end of the template text.
        return len(self.template)
from rope.base.fscommands import _decode_data
from rope.base import ast, exceptions, utils
class PyObject(object):
    """The base class for all python objects rope models.

    Every object has a `type`; the canonical 'Type' base type is its
    own type (see `_get_base_type`).
    """

    def __init__(self, type_):
        # The base 'Type' object passes None and becomes its own type.
        if type_ is None:
            type_ = self
        self.type = type_

    def get_attributes(self):
        """Return a dict mapping attribute names to pynames."""
        if self.type is self:
            return {}
        # A plain instance exposes the attributes of its type.
        return self.type.get_attributes()

    def get_attribute(self, name):
        """Return the pyname for attribute `name`.

        Raises `exceptions.AttributeNotFoundError` when missing.
        """
        if name not in self.get_attributes():
            raise exceptions.AttributeNotFoundError(
                'Attribute %s not found' % name)
        return self.get_attributes()[name]

    def get_type(self):
        """Return this object's type (a `PyObject`)."""
        return self.type

    def __getitem__(self, key):
        """The same as ``get_attribute(key)``"""
        return self.get_attribute(key)

    def __contains__(self, key):
        """The same as ``key in self.get_attributes()``"""
        return key in self.get_attributes()

    def __eq__(self, obj):
        """Check the equality of two `PyObject`\\s

        Currently it is assumed that instances (the direct instances
        of `PyObject`, not the instances of its subclasses) are equal
        if their types are equal. For every other object like
        defineds or builtins rope assumes objects are reference
        objects and their identities should match.
        """
        if self.__class__ != obj.__class__:
            return False
        if type(self) == PyObject:
            if self is not self.type:
                # Plain instances compare by their types.
                return self.type == obj.type
            else:
                # Base types are singletons; compare by identity.
                return self.type is obj.type
        return self is obj

    def __ne__(self, obj):
        return not self.__eq__(obj)

    def __hash__(self):
        """See docs for `__eq__()` method"""
        if type(self) == PyObject and self != self.type:
            # Plain instances hash by type so equal objects hash equal.
            return hash(self.type) + 1
        else:
            return super(PyObject, self).__hash__()

    def __iter__(self):
        """The same as ``iter(self.get_attributes())``"""
        return iter(self.get_attributes())

    # Lazily created singletons shared by all PyObjects.
    _types = None
    _unknown = None

    @staticmethod
    def _get_base_type(name):
        # Create the four base types on first use; 'Type' is its own type.
        if PyObject._types is None:
            PyObject._types = {}
            base_type = PyObject(None)
            PyObject._types['Type'] = base_type
            PyObject._types['Module'] = PyObject(base_type)
            PyObject._types['Function'] = PyObject(base_type)
            PyObject._types['Unknown'] = PyObject(base_type)
        return PyObject._types[name]
def get_base_type(name):
    """Return the base type named `name`.

    Valid names are 'Type', 'Function', 'Module' and 'Unknown'.  Comparing
    against base types (``pyobject.get_type() == get_base_type('Function')``)
    is discouraged; prefer ``isinstance`` checks against `AbstractClass`,
    `AbstractFunction`, `AbstractModule`, or against `PyFunction`/`PyClass`
    when source access is needed.
    """
    return PyObject._get_base_type(name)
def get_unknown():
    """Return the singleton pyobject representing an unknown object.

    All unknown objects compare equal, so
    ``pyname.get_object() == get_unknown()`` tests whether rope could
    determine what a pyname holds.  This null object is used instead of
    `None` to avoid scattering None-checks through the code base.
    """
    if PyObject._unknown is None:
        PyObject._unknown = PyObject(get_base_type('Unknown'))
    return PyObject._unknown
class AbstractClass(PyObject):
    """Base class for objects representing python classes."""

    def __init__(self):
        super(AbstractClass, self).__init__(get_base_type('Type'))

    def get_name(self):
        """Name of the class; `None` when unknown."""

    def get_doc(self):
        """Docstring of the class; `None` when unavailable."""

    def get_superclasses(self):
        """List of superclass pyobjects; empty by default."""
        return []
class AbstractFunction(PyObject):
    """Base class for objects representing callables."""

    def __init__(self):
        super(AbstractFunction, self).__init__(get_base_type('Function'))

    def get_name(self):
        """Name of the function; `None` when unknown."""

    def get_doc(self):
        """Docstring of the function; `None` when unavailable."""

    def get_param_names(self, special_args=True):
        """Parameter names; empty by default."""
        return []

    def get_returned_object(self, args):
        """Object returned for a call with `args`; unknown by default."""
        return get_unknown()
class AbstractModule(PyObject):
    """Base class for objects representing python modules."""

    def __init__(self, doc=None):
        # `doc` is accepted for backward compatibility but not stored.
        super(AbstractModule, self).__init__(get_base_type('Module'))

    def get_doc(self):
        """Docstring of the module; `None` when unavailable."""

    def get_resource(self):
        """Resource backing the module; `None` when there is none."""
class PyDefinedObject(object):
    """Python defined names that rope can access their sources"""

    def __init__(self, pycore, ast_node, parent):
        self.pycore = pycore
        self.ast_node = ast_node
        self.scope = None
        self.parent = parent
        self.structural_attributes = None
        # Caches live on the module so they can all be invalidated together
        # when the module changes.
        self.concluded_attributes = self.get_module()._get_concluded_data()
        self.attributes = self.get_module()._get_concluded_data()
        self.defineds = None

    # Subclasses set this to an AST visitor class that collects definitions.
    visitor_class = None

    @utils.prevent_recursion(lambda: {})
    def _get_structural_attributes(self):
        # Attributes found directly in the AST; computed once and cached.
        if self.structural_attributes is None:
            self.structural_attributes = self._create_structural_attributes()
        return self.structural_attributes

    @utils.prevent_recursion(lambda: {})
    def _get_concluded_attributes(self):
        # Attributes inferred by analysis.  Structural attributes are
        # computed first because inference may depend on them.
        if self.concluded_attributes.get() is None:
            self._get_structural_attributes()
            self.concluded_attributes.set(self._create_concluded_attributes())
        return self.concluded_attributes.get()

    def get_attributes(self):
        # Merged view; structural attributes win over concluded ones.
        if self.attributes.get() is None:
            result = dict(self._get_concluded_attributes())
            result.update(self._get_structural_attributes())
            self.attributes.set(result)
        return self.attributes.get()

    def get_attribute(self, name):
        """Return attribute `name`, preferring structural over concluded.

        Raises `exceptions.AttributeNotFoundError` when missing.
        """
        if name in self._get_structural_attributes():
            return self._get_structural_attributes()[name]
        if name in self._get_concluded_attributes():
            return self._get_concluded_attributes()[name]
        raise exceptions.AttributeNotFoundError('Attribute %s not found' %
                                                name)

    def get_scope(self):
        # Created lazily via the subclass-specific `_create_scope`.
        if self.scope is None:
            self.scope = self._create_scope()
        return self.scope

    def get_module(self):
        # Walk up the parent chain; the module is the object with no parent.
        current_object = self
        while current_object.parent is not None:
            current_object = current_object.parent
        return current_object

    def get_doc(self):
        # The docstring is the first body statement when it is a string.
        if len(self.get_ast().body) > 0:
            expr = self.get_ast().body[0]
            if isinstance(expr, ast.Expr) and \
                    isinstance(expr.value, ast.Str):
                docstring = expr.value.s
                # Decode with the module's declared source encoding.
                coding = self.get_module().coding
                return _decode_data(docstring, coding)

    def _get_defined_objects(self):
        # `defineds` is filled as a side effect of the structural walk.
        if self.defineds is None:
            self._get_structural_attributes()
        return self.defineds

    def _create_structural_attributes(self):
        if self.visitor_class is None:
            return {}
        new_visitor = self.visitor_class(self.pycore, self)
        for child in ast.get_child_nodes(self.ast_node):
            ast.walk(child, new_visitor)
        self.defineds = new_visitor.defineds
        return new_visitor.names

    def _create_concluded_attributes(self):
        # Overridden by subclasses that can infer extra attributes.
        return {}

    def get_ast(self):
        return self.ast_node

    def _create_scope(self):
        # Implemented by subclasses.
        pass
class PyFunction(PyDefinedObject, AbstractFunction):
    """Only a placeholder"""
    # The real implementation lives in `rope.base.pyobjectsdef`; this stub
    # exists so isinstance checks can target this module.
class PyClass(PyDefinedObject, AbstractClass):
    """Only a placeholder"""
    # The real implementation lives in `rope.base.pyobjectsdef`; this stub
    # exists so isinstance checks can target this module.
class _ConcludedData(object):
def __init__(self):
self.data_ = None
def set(self, data):
self.data_ = data
def get(self):
return self.data_
data = property(get, set)
def _invalidate(self):
self.data = None
def __str__(self):
return '<' + str(self.data) + '>'
class _PyModule(PyDefinedObject, AbstractModule):
    """Shared base for `PyModule` and `PyPackage`."""

    def __init__(self, pycore, ast_node, resource):
        self.resource = resource
        # Must exist before PyDefinedObject.__init__ runs: that constructor
        # asks this module for concluded-data cells via get_module().
        self.concluded_data = []
        AbstractModule.__init__(self)
        PyDefinedObject.__init__(self, pycore, ast_node, None)

    def _get_concluded_data(self):
        # Hand out a fresh cache cell and remember it for invalidation.
        new_data = _ConcludedData()
        self.concluded_data.append(new_data)
        return new_data

    def _forget_concluded_data(self):
        # Invalidate every cache cell this module has handed out.
        for data in self.concluded_data:
            data._invalidate()

    def get_resource(self):
        return self.resource
class PyModule(_PyModule):
    """Only a placeholder"""
    # Real implementation in `rope.base.pyobjectsdef`.
class PyPackage(_PyModule):
    """Only a placeholder"""
    # Real implementation in `rope.base.pyobjectsdef`.
class IsBeingInferredError(exceptions.RopeError):
    """Raised when an object is queried while its type is being inferred."""
    pass
import rope.base.evaluate
from rope.base import ast
class Arguments(object):
    """Evaluate the arguments passed to a function call.

    Use the `create_arguments` factory, which also handles the implicit
    first argument of method calls.
    """

    def __init__(self, args, scope):
        self.args = args
        self.scope = scope
        self.instance = None

    def get_arguments(self, parameters):
        """Return the pyobject (or None) for each argument position."""
        objects = []
        for pyname in self.get_pynames(parameters):
            objects.append(None if pyname is None else pyname.get_object())
        return objects

    def get_pynames(self, parameters):
        """Map the call's arguments onto `parameters`, honoring keywords."""
        result = [None] * max(len(parameters), len(self.args))
        for index, arg in enumerate(self.args):
            if isinstance(arg, ast.keyword) and arg.arg in parameters:
                # Keyword arguments land in their parameter's slot.
                result[parameters.index(arg.arg)] = self._evaluate(arg.value)
            else:
                result[index] = self._evaluate(arg)
        return result

    def get_instance_pyname(self):
        """Return the pyname of the first argument, if there is one."""
        if self.args:
            return self._evaluate(self.args[0])

    def _evaluate(self, ast_node):
        return rope.base.evaluate.eval_node(self.scope, ast_node)
def create_arguments(primary, pyfunction, call_node, scope):
    """A factory for creating `Arguments`

    For calls of methods through an attribute, the receiver expression
    is inserted as the implicit first argument.
    """
    args = list(call_node.args) + list(call_node.keywords)
    called = call_node.func
    # XXX: Handle constructors
    if _is_method_call(primary, pyfunction) and \
            isinstance(called, ast.Attribute):
        args.insert(0, called.value)
    return Arguments(args, scope)
class ObjectArguments(object):
    """Arguments given directly as a list of pynames (no AST involved)."""

    def __init__(self, pynames):
        self.pynames = pynames

    def get_arguments(self, parameters):
        # Resolve each pyname to its object; missing slots stay None.
        return [None if pyname is None else pyname.get_object()
                for pyname in self.pynames]

    def get_pynames(self, parameters):
        return self.pynames

    def get_instance_pyname(self):
        return self.pynames[0]
class MixedArguments(object):
    """Arguments whose first pyname is known and whose rest are wrapped.

    `arguments` is an instance of `Arguments`; `pyname` becomes the
    implicit first argument (e.g. ``self``).  (Fixed the "argumens"
    typo in the original docstring.)
    """

    def __init__(self, pyname, arguments, scope):
        # `scope` is accepted for backward compatibility but unused;
        # evaluation happens inside the wrapped `arguments`.
        self.pyname = pyname
        self.args = arguments

    def get_pynames(self, parameters):
        # The first parameter slot is taken by the known pyname.
        return [self.pyname] + self.args.get_pynames(parameters[1:])

    def get_arguments(self, parameters):
        result = []
        for pyname in self.get_pynames(parameters):
            if pyname is None:
                result.append(None)
            else:
                result.append(pyname.get_object())
        return result

    def get_instance_pyname(self):
        return self.pyname
def _is_method_call(primary, pyfunction):
    """Return True when calling `pyfunction` on `primary` is a method call."""
    if primary is None:
        return False
    primary_type = primary.get_object().get_type()
    # Source-defined method on a source-defined class.
    if isinstance(primary_type, rope.base.pyobjects.PyClass) and \
            isinstance(pyfunction, rope.base.pyobjects.PyFunction) and \
            isinstance(pyfunction.parent, rope.base.pyobjects.PyClass):
        return True
    # Builtin function invoked on an instance of some class.
    if isinstance(primary_type, rope.base.pyobjects.AbstractClass) and \
            isinstance(pyfunction, rope.base.builtins.BuiltinFunction):
        return True
    return False
import datetime
import difflib
import functools
import os
import time
import warnings

import rope.base.fscommands
from rope.base import taskhandle, exceptions, utils
class Change(object):
    """The base class for changes

    Rope refactorings return `Change` objects. They can be previewed,
    committed or undone.
    """

    def do(self, job_set=None):
        """Perform the change

        .. note:: Do not use this directly. Use `Project.do()` instead.
        """

    def undo(self, job_set=None):
        """Undo the change

        .. note:: Do not use this directly. Use `History.undo()` instead.
        """

    def get_description(self):
        """Return the description of this change

        This can be used for previewing the changes.
        """
        return str(self)

    def get_changed_resources(self):
        """Return the list of resources that will be changed"""
        return []

    @property
    @utils.saveit
    def _operations(self):
        # Cached per instance; assumes the subclass defines `self.resource`.
        return _ResourceOperations(self.resource.project)
class ChangeSet(Change):
    """A collection of `Change` objects

    This class holds a collection of changes. This class provides
    these fields:

    * `changes`: the list of changes
    * `description`: the goal of these changes
    """

    def __init__(self, description, timestamp=None):
        self.changes = []
        self.description = description
        self.time = timestamp

    def do(self, job_set=taskhandle.NullJobSet()):
        """Perform all child changes; roll back the done ones on failure."""
        try:
            done = []
            for change in self.changes:
                change.do(job_set)
                done.append(change)
            self.time = time.time()
        except Exception:
            # Undo the successfully applied prefix before re-raising.
            for change in done:
                change.undo()
            raise

    def undo(self, job_set=taskhandle.NullJobSet()):
        """Undo all child changes in reverse order; redo on failure."""
        try:
            done = []
            for change in reversed(self.changes):
                change.undo(job_set)
                done.append(change)
        except Exception:
            # Re-apply the already-undone suffix before re-raising.
            for change in done:
                change.do()
            raise

    def add_change(self, change):
        self.changes.append(change)

    def get_description(self):
        # Concatenate the descriptions of all child changes.
        result = [str(self) + ':\n\n\n']
        for change in self.changes:
            result.append(change.get_description())
            result.append('\n')
        return ''.join(result)

    def __str__(self):
        # Render the description with a human-friendly relative timestamp.
        if self.time is not None:
            date = datetime.datetime.fromtimestamp(self.time)
            if date.date() == datetime.date.today():
                string_date = 'today'
            elif date.date() == (datetime.date.today() - datetime.timedelta(1)):
                string_date = 'yesterday'
            elif date.year == datetime.date.today().year:
                string_date = date.strftime('%b %d')
            else:
                string_date = date.strftime('%d %b, %Y')
            string_time = date.strftime('%H:%M:%S')
            string_time = '%s %s ' % (string_date, string_time)
            return self.description + ' - ' + string_time
        return self.description

    def get_changed_resources(self):
        result = set()
        for change in self.changes:
            result.update(change.get_changed_resources())
        return result
def _handle_job_set(function):
    """A decorator for handling `taskhandle.JobSet`\\s

    Wraps the `do`/`undo` methods of `Change`\\s so the job set is
    notified before and after the actual operation runs.
    """
    # Fixed: preserve the wrapped method's name/docstring with wraps().
    @functools.wraps(function)
    def call(self, job_set=taskhandle.NullJobSet()):
        job_set.started_job(str(self))
        function(self)
        job_set.finished_job()
    return call
class ChangeContents(Change):
    """A class to change the contents of a file

    Fields:

    * `resource`: The `rope.base.resources.File` to change
    * `new_contents`: What to write in the file
    """

    def __init__(self, resource, new_contents, old_contents=None):
        self.resource = resource
        # IDEA: Only saving diffs; possible problems when undo/redoing
        self.new_contents = new_contents
        self.old_contents = old_contents

    @_handle_job_set
    def do(self):
        # Capture the previous contents lazily so undo can restore them.
        if self.old_contents is None:
            self.old_contents = self.resource.read()
        self._operations.write_file(self.resource, self.new_contents)

    @_handle_job_set
    def undo(self):
        if self.old_contents is None:
            raise exceptions.HistoryError(
                'Undoing a change that is not performed yet!')
        self._operations.write_file(self.resource, self.old_contents)

    def __str__(self):
        return 'Change <%s>' % self.resource.path

    def get_description(self):
        """Return a unified diff of the old and new contents."""
        if self.old_contents is not None:
            old = self.old_contents
        elif self.resource.exists():
            old = self.resource.read()
        else:
            old = ''
        diff = difflib.unified_diff(
            old.splitlines(True), self.new_contents.splitlines(True),
            'a/' + self.resource.path, 'b/' + self.resource.path)
        return ''.join(diff)

    def get_changed_resources(self):
        return [self.resource]
class MoveResource(Change):
    """Move a resource to a new location

    Fields:

    * `resource`: The `rope.base.resources.Resource` to move
    * `new_resource`: The destination for move; It is the moved
      resource not the folder containing that resource.
    """

    def __init__(self, resource, new_location, exact=False):
        self.project = resource.project
        self.resource = resource
        # Unless `exact`, moving into an existing folder keeps the name.
        if not exact:
            new_location = _get_destination_for_move(resource, new_location)
        factory = (self.project.get_folder if resource.is_folder()
                   else self.project.get_file)
        self.new_resource = factory(new_location)

    @_handle_job_set
    def do(self):
        self._operations.move(self.resource, self.new_resource)

    @_handle_job_set
    def undo(self):
        # Undoing a move is just moving back.
        self._operations.move(self.new_resource, self.resource)

    def __str__(self):
        return 'Move <%s>' % self.resource.path

    def get_description(self):
        return 'rename from %s\nrename to %s' % (self.resource.path,
                                                 self.new_resource.path)

    def get_changed_resources(self):
        return [self.resource, self.new_resource]
class CreateResource(Change):
    """A class to create a resource

    Fields:

    * `resource`: The resource to create
    """

    def __init__(self, resource):
        self.resource = resource

    @_handle_job_set
    def do(self):
        self._operations.create(self.resource)

    @_handle_job_set
    def undo(self):
        # Creation is undone by removing the resource again.
        self._operations.remove(self.resource)

    def __str__(self):
        return 'Create Resource <%s>' % (self.resource.path)

    def get_description(self):
        return 'new file %s' % (self.resource.path)

    def get_changed_resources(self):
        return [self.resource]

    def _get_child_path(self, parent, name):
        # Resources at the project root have an empty parent path.
        if parent.path == '':
            return name
        return parent.path + '/' + name
class CreateFolder(CreateResource):
    """A class to create a folder

    See docs for `CreateResource`.
    """

    def __init__(self, parent, name):
        # Resolve the child path relative to `parent` before delegating.
        resource = parent.project.get_folder(self._get_child_path(parent, name))
        super(CreateFolder, self).__init__(resource)
class CreateFile(CreateResource):
    """A class to create a file

    See docs for `CreateResource`.
    """

    def __init__(self, parent, name):
        # Resolve the child path relative to `parent` before delegating.
        resource = parent.project.get_file(self._get_child_path(parent, name))
        super(CreateFile, self).__init__(resource)
class RemoveResource(Change):
    """A class to remove a resource

    Fields:

    * `resource`: The resource to be removed
    """

    def __init__(self, resource):
        self.resource = resource

    @_handle_job_set
    def do(self):
        self._operations.remove(self.resource)

    # TODO: Undoing remove operations
    @_handle_job_set
    def undo(self):
        # Removal is destructive; no contents are saved to restore from.
        raise NotImplementedError(
            'Undoing `RemoveResource` is not implemented yet.')

    def __str__(self):
        return 'Remove <%s>' % (self.resource.path)

    def get_changed_resources(self):
        return [self.resource]
def count_changes(change):
    """Counts the number of basic changes a `Change` will make"""
    if isinstance(change, ChangeSet):
        # Change sets contribute the total of their children, recursively.
        return sum(count_changes(child) for child in change.changes)
    return 1
def create_job_set(task_handle, change):
    """Create a job set with one job per basic change in `change`."""
    return task_handle.create_jobset(str(change), count_changes(change))
class _ResourceOperations(object):
    """Perform file-system operations and notify project observers."""

    def __init__(self, project):
        self.project = project
        self.fscommands = project.fscommands
        # Plain commands for resources the (possibly VCS-aware) project
        # commands should not touch.
        self.direct_commands = rope.base.fscommands.FileSystemCommands()

    def _get_fscommands(self, resource):
        # Ignored resources bypass the project's configured commands.
        if self.project.is_ignored(resource):
            return self.direct_commands
        return self.fscommands

    def write_file(self, resource, contents):
        data = rope.base.fscommands.unicode_to_file_data(contents)
        fscommands = self._get_fscommands(resource)
        fscommands.write(resource.real_path, data)
        # Iterate over a copy: observers may unregister during callbacks.
        for observer in list(self.project.observers):
            observer.resource_changed(resource)

    def move(self, resource, new_resource):
        fscommands = self._get_fscommands(resource)
        fscommands.move(resource.real_path, new_resource.real_path)
        for observer in list(self.project.observers):
            observer.resource_moved(resource, new_resource)

    def create(self, resource):
        if resource.is_folder():
            self._create_resource(resource.path, kind='folder')
        else:
            self._create_resource(resource.path)
        for observer in list(self.project.observers):
            observer.resource_created(resource)

    def remove(self, resource):
        fscommands = self._get_fscommands(resource)
        fscommands.remove(resource.real_path)
        for observer in list(self.project.observers):
            observer.resource_removed(resource)

    def _create_resource(self, file_name, kind='file'):
        """Create a file or folder after validating the target path."""
        resource_path = self.project._get_resource_path(file_name)
        if os.path.exists(resource_path):
            raise exceptions.RopeError('Resource <%s> already exists'
                                       % resource_path)
        resource = self.project.get_file(file_name)
        if not resource.parent.exists():
            raise exceptions.ResourceNotFoundError(
                'Parent folder of <%s> does not exist' % resource.path)
        fscommands = self._get_fscommands(resource)
        try:
            if kind == 'file':
                fscommands.create_file(resource_path)
            else:
                fscommands.create_folder(resource_path)
        except IOError as e:
            # Re-raise as a rope error so callers handle one exception type.
            raise exceptions.RopeError(e)
def _get_destination_for_move(resource, destination):
    """If `destination` is an existing folder, move `resource` inside it."""
    dest_path = resource.project._get_resource_path(destination)
    if not os.path.isdir(dest_path):
        return destination
    # Moving into a folder keeps the resource's own name.
    if destination == '':
        return resource.name
    return destination + '/' + resource.name
class ChangeToData(object):
    """Convert `Change` objects into plain serializable tuples."""

    def convertChangeSet(self, change):
        children = [self(child) for child in change.changes]
        return (change.description, children, change.time)

    def convertChangeContents(self, change):
        return (change.resource.path, change.new_contents, change.old_contents)

    def convertMoveResource(self, change):
        return (change.resource.path, change.new_resource.path)

    def convertCreateResource(self, change):
        return (change.resource.path, change.resource.is_folder())

    def convertRemoveResource(self, change):
        return (change.resource.path, change.resource.is_folder())

    def __call__(self, change):
        change_type = type(change)
        # File and folder creation share one serialized representation.
        if change_type in (CreateFolder, CreateFile):
            change_type = CreateResource
        converter = getattr(self, 'convert' + change_type.__name__)
        return (change_type.__name__, converter(change))
class DataToChange(object):
    """Rebuild `Change` objects from tuples produced by `ChangeToData`."""

    def __init__(self, project):
        self.project = project

    def makeChangeSet(self, description, changes, time=None):
        result = ChangeSet(description, time)
        for child in changes:
            result.add_change(self(child))
        return result

    def makeChangeContents(self, path, new_contents, old_contents):
        return ChangeContents(self.project.get_file(path),
                              new_contents, old_contents)

    def makeMoveResource(self, old_path, new_path):
        # `exact=True`: the serialized path is already the final location.
        return MoveResource(self.project.get_file(old_path),
                            new_path, exact=True)

    def _resource(self, path, is_folder):
        # One helper for the folder/file distinction shared below.
        if is_folder:
            return self.project.get_folder(path)
        return self.project.get_file(path)

    def makeCreateResource(self, path, is_folder):
        return CreateResource(self._resource(path, is_folder))

    def makeRemoveResource(self, path, is_folder):
        return RemoveResource(self._resource(path, is_folder))

    def __call__(self, data):
        maker = getattr(self, 'make' + data[0])
        return maker(*data[1])
import bisect
import keyword
import rope.base.simplify
def get_name_at(resource, offset):
    """Return the word (identifier) at `offset` in `resource`'s source."""
    return Worder(resource.read()).get_word_at(offset)
class Worder(object):
    """A class for finding boundaries of words and expressions

    Note that in these methods, offset should be the index of the
    character not the index of the character after it.
    """

    def __init__(self, code, handle_ignores=False):
        # Work on simplified code (strings/comments blanked out) while
        # keeping the raw text, whose offsets match the simplified one.
        simplified = rope.base.simplify.real_code(code)
        self.code_finder = _RealFinder(simplified, code)
        self.handle_ignores = handle_ignores
        self.code = code

    def _init_ignores(self):
        # Precompute ignored (string/comment) regions for bisection.
        ignores = rope.base.simplify.ignored_regions(self.code)
        self.dumb_finder = _RealFinder(self.code, self.code)
        self.starts = [ignored[0] for ignored in ignores]
        self.ends = [ignored[1] for ignored in ignores]

    def _context_call(self, name, offset):
        # Inside an ignored region, use the dumb finder on the raw text;
        # everywhere else, use the finder on the simplified code.
        if self.handle_ignores:
            if not hasattr(self, 'starts'):
                self._init_ignores()
            start = bisect.bisect(self.starts, offset)
            if start > 0 and offset < self.ends[start - 1]:
                return getattr(self.dumb_finder, name)(offset)
        return getattr(self.code_finder, name)(offset)

    # Offset-sensitive queries go through `_context_call`; the rest
    # delegate straight to the simplified-code finder.

    def get_primary_at(self, offset):
        return self._context_call('get_primary_at', offset)

    def get_word_at(self, offset):
        return self._context_call('get_word_at', offset)

    def get_primary_range(self, offset):
        return self._context_call('get_primary_range', offset)

    def get_splitted_primary_before(self, offset):
        return self._context_call('get_splitted_primary_before', offset)

    def get_word_range(self, offset):
        return self._context_call('get_word_range', offset)

    def is_function_keyword_parameter(self, offset):
        return self.code_finder.is_function_keyword_parameter(offset)

    def is_a_class_or_function_name_in_header(self, offset):
        return self.code_finder.is_a_class_or_function_name_in_header(offset)

    def is_from_statement_module(self, offset):
        return self.code_finder.is_from_statement_module(offset)

    def is_from_aliased(self, offset):
        return self.code_finder.is_from_aliased(offset)

    def find_parens_start_from_inside(self, offset):
        return self.code_finder.find_parens_start_from_inside(offset)

    def is_a_name_after_from_import(self, offset):
        return self.code_finder.is_a_name_after_from_import(offset)

    def is_from_statement(self, offset):
        return self.code_finder.is_from_statement(offset)

    def get_from_aliased(self, offset):
        return self.code_finder.get_from_aliased(offset)

    def is_import_statement(self, offset):
        return self.code_finder.is_import_statement(offset)

    def is_assigned_here(self, offset):
        return self.code_finder.is_assigned_here(offset)

    def is_a_function_being_called(self, offset):
        return self.code_finder.is_a_function_being_called(offset)

    def get_word_parens_range(self, offset):
        return self.code_finder.get_word_parens_range(offset)

    def is_name_assigned_in_class_body(self, offset):
        return self.code_finder.is_name_assigned_in_class_body(offset)

    def is_on_function_call_keyword(self, offset):
        return self.code_finder.is_on_function_call_keyword(offset)

    def _find_parens_start(self, offset):
        return self.code_finder._find_parens_start(offset)

    def get_parameters(self, first, last):
        return self.code_finder.get_parameters(first, last)

    def get_from_module(self, offset):
        return self.code_finder.get_from_module(offset)

    def is_assigned_in_a_tuple_assignment(self, offset):
        return self.code_finder.is_assigned_in_a_tuple_assignment(offset)

    def get_assignment_type(self, offset):
        return self.code_finder.get_assignment_type(offset)

    def get_function_and_args_in_header(self, offset):
        return self.code_finder.get_function_and_args_in_header(offset)

    def get_lambda_and_args(self, offset):
        return self.code_finder.get_lambda_and_args(offset)

    def find_function_offset(self, offset):
        return self.code_finder.find_function_offset(offset)
class _RealFinder(object):
    """Offset-based lexical analysis over two views of the same text.

    ``code`` is the text that is searched (usually a *simplified* copy with
    strings/comments blanked out) while ``raw`` is the original text that
    results are sliced from; the two must have identical offsets.  As with
    the wrapper class, every offset indexes a character, not the position
    after it.
    """

    def __init__(self, code, raw):
        self.code = code
        self.raw = raw

    def _find_word_start(self, offset):
        # Walk left while still inside an identifier.
        current_offset = offset
        while current_offset >= 0 and self._is_id_char(current_offset):
            current_offset -= 1
        return current_offset + 1

    def _find_word_end(self, offset):
        # Walk right while the *next* char continues the identifier; returns
        # the offset of the last identifier character (inclusive).
        while offset + 1 < len(self.code) and self._is_id_char(offset + 1):
            offset += 1
        return offset

    def _find_last_non_space_char(self, offset):
        # Scan left over whitespace; stops at a newline so searches do not
        # silently cross line boundaries.  Returns -1 when nothing is found.
        while offset >= 0 and self.code[offset].isspace():
            if self.code[offset] == '\n':
                return offset
            offset -= 1
        return max(-1, offset)

    def get_word_at(self, offset):
        """Return the identifier around ``offset`` (sliced from raw text)."""
        offset = self._get_fixed_offset(offset)
        return self.raw[self._find_word_start(offset):
                        self._find_word_end(offset) + 1]

    def _get_fixed_offset(self, offset):
        # Nudge an offset that sits just outside a word onto the word itself
        # (e.g. the caret right after the last character).
        if offset >= len(self.code):
            return offset - 1
        if not self._is_id_char(offset):
            if offset > 0 and self._is_id_char(offset - 1):
                return offset - 1
            if offset < len(self.code) - 1 and self._is_id_char(offset + 1):
                return offset + 1
        return offset

    def _is_id_char(self, offset):
        return self.code[offset].isalnum() or self.code[offset] == '_'

    def _find_string_start(self, offset):
        # `offset` is on a quote char; find the matching opening quote.
        kind = self.code[offset]
        try:
            return self.code.rindex(kind, 0, offset)
        except ValueError:
            return 0

    def _find_parens_start(self, offset):
        # `offset` is on a closing bracket; walk back to the opener,
        # skipping over whole primaries in between.
        offset = self._find_last_non_space_char(offset - 1)
        while offset >= 0 and self.code[offset] not in '[({':
            if self.code[offset] not in ':,':
                offset = self._find_primary_start(offset)
            offset = self._find_last_non_space_char(offset - 1)
        return offset

    def _find_atom_start(self, offset):
        # Start of the atom (word, string literal or bracketed group)
        # ending at `offset`; falls back to `offset` itself.
        old_offset = offset
        if self.code[offset] == '\n':
            return offset + 1
        if self.code[offset].isspace():
            offset = self._find_last_non_space_char(offset)
        if self.code[offset] in '\'"':
            return self._find_string_start(offset)
        if self.code[offset] in ')]}':
            return self._find_parens_start(offset)
        if self._is_id_char(offset):
            return self._find_word_start(offset)
        return old_offset

    def _find_primary_without_dot_start(self, offset):
        """It tries to find the undotted primary start

        It is different from `self._get_atom_start()` in that it
        follows function calls, too; such as in ``f(x)``.
        """
        last_atom = offset
        offset = self._find_last_non_space_char(last_atom)
        # Step over trailing call/subscript groups: f(x)[0] -> f.
        while offset > 0 and self.code[offset] in ')]':
            last_atom = self._find_parens_start(offset)
            offset = self._find_last_non_space_char(last_atom - 1)
        if offset >= 0 and (self.code[offset] in '"\'})]' or
                            self._is_id_char(offset)):
            atom_start = self._find_atom_start(offset)
            # Keywords (if, return, ...) never start a primary.
            if not keyword.iskeyword(self.code[atom_start:offset + 1]):
                return atom_start
        return last_atom

    def _find_primary_start(self, offset):
        # Start of the full dotted primary (``a.b().c``) around `offset`.
        if offset >= len(self.code):
            offset = len(self.code) - 1
        if self.code[offset] != '.':
            offset = self._find_primary_without_dot_start(offset)
        else:
            offset = offset + 1
        while offset > 0:
            prev = self._find_last_non_space_char(offset - 1)
            if offset <= 0 or self.code[prev] != '.':
                break
            offset = self._find_primary_without_dot_start(prev - 1)
            if not self._is_id_char(offset):
                break
        return offset

    def get_primary_at(self, offset):
        """Return the dotted primary around ``offset`` as raw text."""
        offset = self._get_fixed_offset(offset)
        start, end = self.get_primary_range(offset)
        return self.raw[start:end].strip()

    def get_splitted_primary_before(self, offset):
        """returns expression, starting, starting_offset

        This function is used in `rope.codeassist.assist` function.  The
        primary just before ``offset`` is split into the dotted prefix
        ("expression") and the partially typed last segment ("starting").
        """
        if offset == 0:
            return ('', '', 0)
        end = offset - 1
        word_start = self._find_atom_start(end)
        real_start = self._find_primary_start(end)
        if self.code[word_start:offset].strip() == '':
            word_start = end
        if self.code[end].isspace():
            word_start = end
        if self.code[real_start:word_start].strip() == '':
            real_start = word_start
        if real_start == word_start == end and not self._is_id_char(end):
            return ('', '', offset)
        if real_start == word_start:
            return ('', self.raw[word_start:offset], word_start)
        else:
            if self.code[end] == '.':
                return (self.raw[real_start:end], '', offset)
            last_dot_position = word_start
            if self.code[word_start] != '.':
                last_dot_position = self._find_last_non_space_char(word_start - 1)
            last_char_position = self._find_last_non_space_char(last_dot_position - 1)
            if self.code[word_start].isspace():
                word_start = offset
            return (self.raw[real_start:last_char_position + 1],
                    self.raw[word_start:offset], word_start)

    def _get_line_start(self, offset):
        # Offset of the newline preceding `offset` (0 for the first line).
        try:
            return self.code.rindex('\n', 0, offset + 1)
        except ValueError:
            return 0

    def _get_line_end(self, offset):
        try:
            return self.code.index('\n', offset)
        except ValueError:
            return len(self.code)

    def is_name_assigned_in_class_body(self, offset):
        # True when the word at `offset` starts its line (no dotted prefix)
        # and is followed by a plain `=` assignment.
        word_start = self._find_word_start(offset - 1)
        word_end = self._find_word_end(offset) + 1
        if '.' in self.code[word_start:word_end]:
            return False
        line_start = self._get_line_start(word_start)
        line = self.code[line_start:word_start].strip()
        return not line and self.get_assignment_type(offset) == '='

    def is_a_class_or_function_name_in_header(self, offset):
        word_start = self._find_word_start(offset - 1)
        line_start = self._get_line_start(word_start)
        prev_word = self.code[line_start:word_start].strip()
        return prev_word in ['def', 'class']

    def _find_first_non_space_char(self, offset):
        # Scan right over whitespace; like its mirror, stops at newlines.
        if offset >= len(self.code):
            return len(self.code)
        while offset < len(self.code) and self.code[offset].isspace():
            if self.code[offset] == '\n':
                return offset
            offset += 1
        return offset

    def is_a_function_being_called(self, offset):
        word_end = self._find_word_end(offset) + 1
        next_char = self._find_first_non_space_char(word_end)
        return next_char < len(self.code) and \
            self.code[next_char] == '(' and \
            not self.is_a_class_or_function_name_in_header(offset)

    def _find_import_end(self, start):
        # Imports are assumed to end at the end of the line.
        return self._get_line_end(start)

    def is_import_statement(self, offset):
        try:
            # 7 == len('import ').
            last_import = self.code.rindex('import ', 0, offset)
        except ValueError:
            return False
        return self._find_import_end(last_import + 7) >= offset

    def is_from_statement(self, offset):
        try:
            last_from = self.code.rindex('from ', 0, offset)
            from_import = self.code.index(' import ', last_from)
            from_names = from_import + 8  # len(' import ')
        except ValueError:
            return False
        from_names = self._find_first_non_space_char(from_names)
        return self._find_import_end(from_names) >= offset

    def is_from_statement_module(self, offset):
        # True when the primary at `offset` is the module part of
        # ``from <module> import ...``.
        if offset >= len(self.code) - 1:
            return False
        stmt_start = self._find_primary_start(offset)
        line_start = self._get_line_start(stmt_start)
        prev_word = self.code[line_start:stmt_start].strip()
        return prev_word == 'from'

    def is_a_name_after_from_import(self, offset):
        try:
            if len(self.code) > offset and self.code[offset] == '\n':
                line_start = self._get_line_start(offset - 1)
            else:
                line_start = self._get_line_start(offset)
            last_from = self.code.rindex('from ', line_start, offset)
            from_import = self.code.index(' import ', last_from)
            from_names = from_import + 8
        except ValueError:
            return False
        if from_names - 1 > offset:
            return False
        return self._find_import_end(from_names) >= offset

    def get_from_module(self, offset):
        # Returns None (implicitly) when no enclosing from-import exists.
        try:
            last_from = self.code.rindex('from ', 0, offset)
            import_offset = self.code.index(' import ', last_from)
            end = self._find_last_non_space_char(import_offset)
            return self.get_primary_at(end)
        except ValueError:
            pass

    def is_from_aliased(self, offset):
        # NOTE(review): returns None (not False) when no ``as`` follows;
        # callers appear to treat the result as truthy only.
        if not self.is_a_name_after_from_import(offset):
            return False
        try:
            end = self._find_word_end(offset)
            as_end = min(self._find_word_end(end + 1), len(self.code))
            as_start = self._find_word_start(as_end)
            if self.code[as_start:as_end + 1] == 'as':
                return True
        except ValueError:
            return False

    def get_from_aliased(self, offset):
        # Word after the ``as`` that follows the word at `offset`.
        try:
            end = self._find_word_end(offset)
            as_ = self._find_word_end(end + 1)
            alias = self._find_word_end(as_ + 1)
            start = self._find_word_start(alias)
            return self.raw[start:alias + 1]
        except ValueError:
            pass

    def is_function_keyword_parameter(self, offset):
        # ``f(name=...)`` -- a single '=' (not '==') after the word, with a
        # '(' or ',' before it.
        word_end = self._find_word_end(offset)
        if word_end + 1 == len(self.code):
            return False
        next_char = self._find_first_non_space_char(word_end + 1)
        equals = self.code[next_char:next_char + 2]
        if equals == '==' or not equals.startswith('='):
            return False
        word_start = self._find_word_start(offset)
        prev_char = self._find_last_non_space_char(word_start - 1)
        return prev_char - 1 >= 0 and self.code[prev_char] in ',('

    def is_on_function_call_keyword(self, offset):
        stop = self._get_line_start(offset)
        if self._is_id_char(offset):
            offset = self._find_word_start(offset) - 1
        offset = self._find_last_non_space_char(offset)
        if offset <= stop or self.code[offset] not in '(,':
            return False
        parens_start = self.find_parens_start_from_inside(offset)
        return stop < parens_start

    def find_parens_start_from_inside(self, offset):
        # Walk back to the '(' that encloses `offset`, within this line.
        # NOTE(review): `opens` is assigned but never used -- confirm dead.
        stop = self._get_line_start(offset)
        opens = 1
        while offset > stop:
            if self.code[offset] == '(':
                break
            if self.code[offset] != ',':
                offset = self._find_primary_start(offset)
            offset -= 1
        return max(stop, offset)

    def is_assigned_here(self, offset):
        return self.get_assignment_type(offset) is not None

    def get_assignment_type(self, offset):
        # XXX: does not handle tuple assignments
        # Returns the assignment operator ('=', '+=', '//=', ...) following
        # the word at `offset`, or None (comparisons are excluded).
        word_end = self._find_word_end(offset)
        next_char = self._find_first_non_space_char(word_end + 1)
        single = self.code[next_char:next_char + 1]
        double = self.code[next_char:next_char + 2]
        triple = self.code[next_char:next_char + 3]
        if double not in ('==', '<=', '>=', '!='):
            for op in [single, double, triple]:
                if op.endswith('='):
                    return op

    def get_primary_range(self, offset):
        start = self._find_primary_start(offset)
        end = self._find_word_end(offset) + 1
        return (start, end)

    def get_word_range(self, offset):
        offset = max(0, offset)
        start = self._find_word_start(offset)
        end = self._find_word_end(offset) + 1
        return (start, end)

    def get_word_parens_range(self, offset, opening='(', closing=')'):
        # Range of the bracketed group following the word at `offset`;
        # end is exclusive when the closer is found.
        end = self._find_word_end(offset)
        start_parens = self.code.index(opening, end)
        index = start_parens
        open_count = 0
        while index < len(self.code):
            if self.code[index] == opening:
                open_count += 1
            if self.code[index] == closing:
                open_count -= 1
            if open_count == 0:
                return (start_parens, index + 1)
            index += 1
        return (start_parens, index)

    def get_parameters(self, first, last):
        """Split the call arguments between offsets `first` and `last`.

        Returns ``(args, keywords)`` where `args` is a list of positional
        argument texts and `keywords` a list of ``(name, value)`` pairs;
        both in source order (the scan itself runs right-to-left).
        """
        keywords = []
        args = []
        current = self._find_last_non_space_char(last - 1)
        while current > first:
            primary_start = current
            current = self._find_primary_start(current)
            while current != first and self.code[current] not in '=,':
                current = self._find_last_non_space_char(current - 1)
            primary = self.raw[current + 1:primary_start + 1].strip()
            if self.code[current] == '=':
                # Keyword argument: continue back to the preceding comma
                # to pick up the parameter name.
                primary_start = current - 1
                current -= 1
                while current != first and self.code[current] not in ',':
                    current = self._find_last_non_space_char(current - 1)
                param_name = self.raw[current + 1:primary_start + 1].strip()
                keywords.append((param_name, primary))
            else:
                args.append(primary)
            current = self._find_last_non_space_char(current - 1)
        args.reverse()
        keywords.reverse()
        return args, keywords

    def is_assigned_in_a_tuple_assignment(self, offset):
        start = self._get_line_start(offset)
        end = self._get_line_end(offset)
        primary_start = self._find_primary_start(offset)
        primary_end = self._find_word_end(offset)
        prev_char_offset = self._find_last_non_space_char(primary_start - 1)
        next_char_offset = self._find_first_non_space_char(primary_end + 1)
        next_char = prev_char = ''
        if prev_char_offset >= start:
            prev_char = self.code[prev_char_offset]
        if next_char_offset < end:
            next_char = self.code[next_char_offset]
        try:
            equals_offset = self.code.index('=', start, end)
        except ValueError:
            return False
        if prev_char not in '(,' and next_char not in ',)':
            return False
        parens_start = self.find_parens_start_from_inside(offset)
        # XXX: only handling (x, y) = value
        return offset < equals_offset and \
            self.code[start:parens_start].strip() == ''

    def get_function_and_args_in_header(self, offset):
        """Return the raw ``name(arg, ...)`` text of the enclosing header."""
        offset = self.find_function_offset(offset)
        lparens, rparens = self.get_word_parens_range(offset)
        return self.raw[offset:rparens + 1]

    def find_function_offset(self, offset, definition='def '):
        # Find a real (word-boundary) occurrence of `definition` at or
        # after `offset` and return the offset of the defined name.
        while True:
            offset = self.code.index(definition, offset)
            if offset == 0 or not self._is_id_char(offset - 1):
                break
            offset += 1
        # NOTE(review): `+ 4` matches len('def ') only; for the
        # ``definition='lambda '`` caller it lands *inside* the keyword and
        # relies on _find_first_non_space_char/_find_word_end downstream --
        # confirm before generalizing.
        def_ = offset + 4
        return self._find_first_non_space_char(def_)

    def get_lambda_and_args(self, offset):
        """Return the raw ``lambda params:`` text around ``offset``."""
        offset = self.find_function_offset(offset, definition = 'lambda ')
        lparens, rparens = self.get_word_parens_range(offset, opening=' ', closing=':')
        return self.raw[offset:rparens + 1]
import warnings
def saveit(func):
    """A decorator that caches the return value of a method on the instance.

    The value is stored as ``_<funcname>`` on ``self``; subsequent calls
    return the stored value without re-invoking ``func``.
    """
    attr = '_' + func.__name__

    def _wrapper(self, *args, **kwds):
        # EAFP: a missing cache attribute means this is the first call.
        try:
            return getattr(self, attr)
        except AttributeError:
            value = func(self, *args, **kwds)
            setattr(self, attr, value)
            return value
    return _wrapper


# Historical alias.
cacheit = saveit
def prevent_recursion(default):
    """A decorator that returns the return value of `default` in recursions

    A per-instance flag attribute (``_calling_<funcname>_``) marks that the
    decorated method is already on the stack; re-entrant calls short-circuit
    to ``default()`` instead of recursing.  The flag is always cleared in
    ``finally`` so an exception cannot leave the method permanently blocked.
    """
    def decorator(func):
        name = '_calling_%s_' % func.__name__
        def newfunc(self, *args, **kwds):
            # Already inside this method for this instance: break the cycle.
            if getattr(self, name, False):
                return default()
            setattr(self, name, True)
            try:
                return func(self, *args, **kwds)
            finally:
                setattr(self, name, False)
        return newfunc
    return decorator
def ignore_exception(exception_class):
    """A decorator that ignores `exception_class` exceptions

    The wrapped function returns ``None`` when the exception is raised.
    """
    def _decorator(func):
        def newfunc(*args, **kwds):
            try:
                return func(*args, **kwds)
            except exception_class:
                return None
        return newfunc
    return _decorator
def deprecated(message=None):
    """A decorator for deprecated functions

    Each call emits a `DeprecationWarning`; when *message* is omitted a
    default '<name> is deprecated' text is used.
    """
    def _decorator(func):
        # Close over the message instead of the default-argument trick.
        text = message
        if text is None:
            text = '%s is deprecated' % func.__name__

        def newfunc(*args, **kwds):
            warnings.warn(text, DeprecationWarning, stacklevel=2)
            return func(*args, **kwds)
        return newfunc
    return _decorator
def cached(count):
    """A caching decorator based on parameter objects

    Keeps at most *count* recent call results (see `_Cached`).
    """
    def _decorator(func):
        return _Cached(func, count)
    return _decorator
class _Cached(object):
    """A bounded FIFO call cache keyed on ``(args, kwds)`` equality.

    Lookup is a linear scan, so this is only suitable for small `count`
    values; the oldest entry is evicted once the bound is exceeded.
    """

    def __init__(self, func, count):
        self.func = func
        self.cache = []
        self.count = count

    def __call__(self, *args, **kwds):
        key = (args, kwds)
        # Sentinel distinguishes "not cached" from a cached None result.
        missing = object()
        found = next((value for stored, value in self.cache
                      if stored == key), missing)
        if found is not missing:
            return found
        value = self.func(*args, **kwds)
        self.cache.append((key, value))
        if len(self.cache) > self.count:
            del self.cache[0]
        return value
import warnings
from rope.base import exceptions
class TaskHandle(object):
    """A handle for observing and interrupting a long-running task."""

    def __init__(self, name='Task', interrupts=True):
        """Construct a TaskHandle

        If `interrupts` is `False` the task won't be interrupted by
        calling `TaskHandle.stop()`.
        """
        self.name = name
        self.interrupts = interrupts
        self.stopped = False
        self.job_sets = []
        self.observers = []

    def stop(self):
        """Interrupts the refactoring"""
        if not self.interrupts:
            return
        self.stopped = True
        self._inform_observers()

    def current_jobset(self):
        """Return the current `JobSet`"""
        if not self.job_sets:
            return None
        return self.job_sets[-1]

    def add_observer(self, observer):
        """Register an observer for this task handle

        The observer is notified whenever the task is stopped or
        a job gets finished.
        """
        self.observers.append(observer)

    def is_stopped(self):
        return self.stopped

    def get_jobsets(self):
        return self.job_sets

    def create_jobset(self, name='JobSet', count=None):
        jobset = JobSet(self, name=name, count=count)
        self.job_sets.append(jobset)
        self._inform_observers()
        return jobset

    def _inform_observers(self):
        # Iterate over a copy so observers may mutate the list safely.
        for observer in list(self.observers):
            observer()
class JobSet(object):
    """Progress bookkeeping for a group of jobs under a `TaskHandle`."""

    def __init__(self, handle, name, count):
        self.handle = handle
        self.name = name
        self.count = count
        self.done = 0
        self.job_name = None

    def started_job(self, name):
        self.check_status()
        self.job_name = name
        self.handle._inform_observers()

    def finished_job(self):
        self.check_status()
        self.done += 1
        self.handle._inform_observers()
        self.job_name = None

    def check_status(self):
        # Abort the task cooperatively once the handle was stopped.
        if self.handle.is_stopped():
            raise exceptions.InterruptedTaskError()

    def get_active_job_name(self):
        return self.job_name

    def get_percent_done(self):
        # None when no meaningful total is known.
        if self.count is None or self.count <= 0:
            return None
        return min(self.done * 100 // self.count, 100)

    def get_name(self):
        return self.name
class NullTaskHandle(object):
    """A no-op `TaskHandle` for callers that want no progress reporting."""

    def __init__(self):
        pass

    def is_stopped(self):
        # A null handle can never be stopped.
        return False

    def stop(self):
        pass

    def create_jobset(self, *args, **kwds):
        return NullJobSet()

    def get_jobsets(self):
        return []

    def add_observer(self, observer):
        pass
class NullJobSet(object):
    """A no-op `JobSet`; every method does nothing and returns ``None``."""

    def started_job(self, name):
        pass

    def finished_job(self):
        pass

    def check_status(self):
        pass

    def get_active_job_name(self):
        pass

    def get_percent_done(self):
        pass

    def get_name(self):
        pass
import rope.base.pyobjects
from rope.base import exceptions, utils
class PyName(object):
    """A reference to a `PyObject` inside a python program.

    Abstract base class; subclasses implement the two accessors below.
    """

    def get_object(self):
        """Return the `PyObject` object referenced by this `PyName`"""

    def get_definition_location(self):
        """Return a (module, lineno) tuple"""
class DefinedName(PyName):
    """A name that directly holds its defining `PyObject`."""

    def __init__(self, pyobject):
        self.pyobject = pyobject

    def get_object(self):
        return self.pyobject

    def get_definition_location(self):
        pyobject = self.pyobject
        return (pyobject.get_module(), pyobject.get_ast().lineno)
class AssignedName(PyName):
    """Only a placeholder"""
    # NOTE(review): no behavior here; presumably the concrete assigned-name
    # implementation lives in another rope module -- confirm.
class UnboundName(PyName):
    """A name with no known definition; defaults to the unknown object."""

    def __init__(self, pyobject=None):
        if pyobject is None:
            pyobject = rope.base.pyobjects.get_unknown()
        self.pyobject = pyobject

    def get_object(self):
        return self.pyobject

    def get_definition_location(self):
        # Unbound names have no definition site.
        return (None, None)
class AssignmentValue(object):
    """An assigned expression"""

    def __init__(self, ast_node, levels=None, evaluation='',
                 assign_type=False):
        """The `levels` is `None` for simple assignments and is
        a list of numbers for tuple assignments for example in::

           a, (b, c) = x

        The levels for `a` is ``[0]``, for `b` is ``[1, 0]`` and for
        `c` is ``[1, 1]``.
        """
        self.ast_node = ast_node
        # FIX: was `levels == None`; identity is the correct None test and
        # avoids invoking `__eq__` on arbitrary caller objects.
        self.levels = [] if levels is None else levels
        self.evaluation = evaluation
        self.assign_type = assign_type

    def get_lineno(self):
        # Line number of the assigned expression's AST node.
        return self.ast_node.lineno
class EvaluatedName(PyName):
    """A name whose object will be evaluated later

    The `callback` computes the object lazily; the result is cached in an
    `_Inferred` slot tied to the module's concluded data (when available).
    """

    def __init__(self, callback, module=None, lineno=None):
        self.module = module
        self.lineno = lineno
        self.callback = callback
        self.pyobject = _Inferred(callback, _get_concluded_data(module))

    def get_object(self):
        return self.pyobject.get()

    def get_definition_location(self):
        return (self.module, self.lineno)

    def invalidate(self):
        """Forget the `PyObject` this `PyName` holds"""
        self.pyobject.set(None)
class ParameterName(PyName):
    """Only a placeholder"""
    # NOTE(review): no behavior here; presumably the concrete parameter-name
    # implementation lives in another rope module -- confirm.
class ImportedModule(PyName):
    """A name referring to an imported module.

    The module can be specified either by `resource` or by `module_name`
    (with `level` > 0 meaning a relative import of that depth).  The
    resolved pymodule is cached in the importing module's concluded data.
    """

    def __init__(self, importing_module, module_name=None,
                 level=0, resource=None):
        self.importing_module = importing_module
        self.module_name = module_name
        self.level = level
        self.resource = resource
        self.pymodule = _get_concluded_data(self.importing_module)

    def _current_folder(self):
        # Folder containing the importing module, for relative lookups.
        resource = self.importing_module.get_module().get_resource()
        if resource is None:
            return None
        return resource.parent

    def _get_pymodule(self):
        # Resolve lazily; a failed lookup leaves the cache empty so it is
        # retried on the next call.
        if self.pymodule.get() is None:
            pycore = self.importing_module.pycore
            if self.resource is not None:
                self.pymodule.set(pycore.resource_to_pyobject(self.resource))
            elif self.module_name is not None:
                try:
                    if self.level == 0:
                        pymodule = pycore.get_module(self.module_name,
                                                     self._current_folder())
                    else:
                        pymodule = pycore.get_relative_module(
                            self.module_name, self._current_folder(), self.level)
                    self.pymodule.set(pymodule)
                except exceptions.ModuleNotFoundError:
                    pass
        return self.pymodule.get()

    def get_object(self):
        if self._get_pymodule() is None:
            return rope.base.pyobjects.get_unknown()
        return self._get_pymodule()

    def get_definition_location(self):
        pymodule = self._get_pymodule()
        if not isinstance(pymodule, rope.base.pyobjects.PyDefinedObject):
            return (None, None)
        # Modules are defined at line 1 of themselves.
        return (pymodule.get_module(), 1)
class ImportedName(PyName):
    """A name imported from another module (``from mod import name``)."""

    def __init__(self, imported_module, imported_name):
        self.imported_module = imported_module
        self.imported_name = imported_name

    def _get_imported_pyname(self):
        try:
            result = self.imported_module.get_object()[self.imported_name]
            # Guard against resolving to ourselves (self-import cycles).
            if result != self:
                return result
        except exceptions.AttributeNotFoundError:
            pass
        return UnboundName()

    # Recursion guards: cyclic imports fall back to the unknown object /
    # an unknown location instead of recursing forever.
    @utils.prevent_recursion(rope.base.pyobjects.get_unknown)
    def get_object(self):
        return self._get_imported_pyname().get_object()

    @utils.prevent_recursion(lambda: (None, None))
    def get_definition_location(self):
        return self._get_imported_pyname().get_definition_location()
def _get_concluded_data(module):
    """Return a concluded-data slot, tied to *module* when one is given."""
    if module is not None:
        return module._get_concluded_data()
    # No module: hand out a free-standing slot.
    return rope.base.pyobjects._ConcludedData()
def _circular_inference():
    # Used as the `default` callback of `_Inferred.get`: raised when object
    # inference re-enters itself.
    raise rope.base.pyobjects.IsBeingInferredError(
        'Circular Object Inference')
class _Inferred(object):
    """A lazily computed `PyObject` slot.

    Results are stored either in a shared `_ConcludedData` object
    (`concluded`) or, when none is supplied, in a plain `temp` attribute.
    """

    def __init__(self, get_inferred, concluded=None):
        self.get_inferred = get_inferred
        self.concluded = concluded
        if self.concluded is None:
            self.temp = None

    # Re-entrant calls raise IsBeingInferredError via _circular_inference.
    @utils.prevent_recursion(_circular_inference)
    def get(self, *args, **kwds):
        if self.concluded is None or self.concluded.get() is None:
            self.set(self.get_inferred(*args, **kwds))
        # Normalize a failed inference to the unknown object.
        if self._get() is None:
            self.set(rope.base.pyobjects.get_unknown())
        return self._get()

    def set(self, pyobject):
        if self.concluded is not None:
            self.concluded.set(pyobject)
        self.temp = pyobject

    def _get(self):
        if self.concluded is not None:
            return self.concluded.get()
        return self.temp
import os
import re
import rope.base.builtins
from rope.base import exceptions
class PyObjectToTextual(object):
    """For transforming `PyObject` to textual form

    This can be used for storing `PyObjects` in files.  Use
    `TextualToPyObject` for converting back.

    The textual form is a nested tuple whose first element is a tag such as
    'defined', 'builtin', 'instance', 'none' or 'unknown'.  NOTE: the
    ``<ClassName>_to_textual`` method names are part of the behavior -- they
    are looked up dynamically from the object's class name.
    """

    def __init__(self, project):
        self.project = project

    def transform(self, pyobject):
        """Transform a `PyObject` to textual form"""
        if pyobject is None:
            return ('none',)
        object_type = type(pyobject)
        try:
            # Dispatch on the concrete class name, e.g. PyFunction ->
            # PyFunction_to_textual.
            method = getattr(self, object_type.__name__ + '_to_textual')
            return method(pyobject)
        except AttributeError:
            return ('unknown',)

    def __call__(self, pyobject):
        return self.transform(pyobject)

    def PyObject_to_textual(self, pyobject):
        # A plain object is stored as an instance of its (defined) type.
        if isinstance(pyobject.get_type(), rope.base.pyobjects.AbstractClass):
            result = self.transform(pyobject.get_type())
            if result[0] == 'defined':
                return ('instance', result)
            return result
        return ('unknown',)

    def PyFunction_to_textual(self, pyobject):
        return self._defined_to_textual(pyobject)

    def PyClass_to_textual(self, pyobject):
        return self._defined_to_textual(pyobject)

    def _defined_to_textual(self, pyobject):
        # Build the dotted address of the object inside its module.
        address = []
        while pyobject.parent is not None:
            address.insert(0, pyobject.get_name())
            pyobject = pyobject.parent
        return ('defined', self._get_pymodule_path(pyobject.get_module()),
                '.'.join(address))

    def PyModule_to_textual(self, pyobject):
        return ('defined', self._get_pymodule_path(pyobject))

    def PyPackage_to_textual(self, pyobject):
        return ('defined', self._get_pymodule_path(pyobject))

    def List_to_textual(self, pyobject):
        return ('builtin', 'list', self.transform(pyobject.holding))

    def Dict_to_textual(self, pyobject):
        return ('builtin', 'dict', self.transform(pyobject.keys),
                self.transform(pyobject.values))

    def Tuple_to_textual(self, pyobject):
        objects = [self.transform(holding)
                   for holding in pyobject.get_holding_objects()]
        return tuple(['builtin', 'tuple'] + objects)

    def Set_to_textual(self, pyobject):
        return ('builtin', 'set', self.transform(pyobject.holding))

    def Iterator_to_textual(self, pyobject):
        return ('builtin', 'iter', self.transform(pyobject.holding))

    def Generator_to_textual(self, pyobject):
        return ('builtin', 'generator', self.transform(pyobject.holding))

    def Str_to_textual(self, pyobject):
        return ('builtin', 'str')

    def File_to_textual(self, pyobject):
        return ('builtin', 'file')

    def BuiltinFunction_to_textual(self, pyobject):
        return ('builtin', 'function', pyobject.get_name())

    def _get_pymodule_path(self, pymodule):
        return self.resource_to_path(pymodule.get_resource())

    def resource_to_path(self, resource):
        # In-project resources are stored relative; foreign ones absolute.
        if resource.project == self.project:
            return resource.path
        else:
            return resource.real_path
class TextualToPyObject(object):
    """For transforming textual form to `PyObject`

    Inverse of `PyObjectToTextual`: the first tuple element selects a
    ``<tag>_to_pyobject`` handler; unknown tags resolve to ``None``.
    """

    def __init__(self, project, allow_in_project_absolutes=False):
        # NOTE(review): `allow_in_project_absolutes` is accepted but never
        # stored or read in this class -- confirm whether it is dead or
        # should influence `path_to_resource`.
        self.project = project

    def __call__(self, textual):
        return self.transform(textual)

    def transform(self, textual):
        """Transform an object from textual form to `PyObject`"""
        if textual is None:
            return None
        type = textual[0]
        try:
            # Dispatch on the tag, e.g. 'builtin' -> builtin_to_pyobject.
            method = getattr(self, type + '_to_pyobject')
            return method(textual)
        except AttributeError:
            return None

    def builtin_to_pyobject(self, textual):
        # NOTE(review): `name` is assigned but unused -- confirm dead.
        name = textual[1]
        method = getattr(self, 'builtin_%s_to_pyobject' % textual[1], None)
        if method is not None:
            return method(textual)

    def builtin_str_to_pyobject(self, textual):
        return rope.base.builtins.get_str()

    def builtin_list_to_pyobject(self, textual):
        holding = self.transform(textual[2])
        return rope.base.builtins.get_list(holding)

    def builtin_dict_to_pyobject(self, textual):
        keys = self.transform(textual[2])
        values = self.transform(textual[3])
        return rope.base.builtins.get_dict(keys, values)

    def builtin_tuple_to_pyobject(self, textual):
        objects = []
        for holding in textual[2:]:
            objects.append(self.transform(holding))
        return rope.base.builtins.get_tuple(*objects)

    def builtin_set_to_pyobject(self, textual):
        holding = self.transform(textual[2])
        return rope.base.builtins.get_set(holding)

    def builtin_iter_to_pyobject(self, textual):
        holding = self.transform(textual[2])
        return rope.base.builtins.get_iterator(holding)

    def builtin_generator_to_pyobject(self, textual):
        holding = self.transform(textual[2])
        return rope.base.builtins.get_generator(holding)

    def builtin_file_to_pyobject(self, textual):
        return rope.base.builtins.get_file()

    def builtin_function_to_pyobject(self, textual):
        # Only functions known to the builtins registry can be restored.
        if textual[2] in rope.base.builtins.builtins:
            return rope.base.builtins.builtins[textual[2]].get_object()

    def unknown_to_pyobject(self, textual):
        return None

    def none_to_pyobject(self, textual):
        return None

    def _module_to_pyobject(self, textual):
        path = textual[1]
        return self._get_pymodule(path)

    def _hierarchical_defined_to_pyobject(self, textual):
        # Walk the dotted address down nested scopes of the module.
        path = textual[1]
        names = textual[2].split('.')
        pymodule = self._get_pymodule(path)
        pyobject = pymodule
        for name in names:
            if pyobject is None:
                return None
            if isinstance(pyobject, rope.base.pyobjects.PyDefinedObject):
                try:
                    pyobject = pyobject.get_scope()[name].get_object()
                except exceptions.NameNotFoundError:
                    return None
            else:
                return None
        return pyobject

    def defined_to_pyobject(self, textual):
        # ('defined', path) is a module; ('defined', path, addr) is a
        # name defined inside it.
        if len(textual) == 2 or textual[2] == '':
            return self._module_to_pyobject(textual)
        else:
            return self._hierarchical_defined_to_pyobject(textual)

    def instance_to_pyobject(self, textual):
        type = self.transform(textual[1])
        if type is not None:
            return rope.base.pyobjects.PyObject(type)

    def _get_pymodule(self, path):
        resource = self.path_to_resource(path)
        if resource is not None:
            return self.project.pycore.resource_to_pyobject(resource)

    def path_to_resource(self, path):
        try:
            root = self.project.address
            if not os.path.isabs(path):
                return self.project.get_resource(path)
            if path == root or path.startswith(root + os.sep):
                # INFO: This is a project file; should not be absolute
                return None
            import rope.base.project
            return rope.base.project.get_no_project().get_resource(path)
        except exceptions.ResourceNotFoundError:
            return None
class DOITextualToPyObject(TextualToPyObject):
    """For transforming textual form to `PyObject`

    The textual form DOI uses is different from rope's standard
    textual form.  The reason is that we cannot find the needed
    information by analyzing live objects.  This class can be
    used to transform DOI textual form to `PyObject` and later
    we can convert it to standard textual form using
    `TextualToPyObject` class.
    """

    def _function_to_pyobject(self, textual):
        # ('defined', path, lineno) -- locate the function by line number.
        path = textual[1]
        lineno = int(textual[2])
        pymodule = self._get_pymodule(path)
        if pymodule is not None:
            scope = pymodule.get_scope()
            inner_scope = scope.get_inner_scope_for_line(lineno)
            return inner_scope.pyobject

    def _class_to_pyobject(self, textual):
        # ('defined', path, name) -- prefer a module-level class of that
        # name; otherwise fall back to a textual search for its header.
        path, name = textual[1:]
        pymodule = self._get_pymodule(path)
        if pymodule is None:
            return None
        module_scope = pymodule.get_scope()
        suspected = None
        if name in module_scope.get_names():
            suspected = module_scope[name].get_object()
        if suspected is not None and \
           isinstance(suspected, rope.base.pyobjects.PyClass):
            return suspected
        else:
            lineno = self._find_occurrence(name, pymodule.get_resource().read())
            if lineno is not None:
                inner_scope = module_scope.get_inner_scope_for_line(lineno)
                return inner_scope.pyobject

    def defined_to_pyobject(self, textual):
        if len(textual) == 2:
            return self._module_to_pyobject(textual)
        else:
            # A digit third element denotes a function lineno, otherwise a
            # class name.
            if textual[2].isdigit():
                result = self._function_to_pyobject(textual)
            else:
                result = self._class_to_pyobject(textual)
            # Plain modules are not valid results here.
            if not isinstance(result, rope.base.pyobjects.PyModule):
                return result

    def _find_occurrence(self, name, source):
        """Return the 1-based line of the first ``class <name>`` header."""
        pattern = re.compile(r'^\s*class\s*' + name + r'\b')
        lines = source.split('\n')
        for i in range(len(lines)):
            if pattern.match(lines[i]):
                return i + 1

    def path_to_resource(self, path):
        import rope.base.libutils
        # DOI reports absolute paths; relativize in-project ones first.
        root = self.project.address
        relpath = rope.base.libutils.relative(root, path)
        if relpath is not None:
            path = relpath
        return super(DOITextualToPyObject, self).path_to_resource(path)
import rope.base.builtins
import rope.base.pynames
import rope.base.pyobjects
from rope.base import evaluate, utils, arguments
# Decorator: swallow `IsBeingInferredError` so that cyclic inference makes
# the decorated function return None instead of propagating.
_ignore_inferred = utils.ignore_exception(
    rope.base.pyobjects.IsBeingInferredError)
@_ignore_inferred
def infer_returned_object(pyfunction, args):
    """Infer the `PyObject` this `PyFunction` returns after calling

    Resolution order: a remembered exact result for these `args`, then
    static inference (whose result is recorded for future calls), then the
    object-info's general remembered return value.
    """
    object_info = pyfunction.pycore.object_info
    result = object_info.get_exact_returned(pyfunction, args)
    if result is not None:
        return result
    result = _infer_returned(pyfunction, args)
    if result is not None:
        # Record the observed call only for functions backed by a real
        # resource (skips e.g. synthesized modules).
        if args and pyfunction.get_module().get_resource() is not None:
            params = args.get_arguments(
                pyfunction.get_param_names(special_args=False))
            object_info.function_called(pyfunction, params, result)
        return result
    return object_info.get_returned(pyfunction, args)
@_ignore_inferred
def infer_parameter_objects(pyfunction):
    """Infer the `PyObject` of each parameter of this `PyFunction`.

    Falls back to unknown objects when the object-info database has no
    recorded parameters; the implicit first parameter (`self`/`cls`) is
    fixed up afterwards.
    """
    object_info = pyfunction.pycore.object_info
    result = object_info.get_parameter_objects(pyfunction)
    if result is None:
        result = _parameter_objects(pyfunction)
    _handle_first_parameter(pyfunction, result)
    return result
def _handle_first_parameter(pyobject, parameters):
kind = pyobject.get_kind()
if parameters is None or kind not in ['method', 'classmethod']:
pass
if not parameters:
if not pyobject.get_param_names(special_args=False):
return
parameters.append(rope.base.pyobjects.get_unknown())
if kind == 'method':
parameters[0] = rope.base.pyobjects.PyObject(pyobject.parent)
if kind == 'classmethod':
parameters[0] = pyobject.parent
@_ignore_inferred
def infer_assigned_object(pyname):
    """Infer the object assigned to `pyname`.

    The most recent assignment that can be resolved wins.
    """
    if not pyname.assignments:
        return
    for assignment in reversed(pyname.assignments):
        result = _infer_assignment(assignment, pyname.module)
        if result is not None:
            return result
def get_passed_objects(pyfunction, parameter_index):
    """Return objects observed to be passed for one parameter.

    Falls back to the statically inferred parameter object when nothing
    has been recorded for `parameter_index`.
    """
    object_info = pyfunction.pycore.object_info
    result = object_info.get_passed_objects(pyfunction,
                                            parameter_index)
    if not result:
        statically_inferred = _parameter_objects(pyfunction)
        if len(statically_inferred) > parameter_index:
            result.append(statically_inferred[parameter_index])
    return result
def _infer_returned(pyobject, args):
    """Statically infer the returned object by evaluating `return` ASTs."""
    if args:
        # HACK: Setting parameter objects manually
        # This is not thread safe and might cause problems if `args`
        # does not come from a good call site
        pyobject.get_scope().invalidate_data()
        pyobject._set_parameter_pyobjects(
            args.get_arguments(pyobject.get_param_names(special_args=False)))
    scope = pyobject.get_scope()
    if not scope._get_returned_asts():
        return
    # Only evaluate the last few return statements, newest first.
    maxtries = 3
    for returned_node in reversed(scope._get_returned_asts()[-maxtries:]):
        try:
            resulting_pyname = evaluate.eval_node(scope, returned_node)
            if resulting_pyname is None:
                continue
            pyobject = resulting_pyname.get_object()
            if pyobject == rope.base.pyobjects.get_unknown():
                continue
            if not scope._is_generator():
                return pyobject
            else:
                # Generator functions yield a generator over the value.
                return rope.base.builtins.get_generator(pyobject)
        except rope.base.pyobjects.IsBeingInferredError:
            pass
def _parameter_objects(pyobject):
    """Return an "unknown" placeholder for each normal parameter."""
    names = pyobject.get_param_names(special_args=False)
    unknown = rope.base.pyobjects.get_unknown()
    return [unknown] * len(names)
# handling `rope.base.pynames.AssignmentValue`
@_ignore_inferred
def _infer_assignment(assignment, pymodule):
    """Infer the object produced by a single `AssignmentValue`."""
    result = _follow_pyname(assignment, pymodule)
    if result is None:
        return None
    pyname, pyobject = result
    # Apply the textual evaluation chain (attribute access / calls).
    pyobject = _follow_evaluations(assignment, pyname, pyobject)
    if pyobject is None:
        return None
    # Apply tuple/list unpacking levels, e.g. for ``a, b = ...``.
    return _follow_levels(assignment, pyobject)
def _follow_levels(assignment, pyobject):
    """Descend into tuple/list element types for each unpacking level."""
    for index in assignment.levels:
        if isinstance(pyobject.get_type(), rope.base.builtins.Tuple):
            holdings = pyobject.get_type().get_holding_objects()
            if holdings:
                # Clamp the index; tuples may hold fewer known objects.
                pyobject = holdings[min(len(holdings) - 1, index)]
            else:
                pyobject = None
        elif isinstance(pyobject.get_type(), rope.base.builtins.List):
            # Lists are homogeneous; every index holds the same type.
            pyobject = pyobject.get_type().holding
        else:
            pyobject = None
        if pyobject is None:
            break
    return pyobject
@_ignore_inferred
def _follow_pyname(assignment, pymodule, lineno=None):
    """Evaluate the assignment's AST node; return ``(pyname, pyobject)``.

    Returns `None` implicitly when the node cannot be evaluated.
    """
    assign_node = assignment.ast_node
    if lineno is None:
        lineno = _get_lineno_for_node(assign_node)
    holding_scope = pymodule.get_scope().get_inner_scope_for_line(lineno)
    pyname = evaluate.eval_node(holding_scope, assign_node)
    if pyname is not None:
        result = pyname.get_object()
        # A property accessed in a class body resolves to the value the
        # property would return for an instance of that class.
        if isinstance(result.get_type(), rope.base.builtins.Property) and \
           holding_scope.get_kind() == 'Class':
            arg = rope.base.pynames.UnboundName(
                rope.base.pyobjects.PyObject(holding_scope.pyobject))
            return pyname, result.get_type().get_property_object(
                arguments.ObjectArguments([arg]))
        return pyname, result
@_ignore_inferred
def _follow_evaluations(assignment, pyname, pyobject):
    """Apply the assignment's dotted evaluation string to `pyobject`.

    `assignment.evaluation` is a string like ``'attr.method().x'``; each
    token is an attribute access, optionally followed by a call.
    """
    new_pyname = pyname
    tokens = assignment.evaluation.split('.')
    for token in tokens:
        call = token.endswith('()')
        if call:
            token = token[:-2]
        if token:
            # Keep the previous pyname around: it becomes the implicit
            # `self` argument if this token ends with a call.
            pyname = new_pyname
            new_pyname = _get_attribute(pyobject, token)
            if new_pyname is not None:
                pyobject = new_pyname.get_object()
        if pyobject is not None and call:
            if isinstance(pyobject, rope.base.pyobjects.AbstractFunction):
                args = arguments.ObjectArguments([pyname])
                pyobject = pyobject.get_returned_object(args)
            else:
                # Calling a non-function: give up on this chain.
                pyobject = None
        if pyobject is None:
            break
    if pyobject is not None and assignment.assign_type:
        # The assignment names a type; return an instance of it.
        return rope.base.pyobjects.PyObject(pyobject)
    return pyobject
def _get_lineno_for_node(assign_node):
if hasattr(assign_node, 'lineno') and \
assign_node.lineno is not None:
return assign_node.lineno
return 1
def _get_attribute(pyobject, name):
if pyobject is not None and name in pyobject:
return pyobject[name] | /rope_py3k-0.9.4.tar.gz/rope_py3k-0.9.4/rope/base/oi/soi.py | 0.453988 | 0.187914 | soi.py | pypi |
from rope.base import (change, taskhandle, evaluate,
exceptions, pyobjects, pynames, ast)
from rope.refactor import restructure, sourceutils, similarfinder, importutils
class UseFunction(object):
    """Try to use a function wherever possible"""

    def __init__(self, project, resource, offset):
        """Locate the global function at `offset` in `resource`.

        Raises `RefactoringError` when the selection is not a global
        function defined at module level.
        """
        self.project = project
        self.offset = offset
        this_pymodule = project.pycore.resource_to_pyobject(resource)
        pyname = evaluate.eval_location(this_pymodule, offset)
        if pyname is None:
            raise exceptions.RefactoringError('Unresolvable name selected')
        self.pyfunction = pyname.get_object()
        if not isinstance(self.pyfunction, pyobjects.PyFunction) or \
           not isinstance(self.pyfunction.parent, pyobjects.PyModule):
            raise exceptions.RefactoringError(
                'Use function works for global functions, only.')
        self.resource = self.pyfunction.get_module().get_resource()
        self._check_returns()

    def _check_returns(self):
        # Generators and multi/early-return functions cannot be turned
        # into a single replacement pattern.
        node = self.pyfunction.get_ast()
        if _yield_count(node):
            raise exceptions.RefactoringError('Use function should not '
                                              'be used on generators.')
        returns = _return_count(node)
        if returns > 1:
            raise exceptions.RefactoringError('usefunction: Function has more '
                                              'than one return statement.')
        if returns == 1 and not _returns_last(node):
            raise exceptions.RefactoringError('usefunction: return should '
                                              'be the last statement.')

    def get_changes(self, resources=None,
                    task_handle=taskhandle.NullTaskHandle()):
        """Return a `ChangeSet` replacing matching code with calls.

        The defining module is restructured separately (without adding
        an import of itself).
        """
        if resources is None:
            resources = self.project.pycore.get_python_files()
        changes = change.ChangeSet('Using function <%s>' %
                                   self.pyfunction.get_name())
        if self.resource in resources:
            newresources = list(resources)
            newresources.remove(self.resource)
        for c in self._restructure(newresources, task_handle).changes:
            changes.add_change(c)
        if self.resource in resources:
            # Handle the defining module itself; `others=False` skips
            # adding a self-import.
            for c in self._restructure([self.resource], task_handle,
                                       others=False).changes:
                changes.add_change(c)
        return changes

    def get_function_name(self):
        """Return the name of the function being used."""
        return self.pyfunction.get_name()

    def _restructure(self, resources, task_handle, others=True):
        # Skip the function's own body so it is not replaced by a call
        # to itself.
        body = self._get_body()
        pattern = self._make_pattern()
        goal = self._make_goal(import_=others)
        imports = None
        if others:
            imports = ['import %s' % self._module_name()]
        body_region = sourceutils.get_body_region(self.pyfunction)
        args_value = {'skip': (self.resource, body_region)}
        args = {'': args_value}
        restructuring = restructure.Restructure(
            self.project, pattern, goal, args=args, imports=imports)
        return restructuring.get_changes(resources=resources,
                                         task_handle=task_handle)

    def _find_temps(self):
        return find_temps(self.project, self._get_body())

    def _module_name(self):
        return self.project.pycore.modname(self.resource)

    def _make_pattern(self):
        # Build a similarfinder pattern from the function body, with
        # parameters and temporaries as wildcards.
        params = self.pyfunction.get_param_names()
        body = self._get_body()
        body = restructure.replace(body, 'return', 'pass')
        wildcards = list(params)
        wildcards.extend(self._find_temps())
        if self._does_return():
            if self._is_expression():
                replacement = '${%s}' % self._rope_returned
            else:
                replacement = '%s = ${%s}' % (self._rope_result,
                                              self._rope_returned)
            body = restructure.replace(
                body, 'return ${%s}' % self._rope_returned,
                replacement)
            wildcards.append(self._rope_result)
        return similarfinder.make_pattern(body, wildcards)

    def _get_body(self):
        return sourceutils.get_body(self.pyfunction)

    def _make_goal(self, import_=False):
        # The goal is a call to the function, optionally qualified with
        # its module name and assigned to the matched result variable.
        params = self.pyfunction.get_param_names()
        function_name = self.pyfunction.get_name()
        if import_:
            function_name = self._module_name() + '.' + function_name
        goal = '%s(%s)' % (function_name,
                           ', ' .join(('${%s}' % p) for p in params))
        if self._does_return() and not self._is_expression():
            goal = '${%s} = %s' % (self._rope_result, goal)
        return goal

    def _does_return(self):
        # If removing `return ${result}` changes the body, it returned.
        body = self._get_body()
        removed_return = restructure.replace(body, 'return ${result}', '')
        return removed_return != body

    def _is_expression(self):
        # A single-statement body can be matched as a bare expression.
        return len(self.pyfunction.get_ast().body) == 1

    # Wildcard names used in patterns/goals; unlikely to clash with
    # user code.
    _rope_result = '_rope__result'
    _rope_returned = '_rope__returned'
def find_temps(project, code):
    """Return the names assigned within `code`.

    The code is wrapped in a throwaway function so that its assigned
    names become locals of a single scope.
    """
    wrapped = 'def f():\n' + sourceutils.indent_lines(code, 4)
    pymodule = project.pycore.get_string_module(wrapped)
    function_scope = pymodule.get_scope().get_scopes()[0]
    return [name
            for name, pyname in function_scope.get_names().items()
            if isinstance(pyname, pynames.AssignedName)]
def _returns_last(node):
return node.body and isinstance(node.body[-1], ast.Return)
def _yield_count(node):
    """Count `yield` expressions in `node`, excluding nested functions."""
    visitor = _ReturnOrYieldFinder()
    visitor.start_walking(node)
    return visitor.yields
def _return_count(node):
    """Count `return` statements in `node`, excluding nested functions."""
    visitor = _ReturnOrYieldFinder()
    visitor.start_walking(node)
    return visitor.returns
class _ReturnOrYieldFinder(object):
    """AST visitor counting `return`/`yield` nodes of one function.

    Nested functions and classes are not descended into, so the counts
    belong to the outermost node only.  The `_NodeName` methods follow
    the `rope.base.ast.walk` visitor protocol.
    """

    def __init__(self):
        # counts of statements seen so far
        self.returns = 0
        self.yields = 0

    def _Return(self, node):
        self.returns += 1

    def _Yield(self, node):
        self.yields += 1

    # Do not descend into nested scopes.
    def _FunctionDef(self, node):
        pass

    def _ClassDef(self, node):
        pass

    def start_walking(self, node):
        """Walk `node`'s children (its own body when a function def)."""
        nodes = [node]
        if isinstance(node, ast.FunctionDef):
            nodes = ast.get_child_nodes(node)
        for child in nodes:
            ast.walk(child, self)
import collections
import re
import warnings
from rope.base import ast, codeanalyze, exceptions
def get_patched_ast(source, sorted_children=False):
    """Parse `source` and add ``region`` fields to all of its nodes.

    Adds a ``sorted_children`` field, too, but only when the
    `sorted_children` argument is `True`.
    """
    return patch_ast(ast.parse(source), source, sorted_children)
def patch_ast(node, source, sorted_children=False):
    """Patches the given node

    After calling, each node in `node` will have a new field named
    `region` that is a tuple containing the start and end offsets
    of the code that generated it.

    If `sorted_children` is true, a `sorted_children` field will
    be created for each node, too.  It is a list containing child
    nodes as well as whitespaces and comments that occur between
    them.
    """
    # Already patched; patching again would mismatch token offsets.
    if hasattr(node, 'region'):
        return node
    walker = _PatchingASTWalker(source, children=sorted_children)
    ast.call_for_nodes(node, walker)
    return node
def node_region(patched_ast_node):
    """Return the (start, end) offsets recorded on a patched AST node."""
    region = patched_ast_node.region
    return region
def write_ast(patched_ast_node):
    """Extract source from a patched AST node with `sorted_children`.

    Recursively concatenates each child node's source with the literal
    strings (whitespace, tokens, comments) stored between children.
    If the node was patched with sorted_children turned off, use the
    `node_region` function with the module source code instead.
    """
    pieces = (write_ast(child) if isinstance(child, ast.AST) else child
              for child in patched_ast_node.sorted_children)
    return ''.join(pieces)
class MismatchedTokenError(exceptions.RopeError):
    """Raised when an expected token cannot be found in the source."""
    pass
class _PatchingASTWalker(object):
    """Visitor that annotates AST nodes with source ``region`` offsets.

    Each ``_NodeName`` method describes a node as the ordered list of
    its children and literal tokens; `_handle` then consumes those
    tokens from the source to compute the node's (start, end) region.
    When `children` is true, each node also gets a `sorted_children`
    list holding child nodes interleaved with the literal text between
    them.
    """

    def __init__(self, source, children=False):
        self.source = _Source(source)
        self.children = children
        self.lines = codeanalyze.SourceLinesAdapter(source)
        # Stack of pending child deques; used to find where the next
        # statement starts when consuming string literals.
        self.children_stack = []

    # Sentinels that tell `_handle` to consume a literal number/string.
    Number = object()
    String = object()

    def __call__(self, node):
        method = getattr(self, '_' + node.__class__.__name__, None)
        if method is not None:
            return method(node)
        # ???: Unknown node; what should we do here?
        warnings.warn('Unknown node type <%s>; please report!'
                      % node.__class__.__name__, RuntimeWarning)
        node.region = (self.source.offset, self.source.offset)
        if self.children:
            node.sorted_children = ast.get_children(node)

    def _handle(self, node, base_children, eat_parens=False, eat_spaces=False):
        """Consume `base_children` (nodes and tokens) from the source.

        Sets ``node.region`` and, optionally, ``node.sorted_children``.
        `eat_parens` extends the region over surrounding parentheses;
        `eat_spaces` extends it over all remaining whitespace (used for
        the module node only).
        """
        if hasattr(node, 'region'):
            # ???: The same node was seen twice; what should we do?
            warnings.warn(
                'Node <%s> has been already patched; please report!' %
                node.__class__.__name__, RuntimeWarning)
            return
        base_children = collections.deque(base_children)
        self.children_stack.append(base_children)
        children = collections.deque()
        formats = []
        suspected_start = self.source.offset
        start = suspected_start
        first_token = True
        while base_children:
            child = base_children.popleft()
            if child is None:
                continue
            offset = self.source.offset
            if isinstance(child, ast.arg):
                # Python 3 argument node: consume its name directly.
                region = self.source.consume(child.arg)
                child = self.source[region[0]:region[1]]
                token_start = offset
            elif isinstance(child, ast.AST):
                # Recurse; the child patches itself.
                ast.call_for_nodes(child, self)
                token_start = child.region[0]
            else:
                if child is self.String:
                    region = self.source.consume_string(
                        end=self._find_next_statement_start())
                elif child is self.Number:
                    region = self.source.consume_number()
                elif child == '!=':
                    # INFO: This has been added to handle deprecated ``<>``
                    region = self.source.consume_not_equal()
                else:
                    region = self.source.consume(child)
                child = self.source[region[0]:region[1]]
                token_start = region[0]
            if not first_token:
                # Remember the text between consecutive children.
                formats.append(self.source[offset:token_start])
                if self.children:
                    children.append(self.source[offset:token_start])
            else:
                first_token = False
                start = token_start
            if self.children:
                children.append(child)
        start = self._handle_parens(children, start, formats)
        if eat_parens:
            start = self._eat_surrounding_parens(
                children, suspected_start, start)
        if eat_spaces:
            if self.children:
                children.appendleft(self.source[0:start])
            end_spaces = self.source[self.source.offset:]
            self.source.consume(end_spaces)
            if self.children:
                children.append(end_spaces)
            start = 0
        if self.children:
            node.sorted_children = children
        node.region = (start, self.source.offset)
        self.children_stack.pop()

    def _handle_parens(self, children, start, formats):
        """Changes `children` and returns new start"""
        opens, closes = self._count_needed_parens(formats)
        old_end = self.source.offset
        new_end = None
        for i in range(closes):
            new_end = self.source.consume(')')[1]
        if new_end is not None:
            if self.children:
                children.append(self.source[old_end:new_end])
        new_start = start
        for i in range(opens):
            new_start = self.source.rfind_token('(', 0, new_start)
        if new_start != start:
            if self.children:
                children.appendleft(self.source[new_start:start])
            start = new_start
        return start

    def _eat_surrounding_parens(self, children, suspected_start, start):
        # Extend the region over one pair of parentheses that wrap the
        # whole node (e.g. a parenthesized tuple or genexp).
        index = self.source.rfind_token('(', suspected_start, start)
        if index is not None:
            old_start = start
            old_offset = self.source.offset
            start = index
            if self.children:
                children.appendleft(self.source[start + 1:old_start])
                children.appendleft('(')
            token_start, token_end = self.source.consume(')')
            if self.children:
                children.append(self.source[old_offset:token_start])
                children.append(')')
        return start

    def _count_needed_parens(self, children):
        """Count unbalanced parens in the literal strings of `children`.

        Returns ``(unmatched_closing, unmatched_opening)``; comments
        and string literals are skipped.
        """
        start = 0
        opens = 0
        for child in children:
            if not isinstance(child, str):
                continue
            if child == '' or child[0] in '\'"':
                continue
            index = 0
            while index < len(child):
                if child[index] == ')':
                    if opens > 0:
                        opens -= 1
                    else:
                        start += 1
                if child[index] == '(':
                    opens += 1
                if child[index] == '#':
                    # Skip the rest of a comment line.
                    try:
                        index = child.index('\n', index)
                    except ValueError:
                        break
                index += 1
        return start, opens

    def _find_next_statement_start(self):
        # Upper bound for string consumption: the start of the next
        # pending statement, or end of source.
        for children in reversed(self.children_stack):
            for child in children:
                if isinstance(child, ast.stmt):
                    return child.col_offset \
                        + self.lines.get_line_start(child.lineno)
        return len(self.source.source)

    # Maps operator node class names to their source spelling.
    _operators = {'And': 'and', 'Or': 'or', 'Add': '+', 'Sub': '-', 'Mult': '*',
                  'Div': '/', 'Mod': '%', 'Pow': '**', 'LShift': '<<',
                  'RShift': '>>', 'BitOr': '|', 'BitAnd': '&', 'BitXor': '^',
                  'FloorDiv': '//', 'Invert': '~', 'Not': 'not', 'UAdd': '+',
                  'USub': '-', 'Eq': '==', 'NotEq': '!=', 'Lt': '<',
                  'LtE': '<=', 'Gt': '>', 'GtE': '>=', 'Is': 'is',
                  'IsNot': 'is not', 'In': 'in', 'NotIn': 'not in'}

    def _get_op(self, node):
        # Multi-word operators ('is not', 'not in') become token lists.
        return self._operators[node.__class__.__name__].split(' ')

    def _Attribute(self, node):
        self._handle(node, [node.value, '.', node.attr])

    def _Assert(self, node):
        children = ['assert', node.test]
        if node.msg:
            children.append(',')
            children.append(node.msg)
        self._handle(node, children)

    def _Assign(self, node):
        children = self._child_nodes(node.targets, '=')
        children.append('=')
        children.append(node.value)
        self._handle(node, children)

    def _AugAssign(self, node):
        children = [node.target]
        children.extend(self._get_op(node.op))
        children.extend(['=', node.value])
        self._handle(node, children)

    def _Repr(self, node):
        self._handle(node, ['`', node.value, '`'])

    def _BinOp(self, node):
        children = [node.left] + self._get_op(node.op) + [node.right]
        self._handle(node, children)

    def _BoolOp(self, node):
        self._handle(node, self._child_nodes(node.values,
                                             self._get_op(node.op)[0]))

    def _Break(self, node):
        self._handle(node, ['break'])

    def _Call(self, node):
        children = [node.func, '(']
        args = list(node.args) + node.keywords
        children.extend(self._child_nodes(args, ','))
        if node.starargs is not None:
            if args:
                children.append(',')
            children.extend(['*', node.starargs])
        if node.kwargs is not None:
            if args or node.starargs is not None:
                children.append(',')
            children.extend(['**', node.kwargs])
        children.append(')')
        self._handle(node, children)

    def _ClassDef(self, node):
        children = []
        if getattr(node, 'decorator_list', None):
            for decorator in node.decorator_list:
                children.append('@')
                children.append(decorator)
        children.extend(['class', node.name])
        if node.bases:
            children.append('(')
            children.extend(self._child_nodes(node.bases, ','))
            children.append(')')
        children.append(':')
        children.extend(node.body)
        self._handle(node, children)

    def _Compare(self, node):
        children = []
        children.append(node.left)
        for op, expr in zip(node.ops, node.comparators):
            children.extend(self._get_op(op))
            children.append(expr)
        self._handle(node, children)

    def _Delete(self, node):
        self._handle(node, ['del'] + self._child_nodes(node.targets, ','))

    def _Num(self, node):
        self._handle(node, [self.Number])

    def _Str(self, node):
        self._handle(node, [self.String])

    def _Continue(self, node):
        self._handle(node, ['continue'])

    def _Dict(self, node):
        children = []
        children.append('{')
        if node.keys:
            for index, (key, value) in enumerate(list(zip(node.keys, node.values))):
                children.extend([key, ':', value])
                if index < len(node.keys) - 1:
                    children.append(',')
        children.append('}')
        self._handle(node, children)

    def _Ellipsis(self, node):
        self._handle(node, ['...'])

    def _Expr(self, node):
        self._handle(node, [node.value])

    def _Exec(self, node):
        children = []
        children.extend(['exec', node.body])
        if node.globals:
            children.extend(['in', node.globals])
        if node.locals:
            children.extend([',', node.locals])
        self._handle(node, children)

    def _ExtSlice(self, node):
        children = []
        for index, dim in enumerate(node.dims):
            if index > 0:
                children.append(',')
            children.append(dim)
        self._handle(node, children)

    def _For(self, node):
        children = ['for', node.target, 'in', node.iter, ':']
        children.extend(node.body)
        if node.orelse:
            children.extend(['else', ':'])
            children.extend(node.orelse)
        self._handle(node, children)

    def _ImportFrom(self, node):
        children = ['from']
        if node.level:
            # Relative import: one dot per level.
            children.append('.' * node.level)
        children.extend([node.module or '',  # see comment at rope.base.ast.walk
                         'import'])
        children.extend(self._child_nodes(node.names, ','))
        self._handle(node, children)

    def _alias(self, node):
        children = [node.name]
        if node.asname:
            children.extend(['as', node.asname])
        self._handle(node, children)

    def _FunctionDef(self, node):
        children = []
        try:
            decorators = getattr(node, 'decorator_list')
        except AttributeError:
            # Older AST versions named the field `decorators`.
            decorators = getattr(node, 'decorators', None)
        if decorators:
            for decorator in decorators:
                children.append('@')
                children.append(decorator)
        children.extend(['def', node.name, '(', node.args])
        children.extend([')', ':'])
        children.extend(node.body)
        self._handle(node, children)

    def _arguments(self, node):
        children = []
        args = list(node.args)
        # Align defaults with the trailing arguments they belong to.
        defaults = [None] * (len(args) - len(node.defaults)) + list(node.defaults)
        for index, (arg, default) in enumerate(list(zip(args, defaults))):
            if index > 0:
                children.append(',')
            self._add_args_to_children(children, arg, default)
        if node.vararg is not None:
            if args:
                children.append(',')
            children.extend(['*', node.vararg])
        if node.kwarg is not None:
            if args or node.vararg is not None:
                children.append(',')
            children.extend(['**', node.kwarg])
        self._handle(node, children)

    def _add_args_to_children(self, children, arg, default):
        if isinstance(arg, (list, tuple)):
            self._add_tuple_parameter(children, arg)
        else:
            children.append(arg)
        if default is not None:
            children.append('=')
            children.append(default)

    def _add_tuple_parameter(self, children, arg):
        # Python 2 style tuple parameters, e.g. ``def f((a, b)):``.
        children.append('(')
        for index, token in enumerate(arg):
            if index > 0:
                children.append(',')
            if isinstance(token, (list, tuple)):
                self._add_tuple_parameter(children, token)
            else:
                children.append(token)
        children.append(')')

    def _GeneratorExp(self, node):
        children = [node.elt]
        children.extend(node.generators)
        self._handle(node, children, eat_parens=True)

    def _comprehension(self, node):
        children = ['for', node.target, 'in', node.iter]
        if node.ifs:
            for if_ in node.ifs:
                children.append('if')
                children.append(if_)
        self._handle(node, children)

    def _Global(self, node):
        children = self._child_nodes(node.names, ',')
        children.insert(0, 'global')
        self._handle(node, children)

    def _If(self, node):
        if self._is_elif(node):
            children = ['elif']
        else:
            children = ['if']
        children.extend([node.test, ':'])
        children.extend(node.body)
        if node.orelse:
            if len(node.orelse) == 1 and self._is_elif(node.orelse[0]):
                pass
            else:
                children.extend(['else', ':'])
            children.extend(node.orelse)
        self._handle(node, children)

    def _is_elif(self, node):
        if not isinstance(node, ast.If):
            return False
        offset = self.lines.get_line_start(node.lineno) + node.col_offset
        word = self.source[offset:offset + 4]
        # XXX: This is a bug; the offset does not point to the first
        alt_word = self.source[offset - 5:offset - 1]
        return 'elif' in (word, alt_word)

    def _IfExp(self, node):
        return self._handle(node, [node.body, 'if', node.test,
                                   'else', node.orelse])

    def _Import(self, node):
        children = ['import']
        children.extend(self._child_nodes(node.names, ','))
        self._handle(node, children)

    def _keyword(self, node):
        self._handle(node, [node.arg, '=', node.value])

    def _Lambda(self, node):
        self._handle(node, ['lambda', node.args, ':', node.body])

    def _List(self, node):
        self._handle(node, ['['] + self._child_nodes(node.elts, ',') + [']'])

    def _ListComp(self, node):
        children = ['[', node.elt]
        children.extend(node.generators)
        children.append(']')
        self._handle(node, children)

    def _Module(self, node):
        self._handle(node, list(node.body), eat_spaces=True)

    def _Name(self, node):
        self._handle(node, [node.id])

    def _Pass(self, node):
        self._handle(node, ['pass'])

    def _Print(self, node):
        children = ['print']
        if node.dest:
            children.extend(['>>', node.dest])
            if node.values:
                children.append(',')
        children.extend(self._child_nodes(node.values, ','))
        if not node.nl:
            children.append(',')
        self._handle(node, children)

    def _Raise(self, node):
        # NOTE(review): `cause` is consumed before `exc` here, but the
        # textual order of ``raise exc from cause`` is exc first —
        # verify against a `raise ... from ...` test case.
        children = ['raise']
        if node.cause:
            children.append(node.cause)
        if node.exc:
            children.append(node.exc)
        self._handle(node, children)

    def _Return(self, node):
        children = ['return']
        if node.value:
            children.append(node.value)
        self._handle(node, children)

    def _Sliceobj(self, node):
        children = []
        for index, slice in enumerate(node.nodes):
            if index > 0:
                children.append(':')
            if slice:
                children.append(slice)
        self._handle(node, children)

    def _Index(self, node):
        self._handle(node, [node.value])

    def _Subscript(self, node):
        self._handle(node, [node.value, '[', node.slice, ']'])

    def _Slice(self, node):
        children = []
        if node.lower:
            children.append(node.lower)
        children.append(':')
        if node.upper:
            children.append(node.upper)
        if node.step:
            children.append(':')
            children.append(node.step)
        self._handle(node, children)

    def _TryFinally(self, node):
        children = []
        # A try/except wrapped inside try/finally shares one `try`.
        if len(node.body) != 1 or not isinstance(node.body[0], ast.TryExcept):
            children.extend(['try', ':'])
        children.extend(node.body)
        children.extend(['finally', ':'])
        children.extend(node.finalbody)
        self._handle(node, children)

    def _TryExcept(self, node):
        children = ['try', ':']
        children.extend(node.body)
        children.extend(node.handlers)
        if node.orelse:
            children.extend(['else', ':'])
            children.extend(node.orelse)
        self._handle(node, children)

    def _ExceptHandler(self, node):
        self._excepthandler(node)

    def _excepthandler(self, node):
        children = ['except']
        if node.type:
            children.append(node.type)
        if node.name:
            children.extend(['as', node.name])
        children.append(':')
        children.extend(node.body)
        self._handle(node, children)

    def _Tuple(self, node):
        if node.elts:
            self._handle(node, self._child_nodes(node.elts, ','),
                         eat_parens=True)
        else:
            self._handle(node, ['(', ')'])

    def _UnaryOp(self, node):
        children = self._get_op(node.op)
        children.append(node.operand)
        self._handle(node, children)

    def _Yield(self, node):
        children = ['yield']
        if node.value:
            children.append(node.value)
        self._handle(node, children)

    def _While(self, node):
        children = ['while', node.test, ':']
        children.extend(node.body)
        if node.orelse:
            children.extend(['else', ':'])
            children.extend(node.orelse)
        self._handle(node, children)

    def _With(self, node):
        children = ['with', node.context_expr]
        if node.optional_vars:
            children.extend(['as', node.optional_vars])
        children.append(':')
        children.extend(node.body)
        self._handle(node, children)

    def _child_nodes(self, nodes, separator):
        """Return `nodes` interleaved with `separator` tokens."""
        children = []
        for index, child in enumerate(nodes):
            children.append(child)
            if index < len(nodes) - 1:
                children.append(separator)
        return children
class _Source(object):
    """Cursor over source text that consumes tokens left to right.

    `offset` is the current position; each ``consume*`` method advances
    it past the matched token and returns the ``(start, end)`` region.
    Matches found inside comments are skipped.
    """

    def __init__(self, source):
        self.source = source
        self.offset = 0

    def consume(self, token):
        """Advance past the next occurrence of `token`; return its region."""
        try:
            while True:
                new_offset = self.source.index(token, self.offset)
                if self._good_token(token, new_offset):
                    break
                else:
                    self._skip_comment()
        except (ValueError, TypeError):
            raise MismatchedTokenError(
                'Token <%s> at %s cannot be matched' %
                (token, self._get_location()))
        self.offset = new_offset + len(token)
        return (new_offset, self.offset)

    def consume_string(self, end=None):
        """Consume a (possibly implicitly concatenated) string literal."""
        if _Source._string_pattern is None:
            original = codeanalyze.get_string_pattern()
            # Allow several adjacent literals separated by whitespace,
            # line continuations, or comments.
            pattern = r'(%s)((\s|\\\n|#[^\n]*\n)*(%s))*' % \
                      (original, original)
            _Source._string_pattern = re.compile(pattern)
        repattern = _Source._string_pattern
        return self._consume_pattern(repattern, end)

    def consume_number(self):
        """Consume a numeric literal."""
        if _Source._number_pattern is None:
            _Source._number_pattern = re.compile(
                self._get_number_pattern())
        repattern = _Source._number_pattern
        return self._consume_pattern(repattern)

    def consume_not_equal(self):
        """Consume either spelling of not-equal: ``!=`` or legacy ``<>``."""
        if _Source._not_equals_pattern is None:
            _Source._not_equals_pattern = re.compile(r'<>|!=')
        repattern = _Source._not_equals_pattern
        return self._consume_pattern(repattern)

    def _good_token(self, token, offset, start=None):
        """Checks whether consumed token is in comments"""
        if start is None:
            start = self.offset
        try:
            comment_index = self.source.rindex('#', start, offset)
        except ValueError:
            return True
        try:
            new_line_index = self.source.rindex('\n', start, offset)
        except ValueError:
            return False
        # Good only if a newline ended the comment before the token.
        return comment_index < new_line_index

    def _skip_comment(self):
        self.offset = self.source.index('\n', self.offset + 1)

    def _get_location(self):
        # (line, column) of the current offset, for error messages.
        lines = self.source[:self.offset].split('\n')
        return (len(lines), len(lines[-1]))

    def _consume_pattern(self, repattern, end=None):
        while True:
            if end is None:
                end = len(self.source)
            match = repattern.search(self.source, self.offset, end)
            if self._good_token(match.group(), match.start()):
                break
            else:
                self._skip_comment()
        self.offset = match.end()
        return match.start(), match.end()

    def till_token(self, token):
        """Return the text between the cursor and the next `token`."""
        new_offset = self.source.index(token, self.offset)
        return self[self.offset:new_offset]

    def rfind_token(self, token, start, end):
        """Find the last non-comment `token` in [start, end); else None."""
        index = start
        while True:
            try:
                index = self.source.rindex(token, start, end)
                if self._good_token(token, index, start=start):
                    return index
                else:
                    end = index
            except ValueError:
                return None

    def from_offset(self, offset):
        return self[offset:self.offset]

    def find_backwards(self, pattern, offset):
        return self.source.rindex(pattern, 0, offset)

    def __getitem__(self, index):
        return self.source[index]

    def __getslice__(self, i, j):
        return self.source[i:j]

    def _get_number_pattern(self):
        # HACK: It is merely an approaximation and does the job
        integer = r'(0|0x)?[\da-fA-F]+[lL]?'
        return r'(%s(\.\d*)?|(\.\d+))([eE][-+]?\d*)?[jJ]?' % integer

    # Compiled lazily and shared across instances.
    _string_pattern = None
    _number_pattern = None
    _not_equals_pattern = None
from rope.base import pyobjects, codeanalyze, exceptions, pynames, taskhandle, evaluate, worder
from rope.base.change import ChangeSet, ChangeContents, MoveResource
from rope.refactor import importutils, rename, occurrences, sourceutils, functionutils
def create_move(project, resource, offset=None):
    """A factory for creating Move objects

    Based on `resource` and `offset`, return one of `MoveModule`,
    `MoveGlobal` or `MoveMethod` for performing move refactoring.
    """
    # No offset means the whole module/package is being moved.
    if offset is None:
        return MoveModule(project, resource)
    this_pymodule = project.pycore.resource_to_pyobject(resource)
    pyname = evaluate.eval_location(this_pymodule, offset)
    if pyname is None:
        raise exceptions.RefactoringError(
            'Move only works on classes, functions, modules and methods.')
    pyobject = pyname.get_object()
    if isinstance(pyobject, pyobjects.PyModule) or \
       isinstance(pyobject, pyobjects.PyPackage):
        return MoveModule(project, pyobject.get_resource())
    if isinstance(pyobject, pyobjects.PyFunction) and \
       isinstance(pyobject.parent, pyobjects.PyClass):
        return MoveMethod(project, resource, offset)
    if isinstance(pyobject, pyobjects.PyDefinedObject) and \
       isinstance(pyobject.parent, pyobjects.PyModule):
        return MoveGlobal(project, resource, offset)
    raise exceptions.RefactoringError(
        'Move only works on global classes/functions, modules and methods.')
class MoveMethod(object):
"""For moving methods
It makes a new method in the destination class and changes
the body of the old method to call the new method. You can
inline the old method to change all of its occurrences.
"""
    def __init__(self, project, resource, offset):
        """Locate the method at `offset`; only normal methods qualify."""
        self.project = project
        self.pycore = project.pycore
        this_pymodule = self.pycore.resource_to_pyobject(resource)
        pyname = evaluate.eval_location(this_pymodule, offset)
        self.method_name = worder.get_name_at(resource, offset)
        self.pyfunction = pyname.get_object()
        # static/class methods cannot be moved to an attribute's class
        if self.pyfunction.get_kind() != 'method':
            raise exceptions.RefactoringError('Only normal methods'
                                              ' can be moved.')
    def get_changes(self, dest_attr, new_name=None, resources=None,
                    task_handle=taskhandle.NullTaskHandle()):
        """Return the changes needed for this refactoring

        Parameters:

        - `dest_attr`: the name of the destination attribute
        - `new_name`: the name of the new method; if `None` uses
          the old name
        - `resources` can be a list of `rope.base.resources.File`\s to
          apply this refactoring on.  If `None`, the restructuring
          will be applied to all python files.

        """
        changes = ChangeSet('Moving method <%s>' % self.method_name)
        if resources is None:
            resources = self.pycore.get_python_files()
        if new_name is None:
            new_name = self.get_method_name()
        # Change 1: the old method becomes a delegating stub.
        resource1, start1, end1, new_content1 = \
            self._get_changes_made_by_old_class(dest_attr, new_name)
        collector1 = codeanalyze.ChangeCollector(resource1.read())
        collector1.add_change(start1, end1, new_content1)
        # Change 2: the method body is added to the destination class.
        resource2, start2, end2, new_content2 = \
            self._get_changes_made_by_new_class(dest_attr, new_name)
        if resource1 == resource2:
            collector1.add_change(start2, end2, new_content2)
        else:
            collector2 = codeanalyze.ChangeCollector(resource2.read())
            collector2.add_change(start2, end2, new_content2)
            result = collector2.get_changed()
            # Carry over imports the moved body depends on.
            import_tools = importutils.ImportTools(self.pycore)
            new_imports = self._get_used_imports(import_tools)
            if new_imports:
                goal_pymodule = self.pycore.get_string_module(result,
                                                              resource2)
                result = _add_imports_to_module(
                    import_tools, goal_pymodule, new_imports)
            if resource2 in resources:
                changes.add_change(ChangeContents(resource2, result))
        if resource1 in resources:
            changes.add_change(ChangeContents(resource1,
                                              collector1.get_changed()))
        return changes
def get_method_name(self):
    """Return the name the moved method currently has."""
    return self.method_name
def _get_used_imports(self, import_tools):
    """Imports of the source module that the moved body relies on."""
    return importutils.get_imports(self.pycore, self.pyfunction)
def _get_changes_made_by_old_class(self, dest_attr, new_name):
    """Compute the edit replacing the old body with a delegating stub.

    Returns ``(resource, start, end, new_body)``; the region covers
    the old method's body and ``new_body`` forwards the call to
    ``self.<dest_attr>.<new_name>(...)``.
    """
    pymodule = self.pyfunction.get_module()
    indents = self._get_scope_indents(self.pyfunction)
    body = 'return self.%s.%s(%s)\n' % (dest_attr, new_name,
                                        self._get_passed_arguments_string())
    region = sourceutils.get_body_region(self.pyfunction)
    return (pymodule.get_resource(), region[0], region[1],
            sourceutils.fix_indentation(body, indents))
def _get_scope_indents(self, pyobject):
    """Indentation (in columns) for the body of `pyobject`."""
    pymodule = pyobject.get_module()
    scope_indents = sourceutils.get_indents(
        pymodule.lines, pyobject.get_scope().get_start())
    return scope_indents + sourceutils.get_indent(self.pycore)
def _get_changes_made_by_new_class(self, dest_attr, new_name):
    """Compute the edit inserting the moved method into its new class.

    Raises `RefactoringError` when `dest_attr` cannot be found on the
    old class or does not hold an instance of a known class.
    """
    old_pyclass = self.pyfunction.parent
    if dest_attr not in old_pyclass:
        raise exceptions.RefactoringError(
            'Destination attribute <%s> not found' % dest_attr)
    pyclass = old_pyclass[dest_attr].get_object().get_type()
    if not isinstance(pyclass, pyobjects.PyClass):
        raise exceptions.RefactoringError(
            'Unknown class type for attribute <%s>' % dest_attr)
    pymodule = pyclass.get_module()
    resource = pyclass.get_module().get_resource()
    start, end = sourceutils.get_body_region(pyclass)
    pre_blanks = '\n'
    # A class whose body is only `pass` gets the method in place of
    # the `pass`; otherwise append after the existing body, separated
    # by a blank line.
    if pymodule.source_code[start:end].strip() != 'pass':
        pre_blanks = '\n\n'
        start = end
    indents = self._get_scope_indents(pyclass)
    body = pre_blanks + sourceutils.fix_indentation(
        self.get_new_method(new_name), indents)
    return resource, start, end, body
def get_new_method(self, name):
    """Return the full source of the moved method, renamed to `name`."""
    indented_body = sourceutils.fix_indentation(
        self._get_body(), sourceutils.get_indent(self.pycore))
    return '%s\n%s' % (self._get_new_header(name), indented_body)
def _get_unchanged_body(self):
    """Return the original method body, untouched."""
    return sourceutils.get_body(self.pyfunction)
def _get_body(self, host='host'):
    """Return the method body with `self` references renamed to `host`.

    A fake ``<self> = None`` assignment is prepended so the rename
    machinery can resolve the name in an isolated string module; the
    extra first line is stripped from the result.
    """
    self_name = self._get_self_name()
    body = self_name + ' = None\n' + self._get_unchanged_body()
    pymodule = self.pycore.get_string_module(body)
    finder = occurrences.create_finder(
        self.pycore, self_name, pymodule[self_name])
    result = rename.rename_in_module(finder, host, pymodule=pymodule)
    if result is None:
        # `self` never occurs in the body; keep it unchanged.
        result = body
    # Drop the prepended fake assignment line.
    return result[result.index('\n') + 1:]
def _get_self_name(self):
return self.pyfunction.get_param_names()[0]
def _get_new_header(self, name):
    """Build the ``def`` line of the moved method."""
    params = ['self']
    # The old `self` is passed along as `host` only when the body
    # actually uses it.
    if self._is_host_used():
        params.append('host')
    definition_info = functionutils.DefinitionInfo.read(self.pyfunction)
    others = definition_info.arguments_to_string(1)
    if others:
        params.append(others)
    return 'def %s(%s):' % (name, ', '.join(params))
def _get_passed_arguments_string(self):
    """Arguments forwarded by the stub left in the old class."""
    parts = []
    if self._is_host_used():
        parts.append('self')
    definition_info = functionutils.DefinitionInfo.read(self.pyfunction)
    others = definition_info.arguments_to_string(1)
    if others:
        parts.append(others)
    return ', '.join(parts)
def _is_host_used(self):
    """Whether the body references the old `self` at all.

    Detected by checking if renaming `self` changes the body.
    """
    return self._get_body('__old_self') != self._get_unchanged_body()
class MoveGlobal(object):
    """For moving global function and classes"""

    def __init__(self, project, resource, offset):
        self.pycore = project.pycore
        this_pymodule = self.pycore.resource_to_pyobject(resource)
        self.old_pyname = evaluate.eval_location(this_pymodule, offset)
        self.old_name = self.old_pyname.get_object().get_name()
        pymodule = self.old_pyname.get_object().get_module()
        self.source = pymodule.get_resource()
        self.tools = _MoveTools(self.pycore, self.source,
                                self.old_pyname, self.old_name)
        self.import_tools = self.tools.import_tools
        self._check_exceptional_conditions()

    def _check_exceptional_conditions(self):
        # Only globally defined classes/functions can be moved here.
        if self.old_pyname is None or \
           not isinstance(self.old_pyname.get_object(),
                          pyobjects.PyDefinedObject):
            raise exceptions.RefactoringError(
                'Move refactoring should be performed on a class/function.')
        moving_pyobject = self.old_pyname.get_object()
        if not self._is_global(moving_pyobject):
            raise exceptions.RefactoringError(
                'Move refactoring should be performed on a global class/function.')

    def _is_global(self, pyobject):
        return pyobject.get_scope().parent == pyobject.get_module().get_scope()

    def get_changes(self, dest, resources=None,
                    task_handle=taskhandle.NullTaskHandle()):
        """Return the changes needed to move the element to `dest`.

        `dest` should be an existing python module; a package is
        accepted and mapped to its `__init__.py`.
        """
        if resources is None:
            resources = self.pycore.get_python_files()
        if dest is None or not dest.exists():
            raise exceptions.RefactoringError(
                'Move destination does not exist.')
        if dest.is_folder() and dest.has_child('__init__.py'):
            dest = dest.get_child('__init__.py')
        if dest.is_folder():
            raise exceptions.RefactoringError(
                'Move destination for non-modules should not be folders.')
        if self.source == dest:
            raise exceptions.RefactoringError(
                'Moving global elements to the same module.')
        return self._calculate_changes(dest, resources, task_handle)

    def _calculate_changes(self, dest, resources, task_handle):
        changes = ChangeSet('Moving global <%s>' % self.old_name)
        job_set = task_handle.create_jobset('Collecting Changes',
                                            len(resources))
        for file_ in resources:
            job_set.started_job(file_.path)
            if file_ == self.source:
                changes.add_change(self._source_module_changes(dest))
            elif file_ == dest:
                changes.add_change(self._dest_module_changes(dest))
            elif self.tools.occurs_in_module(resource=file_):
                pymodule = self.pycore.resource_to_pyobject(file_)
                # Changing occurrences: rename to a unique placeholder
                # first; it is replaced by the real imported name below.
                placeholder = '__rope_renaming_%s_' % self.old_name
                source = self.tools.rename_in_module(placeholder,
                                                     resource=file_)
                should_import = source is not None
                # Removing out of date imports
                pymodule = self.tools.new_pymodule(pymodule, source)
                source = self.tools.remove_old_imports(pymodule)
                # Adding new import
                if should_import:
                    pymodule = self.tools.new_pymodule(pymodule, source)
                    source, imported = importutils.add_import(
                        self.pycore, pymodule, self._new_modname(dest),
                        self.old_name)
                    source = source.replace(placeholder, imported)
                source = self.tools.new_source(pymodule, source)
                if source != file_.read():
                    changes.add_change(ChangeContents(file_, source))
            job_set.finished_job()
        return changes

    def _source_module_changes(self, dest):
        # Cut the definition out of the source module; remaining
        # occurrences are pointed at the new location.
        placeholder = '__rope_moving_%s_' % self.old_name
        handle = _ChangeMoveOccurrencesHandle(placeholder)
        occurrence_finder = occurrences.create_finder(
            self.pycore, self.old_name, self.old_pyname)
        start, end = self._get_moving_region()
        renamer = ModuleSkipRenamer(occurrence_finder, self.source,
                                    handle, start, end)
        source = renamer.get_changed_module()
        if handle.occurred:
            pymodule = self.pycore.get_string_module(source, self.source)
            # Adding new import
            source, imported = importutils.add_import(
                self.pycore, pymodule, self._new_modname(dest), self.old_name)
            source = source.replace(placeholder, imported)
        return ChangeContents(self.source, source)

    def _new_modname(self, dest):
        return self.pycore.modname(dest)

    def _dest_module_changes(self, dest):
        # Changing occurrences
        pymodule = self.pycore.resource_to_pyobject(dest)
        source = self.tools.rename_in_module(self.old_name, pymodule)
        pymodule = self.tools.new_pymodule(pymodule, source)
        moving, imports = self._get_moving_element_with_imports()
        source = self.tools.remove_old_imports(pymodule)
        pymodule = self.tools.new_pymodule(pymodule, source)
        pymodule, has_changed = self._add_imports2(pymodule, imports)
        module_with_imports = self.import_tools.module_imports(pymodule)
        source = pymodule.source_code
        # Insert the moved code right after the last import, or after
        # any leading comment lines when there are no imports.
        lineno = 0
        if module_with_imports.imports:
            lineno = module_with_imports.imports[-1].end_line - 1
        else:
            while lineno < pymodule.lines.length() and \
                  pymodule.lines.get_line(lineno + 1).lstrip().startswith('#'):
                lineno += 1
        if lineno > 0:
            cut = pymodule.lines.get_line_end(lineno) + 1
            result = source[:cut] + '\n\n' + moving + source[cut:]
        else:
            result = moving + source
        # Organizing imports
        source = result
        pymodule = self.pycore.get_string_module(source, dest)
        source = self.import_tools.organize_imports(pymodule, sort=False,
                                                    unused=False)
        return ChangeContents(dest, source)

    def _get_moving_element_with_imports(self):
        return moving_code_with_imports(
            self.pycore, self.source, self._get_moving_element())

    def _get_module_with_imports(self, source_code, resource):
        pymodule = self.pycore.get_string_module(source_code, resource)
        return self.import_tools.module_imports(pymodule)

    def _get_moving_element(self):
        start, end = self._get_moving_region()
        moving = self.source.read()[start:end]
        return moving.rstrip() + '\n'

    def _get_moving_region(self):
        """Offsets (start, end) of the moved definition.

        Trailing blank lines are included in the region.
        """
        pymodule = self.pycore.resource_to_pyobject(self.source)
        lines = pymodule.lines
        scope = self.old_pyname.get_object().get_scope()
        start = lines.get_line_start(scope.get_start())
        end_line = scope.get_end()
        while end_line < lines.length() and \
              lines.get_line(end_line + 1).strip() == '':
            end_line += 1
        end = min(lines.get_line_end(end_line) + 1, len(pymodule.source_code))
        return start, end

    def _add_imports2(self, pymodule, new_imports):
        # Returns the (re-parsed, if needed) module and whether adding
        # the imports actually changed it.
        source = self.tools.add_imports(pymodule, new_imports)
        if source is None:
            return pymodule, False
        else:
            resource = pymodule.get_resource()
            pymodule = self.pycore.get_string_module(source, resource)
            return pymodule, True
class MoveModule(object):
    """For moving modules and packages"""

    def __init__(self, project, resource):
        self.project = project
        self.pycore = project.pycore
        # Selecting a package's `__init__.py` means moving the package.
        if not resource.is_folder() and resource.name == '__init__.py':
            resource = resource.parent
        if resource.is_folder() and not resource.has_child('__init__.py'):
            raise exceptions.RefactoringError(
                'Cannot move non-package folder.')
        dummy_pymodule = self.pycore.get_string_module('')
        self.old_pyname = pynames.ImportedModule(dummy_pymodule,
                                                 resource=resource)
        self.source = self.old_pyname.get_object().get_resource()
        if self.source.is_folder():
            self.old_name = self.source.name
        else:
            # Strip the trailing '.py'.
            self.old_name = self.source.name[:-3]
        self.tools = _MoveTools(self.pycore, self.source,
                                self.old_pyname, self.old_name)
        self.import_tools = self.tools.import_tools

    def get_changes(self, dest, resources=None,
                    task_handle=taskhandle.NullTaskHandle()):
        """Return the changes needed to move the module into `dest`.

        `dest` must be a package folder; `resources`, if given, limits
        the files whose occurrences are updated.
        """
        # Note: the original bound an unused `moving_pyobject` local
        # here; it has been removed.
        if resources is None:
            resources = self.pycore.get_python_files()
        if dest is None or not dest.is_folder():
            raise exceptions.RefactoringError(
                'Move destination for modules should be packages.')
        return self._calculate_changes(dest, resources, task_handle)

    def _calculate_changes(self, dest, resources, task_handle):
        changes = ChangeSet('Moving module <%s>' % self.old_name)
        job_set = task_handle.create_jobset('Collecting changes',
                                            len(resources))
        for module in resources:
            job_set.started_job(module.path)
            if module == self.source:
                self._change_moving_module(changes, dest)
            else:
                source = self._change_occurrences_in_module(dest,
                                                            resource=module)
                if source is not None:
                    changes.add_change(ChangeContents(module, source))
            job_set.finished_job()
        if self.project == self.source.project:
            changes.add_change(MoveResource(self.source, dest.path))
        return changes

    def _new_modname(self, dest):
        destname = self.pycore.modname(dest)
        if destname:
            return destname + '.' + self.old_name
        return self.old_name

    def _new_import(self, dest):
        return importutils.NormalImport([(self._new_modname(dest), None)])

    def _change_moving_module(self, changes, dest):
        # The moved module itself needs its relative imports converted
        # to absolute ones, since its position in the package changes.
        if not self.source.is_folder():
            pymodule = self.pycore.resource_to_pyobject(self.source)
            source = self.import_tools.relatives_to_absolutes(pymodule)
            pymodule = self.tools.new_pymodule(pymodule, source)
            source = self._change_occurrences_in_module(dest, pymodule)
            source = self.tools.new_source(pymodule, source)
            if source != self.source.read():
                changes.add_change(ChangeContents(self.source, source))

    def _change_occurrences_in_module(self, dest, pymodule=None,
                                      resource=None):
        """Rewrite references to the moved module in one file.

        Returns the new source, or `None` when nothing changes.
        """
        if not self.tools.occurs_in_module(pymodule=pymodule,
                                           resource=resource):
            return
        if pymodule is None:
            pymodule = self.pycore.resource_to_pyobject(resource)
        new_name = self._new_modname(dest)
        new_import = self._new_import(dest)
        source = self.tools.rename_in_module(
            new_name, imports=True, pymodule=pymodule, resource=resource)
        # Non-import occurrences mean the module is actually used, so
        # an import of the new location is required.
        should_import = self.tools.occurs_in_module(
            pymodule=pymodule, resource=resource, imports=False)
        pymodule = self.tools.new_pymodule(pymodule, source)
        source = self.tools.remove_old_imports(pymodule)
        if should_import:
            pymodule = self.tools.new_pymodule(pymodule, source)
            source = self.tools.add_imports(pymodule, [new_import])
        source = self.tools.new_source(pymodule, source)
        if source != pymodule.resource.read():
            return source
class _ChangeMoveOccurrencesHandle(object):
def __init__(self, new_name):
self.new_name = new_name
self.occurred = False
def occurred_inside_skip(self, change_collector, occurrence):
pass
def occurred_outside_skip(self, change_collector, occurrence):
start, end = occurrence.get_primary_range()
change_collector.add_change(start, end, self.new_name)
self.occurred = True
class _MoveTools(object):
    """Shared helpers for the move refactorings.

    Bundles occurrence finding, renaming and import manipulation for
    a single moved element (`pyname`/`old_name` defined in `source`).
    """

    def __init__(self, pycore, source, pyname, old_name):
        self.pycore = pycore
        self.source = source
        self.old_pyname = pyname
        self.old_name = old_name
        self.import_tools = importutils.ImportTools(self.pycore)

    def remove_old_imports(self, pymodule):
        """Drop imports of the moved element; `None` when unchanged."""
        old_source = pymodule.source_code
        module_with_imports = self.import_tools.module_imports(pymodule)

        # Predicate passed to `filter_names`; returning False removes
        # the imported name.  Note the class body reads the *enclosing*
        # method's `self` when binding old_name/old_pyname.
        class CanSelect(object):
            changed = False
            old_name = self.old_name
            old_pyname = self.old_pyname

            def __call__(self, name):
                try:
                    if name == self.old_name and \
                       pymodule[name].get_object() == \
                       self.old_pyname.get_object():
                        self.changed = True
                        return False
                except exceptions.AttributeNotFoundError:
                    pass
                return True
        can_select = CanSelect()
        module_with_imports.filter_names(can_select)
        new_source = module_with_imports.get_changed_source()
        if old_source != new_source:
            return new_source

    def rename_in_module(self, new_name, pymodule=None,
                         imports=False, resource=None):
        """Rename occurrences; returns `None` when nothing changed."""
        occurrence_finder = self._create_finder(imports)
        source = rename.rename_in_module(
            occurrence_finder, new_name, replace_primary=True,
            pymodule=pymodule, resource=resource)
        return source

    def occurs_in_module(self, pymodule=None, resource=None, imports=True):
        # True as soon as a single occurrence is found.
        finder = self._create_finder(imports)
        for occurrence in finder.find_occurrences(pymodule=pymodule,
                                                  resource=resource):
            return True
        return False

    def _create_finder(self, imports):
        return occurrences.create_finder(self.pycore, self.old_name,
                                         self.old_pyname, imports=imports)

    def new_pymodule(self, pymodule, source):
        # Re-parse only when a change was actually made (source given).
        if source is not None:
            return self.pycore.get_string_module(
                source, pymodule.get_resource())
        return pymodule

    def new_source(self, pymodule, source):
        # `None` source means "unchanged"; fall back to the module's.
        if source is None:
            return pymodule.source_code
        return source

    def add_imports(self, pymodule, new_imports):
        return _add_imports_to_module(self.import_tools, pymodule, new_imports)
def _add_imports_to_module(import_tools, pymodule, new_imports):
module_with_imports = import_tools.module_imports(pymodule)
for new_import in new_imports:
module_with_imports.add_import(new_import)
return module_with_imports.get_changed_source()
def moving_code_with_imports(pycore, resource, source):
    """Prepare `source` (cut from `resource`) for pasting elsewhere.

    Returns ``(moving, imports)`` where `moving` is the code with its
    import statements stripped and `imports` lists the import infos
    the destination module will need.
    """
    import_tools = importutils.ImportTools(pycore)
    pymodule = pycore.get_string_module(source, resource)
    origin = pycore.resource_to_pyobject(resource)
    # Start with every import of the origin module ...
    imports = []
    for stmt in import_tools.module_imports(origin).imports:
        imports.append(stmt.import_info)
    # ... plus a from-import covering origin-module names the moved
    # code references but does not define itself.
    back_names = []
    for name in origin:
        if name not in pymodule:
            back_names.append(name)
    imports.append(import_tools.get_from_import(resource, back_names))
    source = _add_imports_to_module(import_tools, pymodule, imports)
    pymodule = pycore.get_string_module(source, resource)
    source = import_tools.relatives_to_absolutes(pymodule)
    pymodule = pycore.get_string_module(source, resource)
    # Drop the imports the moved code does not actually use.
    source = import_tools.organize_imports(pymodule, selfs=False)
    pymodule = pycore.get_string_module(source, resource)
    # extracting imports after changes
    module_imports = import_tools.module_imports(pymodule)
    imports = [import_stmt.import_info
               for import_stmt in module_imports.imports]
    # The moving code starts after the last import (or at line 1),
    # skipping intervening blank lines.
    start = 1
    if module_imports.imports:
        start = module_imports.imports[-1].end_line
    lines = codeanalyze.SourceLinesAdapter(source)
    while start < lines.length() and not lines.get_line(start).strip():
        start += 1
    moving = source[lines.get_line_start(start):]
    return moving, imports
class ModuleSkipRenamerHandle(object):
    """Default no-op handle for `ModuleSkipRenamer`."""

    def occurred_outside_skip(self, change_collector, occurrence):
        """Called for each occurrence outside the skipped region."""

    def occurred_inside_skip(self, change_collector, occurrence):
        """Called for each occurrence inside the skipped region."""
class ModuleSkipRenamer(object):
    """Rename occurrences in a module

    This class can be used when you want to treat a region in a file
    separately from other parts when renaming.
    """

    def __init__(self, occurrence_finder, resource, handle=None,
                 skip_start=0, skip_end=0, replacement=''):
        """Constructor

        if replacement is `None` the region is not changed.  Otherwise
        it is replaced with `replacement`.
        """
        self.occurrence_finder = occurrence_finder
        self.resource = resource
        self.skip_start = skip_start
        self.skip_end = skip_end
        self.replacement = replacement
        self.handle = handle
        if self.handle is None:
            # Fixed: this previously referenced the undefined name
            # `ModuleSkipHandle`, so omitting `handle` raised a
            # NameError instead of using the no-op default.
            self.handle = ModuleSkipRenamerHandle()

    def get_changed_module(self):
        """Return the module's new source, or `None` when unchanged."""
        source = self.resource.read()
        change_collector = codeanalyze.ChangeCollector(source)
        if self.replacement is not None:
            change_collector.add_change(self.skip_start, self.skip_end,
                                        self.replacement)
        # Occurrences inside [skip_start, skip_end) are delegated to
        # the handle separately from the rest of the file.
        for occurrence in self.occurrence_finder.find_occurrences(self.resource):
            start, end = occurrence.get_primary_range()
            if self.skip_start <= start < self.skip_end:
                self.handle.occurred_inside_skip(change_collector, occurrence)
            else:
                self.handle.occurred_outside_skip(change_collector, occurrence)
        result = change_collector.get_changed()
        if result is not None and result != source:
            return result
import copy
import rope.base.exceptions
from rope.base import pyobjects, taskhandle, evaluate, worder, codeanalyze, utils
from rope.base.change import ChangeContents, ChangeSet
from rope.refactor import occurrences, functionutils
class ChangeSignature(object):
    """Change the signature of the function/method at a location."""

    def __init__(self, project, resource, offset):
        self.pycore = project.pycore
        self.resource = resource
        self.offset = offset
        self._set_name_and_pyname()
        if self.pyname is None or self.pyname.get_object() is None or \
           not isinstance(self.pyname.get_object(), pyobjects.PyFunction):
            raise rope.base.exceptions.RefactoringError(
                'Change method signature should be performed on functions')

    def _set_name_and_pyname(self):
        self.name = worder.get_name_at(self.resource, self.offset)
        this_pymodule = self.pycore.resource_to_pyobject(self.resource)
        self.primary, self.pyname = evaluate.eval_location2(
            this_pymodule, self.offset)
        if self.pyname is None:
            return
        pyobject = self.pyname.get_object()
        # Selecting a class means changing its `__init__` signature.
        if isinstance(pyobject, pyobjects.PyClass) and \
           '__init__' in pyobject:
            self.pyname = pyobject['__init__']
            self.name = '__init__'
        pyobject = self.pyname.get_object()
        self.others = None
        # For `__init__`, constructor calls (`ClassName(...)`) have to
        # be rewritten too; remember the class name and pyname.
        if self.name == '__init__' and \
           isinstance(pyobject, pyobjects.PyFunction) and \
           isinstance(pyobject.parent, pyobjects.PyClass):
            pyclass = pyobject.parent
            self.others = (pyclass.get_name(),
                           pyclass.parent[pyclass.get_name()])

    def _change_calls(self, call_changer, in_hierarchy=None, resources=None,
                      handle=taskhandle.NullTaskHandle()):
        if resources is None:
            resources = self.pycore.get_python_files()
        changes = ChangeSet('Changing signature of <%s>' % self.name)
        job_set = handle.create_jobset('Collecting Changes', len(resources))
        finder = occurrences.create_finder(
            self.pycore, self.name, self.pyname, instance=self.primary,
            in_hierarchy=in_hierarchy and self.is_method())
        if self.others:
            # Also match constructor calls of the enclosing class.
            name, pyname = self.others
            constructor_finder = occurrences.create_finder(
                self.pycore, name, pyname, only_calls=True)
            finder = _MultipleFinders([finder, constructor_finder])
        for file in resources:
            job_set.started_job(file.path)
            change_calls = _ChangeCallsInModule(
                self.pycore, finder, file, call_changer)
            changed_file = change_calls.get_changed_module()
            if changed_file is not None:
                changes.add_change(ChangeContents(file, changed_file))
            job_set.finished_job()
        return changes

    def get_args(self):
        """Get function arguments.

        Return a list of ``(name, default)`` tuples for all but star
        and double star arguments.  For arguments that don't have a
        default, `None` will be used.
        """
        return self._definfo().args_with_defaults

    def is_method(self):
        pyfunction = self.pyname.get_object()
        return isinstance(pyfunction.parent, pyobjects.PyClass)

    @utils.deprecated('Use `ChangeSignature.get_args()` instead')
    def get_definition_info(self):
        return self._definfo()

    def _definfo(self):
        return functionutils.DefinitionInfo.read(self.pyname.get_object())

    @utils.deprecated()
    def normalize(self):
        changer = _FunctionChangers(
            self.pyname.get_object(), self.get_definition_info(),
            [ArgumentNormalizer()])
        return self._change_calls(changer)

    @utils.deprecated()
    def remove(self, index):
        changer = _FunctionChangers(
            self.pyname.get_object(), self.get_definition_info(),
            [ArgumentRemover(index)])
        return self._change_calls(changer)

    @utils.deprecated()
    def add(self, index, name, default=None, value=None):
        changer = _FunctionChangers(
            self.pyname.get_object(), self.get_definition_info(),
            [ArgumentAdder(index, name, default, value)])
        return self._change_calls(changer)

    @utils.deprecated()
    def inline_default(self, index):
        changer = _FunctionChangers(
            self.pyname.get_object(), self.get_definition_info(),
            [ArgumentDefaultInliner(index)])
        return self._change_calls(changer)

    @utils.deprecated()
    def reorder(self, new_ordering):
        changer = _FunctionChangers(
            self.pyname.get_object(), self.get_definition_info(),
            [ArgumentReorderer(new_ordering)])
        return self._change_calls(changer)

    def get_changes(self, changers, in_hierarchy=False, resources=None,
                    task_handle=taskhandle.NullTaskHandle()):
        """Get changes caused by this refactoring

        `changers` is a list of `_ArgumentChanger`\s.  If `in_hierarchy`
        is `True` the changers are applied to all matching methods in
        the class hierarchy.
        `resources` can be a list of `rope.base.resource.File`\s that
        should be searched for occurrences; if `None` all python files
        in the project are searched.
        """
        function_changer = _FunctionChangers(self.pyname.get_object(),
                                             self._definfo(), changers)
        return self._change_calls(function_changer, in_hierarchy,
                                  resources, task_handle)
class _FunctionChangers(object):
def __init__(self, pyfunction, definition_info, changers=None):
self.pyfunction = pyfunction
self.definition_info = definition_info
self.changers = changers
self.changed_definition_infos = self._get_changed_definition_infos()
def _get_changed_definition_infos(self):
result = []
definition_info = self.definition_info
result.append(definition_info)
for changer in self.changers:
definition_info = copy.deepcopy(definition_info)
changer.change_definition_info(definition_info)
result.append(definition_info)
return result
def change_definition(self, call):
return self.changed_definition_infos[-1].to_string()
def change_call(self, primary, pyname, call):
call_info = functionutils.CallInfo.read(
primary, pyname, self.definition_info, call)
mapping = functionutils.ArgumentMapping(self.definition_info, call_info)
for definition_info, changer in zip(self.changed_definition_infos, self.changers):
changer.change_argument_mapping(definition_info, mapping)
return mapping.to_call_info(self.changed_definition_infos[-1]).to_string()
class _ArgumentChanger(object):
def change_definition_info(self, definition_info):
pass
def change_argument_mapping(self, definition_info, argument_mapping):
pass
class ArgumentNormalizer(_ArgumentChanger):
    """Rewrite call sites without altering the signature itself."""
class ArgumentRemover(_ArgumentChanger):
    """Remove the parameter at position `index`.

    `index` counts normal parameters first, then the ``*args``
    parameter (if any), then the ``**kwargs`` parameter (if any).
    """

    def __init__(self, index):
        self.index = index

    def change_definition_info(self, call_info):
        if self.index < len(call_info.args_with_defaults):
            del call_info.args_with_defaults[self.index]
        elif self.index == len(call_info.args_with_defaults) and \
             call_info.args_arg is not None:
            call_info.args_arg = None
        elif (self.index == len(call_info.args_with_defaults) and
              call_info.args_arg is None and
              call_info.keywords_arg is not None) or \
             (self.index == len(call_info.args_with_defaults) + 1 and
              call_info.args_arg is not None and
              call_info.keywords_arg is not None):
            call_info.keywords_arg = None

    def change_argument_mapping(self, definition_info, mapping):
        if self.index < len(definition_info.args_with_defaults):
            # Fixed: this used to read `args_with_defaults[0]`, which
            # yields a (name, default) tuple and therefore could never
            # match a string key in `param_dict`; the removed
            # argument was left in every call mapping.
            name = definition_info.args_with_defaults[self.index][0]
            if name in mapping.param_dict:
                del mapping.param_dict[name]
class ArgumentAdder(_ArgumentChanger):
    """Insert a parameter `name` at position `index`.

    `default` becomes the parameter's default in the definition;
    `value`, when given, is passed explicitly at every call site.
    """

    def __init__(self, index, name, default=None, value=None):
        self.index = index
        self.name = name
        self.default = default
        self.value = value

    def change_definition_info(self, definition_info):
        duplicate = any(pair[0] == self.name
                        for pair in definition_info.args_with_defaults)
        if duplicate:
            raise rope.base.exceptions.RefactoringError(
                'Adding duplicate parameter: <%s>.' % self.name)
        definition_info.args_with_defaults.insert(self.index,
                                                  (self.name, self.default))

    def change_argument_mapping(self, definition_info, mapping):
        if self.value is not None:
            mapping.param_dict[self.name] = self.value
class ArgumentDefaultInliner(_ArgumentChanger):
    """Pass a parameter's default explicitly at every call site.

    The `remove` flag is always `False` here; when enabled it would
    also strip the default from the definition.
    """

    def __init__(self, index):
        self.index = index
        self.remove = False

    def change_definition_info(self, definition_info):
        if self.remove:
            name = definition_info.args_with_defaults[self.index][0]
            definition_info.args_with_defaults[self.index] = (name, None)

    def change_argument_mapping(self, definition_info, mapping):
        name, default = definition_info.args_with_defaults[self.index]
        if default is not None and name not in mapping.param_dict:
            mapping.param_dict[name] = default
class ArgumentReorderer(_ArgumentChanger):

    def __init__(self, new_order, autodef=None):
        """Construct an `ArgumentReorderer`

        Note that the `new_order` is a list containing the new
        position of parameters; not the position each parameter
        is going to be moved to.  (changed in ``0.5m4``)

        For example changing ``f(a, b, c)`` to ``f(c, a, b)``
        requires passing ``[2, 0, 1]`` and *not* ``[1, 2, 0]``.

        The `autodef` (automatic default) argument, forces rope to use
        it as a default if a default is needed after the change.  That
        happens when an argument without default is moved after
        another that has a default value.  Note that `autodef` should
        be a string or `None`; the latter disables adding automatic
        default.
        """
        self.new_order = new_order
        self.autodef = autodef

    def change_definition_info(self, definition_info):
        new_args = list(definition_info.args_with_defaults)
        # Place each parameter at its new position.
        for new_index, index in enumerate(self.new_order):
            new_args[new_index] = definition_info.args_with_defaults[index]
        # A parameter without a default may not follow one that has a
        # default; patch such parameters with `autodef` when given.
        seen_default = False
        for index, (arg, default) in enumerate(list(new_args)):
            if default is not None:
                seen_default = True
            if seen_default and default is None and self.autodef is not None:
                new_args[index] = (arg, self.autodef)
        definition_info.args_with_defaults = new_args
class _ChangeCallsInModule(object):
    """Apply a call changer to every matching occurrence in one module."""

    def __init__(self, pycore, occurrence_finder, resource, call_changer):
        self.pycore = pycore
        self.occurrence_finder = occurrence_finder
        self.resource = resource
        self.call_changer = call_changer

    def get_changed_module(self):
        """Return the rewritten module source (or `None` if unchanged)."""
        word_finder = worder.Worder(self.source)
        change_collector = codeanalyze.ChangeCollector(self.source)
        for occurrence in self.occurrence_finder.find_occurrences(self.resource):
            # Only call sites and the definition itself are rewritten.
            if not occurrence.is_called() and not occurrence.is_defined():
                continue
            start, end = occurrence.get_primary_range()
            # Extend the region to cover the argument parentheses.
            begin_parens, end_parens = word_finder.get_word_parens_range(end - 1)
            if occurrence.is_called():
                primary, pyname = occurrence.get_primary_and_pyname()
                changed_call = self.call_changer.change_call(
                    primary, pyname, self.source[start:end_parens])
            else:
                changed_call = self.call_changer.change_definition(
                    self.source[start:end_parens])
            if changed_call is not None:
                change_collector.add_change(start, end_parens, changed_call)
        return change_collector.get_changed()

    @property
    @utils.saveit
    def pymodule(self):
        # Parsed lazily and cached by `utils.saveit`.
        return self.pycore.resource_to_pyobject(self.resource)

    @property
    @utils.saveit
    def source(self):
        if self.resource is not None:
            return self.resource.read()
        else:
            return self.pymodule.source_code

    @property
    @utils.saveit
    def lines(self):
        return self.pymodule.lines
class _MultipleFinders(object):
def __init__(self, finders):
self.finders = finders
def find_occurrences(self, resource=None, pymodule=None):
all_occurrences = []
for finder in self.finders:
all_occurrences.extend(finder.find_occurrences(resource, pymodule))
all_occurrences.sort(key = lambda o: o.get_primary_range())
return all_occurrences | /rope_py3k-0.9.4.tar.gz/rope_py3k-0.9.4/rope/refactor/change_signature.py | 0.714528 | 0.169406 | change_signature.py | pypi |
import warnings
from rope.base import change, taskhandle, builtins, ast, codeanalyze
from rope.refactor import patchedast, similarfinder, sourceutils
from rope.refactor.importutils import module_imports
class Restructure(object):
"""A class to perform python restructurings
A restructuring transforms pieces of code matching `pattern` to
`goal`. In the `pattern` wildcards can appear. Wildcards match
some piece of code based on their kind and arguments that are
passed to them through `args`.
`args` is a dictionary of wildcard names to wildcard arguments.
If the argument is a tuple, the first item of the tuple is
considered to be the name of the wildcard to use; otherwise the
"default" wildcard is used. For getting the list arguments a
wildcard supports, see the pydoc of the wildcard. (see
`rope.refactor.wildcard.DefaultWildcard` for the default
wildcard.)
`wildcards` is the list of wildcard types that can appear in
`pattern`. See `rope.refactor.wildcards`. If a wildcard does not
specify its kind (by using a tuple in args), the wildcard named
"default" is used. So there should be a wildcard with "default"
name in `wildcards`.
`imports` is the list of imports that changed modules should
import. Note that rope handles duplicate imports and does not add
the import if it already appears.
Example #1::
pattern ${pyobject}.get_attribute(${name})
goal ${pyobject}[${name}]
args pyobject: instance=rope.base.pyobjects.PyObject
Example #2::
pattern ${name} in ${pyobject}.get_attributes()
goal ${name} in {pyobject}
args pyobject: instance=rope.base.pyobjects.PyObject
Example #3::
pattern ${pycore}.create_module(${project}.root, ${name})
goal generate.create_module(${project}, ${name})
imports
from rope.contrib import generate
args
pycore: type=rope.base.pycore.PyCore
project: type=rope.base.project.Project
Example #4::
pattern ${pow}(${param1}, ${param2})
goal ${param1} ** ${param2}
args pow: name=mod.pow, exact
Example #5::
pattern ${inst}.longtask(${p1}, ${p2})
goal
${inst}.subtask1(${p1})
${inst}.subtask2(${p2})
args
inst: type=mod.A,unsure
"""
def __init__(self, project, pattern, goal, args=None,
imports=None, wildcards=None):
"""Construct a restructuring
See class pydoc for more info about the arguments.
"""
self.pycore = project.pycore
self.pattern = pattern
self.goal = goal
self.args = args
if self.args is None:
self.args = {}
self.imports = imports
if self.imports is None:
self.imports = []
self.wildcards = wildcards
self.template = similarfinder.CodeTemplate(self.goal)
def get_changes(self, checks=None, imports=None, resources=None,
task_handle=taskhandle.NullTaskHandle()):
"""Get the changes needed by this restructuring
`resources` can be a list of `rope.base.resources.File`\s to
apply the restructuring on. If `None`, the restructuring will
be applied to all python files.
`checks` argument has been deprecated. Use the `args` argument
of the constructor. The usage of::
strchecks = {'obj1.type': 'mod.A', 'obj2': 'mod.B',
'obj3.object': 'mod.C'}
checks = restructuring.make_checks(strchecks)
can be replaced with::
args = {'obj1': 'type=mod.A', 'obj2': 'name=mod.B',
'obj3': 'object=mod.C'}
where obj1, obj2 and obj3 are wildcard names that appear
in restructuring pattern.
"""
if checks is not None:
warnings.warn(
'The use of checks parameter is deprecated; '
'use the args parameter of the constructor instead.',
DeprecationWarning, stacklevel=2)
for name, value in checks.items():
self.args[name] = similarfinder._pydefined_to_str(value)
if imports is not None:
warnings.warn(
'The use of imports parameter is deprecated; '
'use imports parameter of the constructor, instead.',
DeprecationWarning, stacklevel=2)
self.imports = imports
changes = change.ChangeSet('Restructuring <%s> to <%s>' %
(self.pattern, self.goal))
if resources is not None:
files = [resource for resource in resources
if self.pycore.is_python_file(resource)]
else:
files = self.pycore.get_python_files()
job_set = task_handle.create_jobset('Collecting Changes', len(files))
for resource in files:
job_set.started_job(resource.path)
pymodule = self.pycore.resource_to_pyobject(resource)
finder = similarfinder.SimilarFinder(pymodule,
wildcards=self.wildcards)
matches = list(finder.get_matches(self.pattern, self.args))
computer = self._compute_changes(matches, pymodule)
result = computer.get_changed()
if result is not None:
imported_source = self._add_imports(resource, result,
self.imports)
changes.add_change(change.ChangeContents(resource,
imported_source))
job_set.finished_job()
return changes
def _compute_changes(self, matches, pymodule):
    """Build a `_ChangeComputer` for `matches` found in `pymodule`."""
    source = pymodule.source_code
    node = pymodule.get_ast()
    return _ChangeComputer(source, node, pymodule.lines,
                           self.template, matches)
def _add_imports(self, resource, source, imports):
    """Return `source` with the restructuring's `imports` lines added."""
    # Nothing to do when no imports were requested.
    if not imports:
        return source
    import_infos = self._get_import_infos(resource, imports)
    pymodule = self.pycore.get_string_module(source, resource)
    imports = module_imports.ModuleImports(self.pycore, pymodule)
    for import_info in import_infos:
        imports.add_import(import_info)
    return imports.get_changed_source()
def _get_import_infos(self, resource, imports):
    """Parse the `imports` lines and return their import-info objects."""
    import_source = '\n'.join(imports)
    pymodule = self.pycore.get_string_module(import_source, resource)
    module_import_tool = module_imports.ModuleImports(self.pycore, pymodule)
    return [statement.import_info
            for statement in module_import_tool.imports]
def make_checks(self, string_checks):
    """Convert str to str dicts to str to PyObject dicts

    This function is here to ease writing a UI.

    """
    converted = {}
    for key, value in string_checks.items():
        # Keys suffixed with `.object` or `.type` are matched against
        # pyobjects; plain keys are matched against pynames.
        is_pyname = not key.endswith(('.object', '.type'))
        evaluated = self._evaluate(value, is_pyname=is_pyname)
        if evaluated is not None:
            converted[key] = evaluated
    return converted
def _evaluate(self, code, is_pyname=True):
    """Resolve the dotted name `code` to a pyname (or pyobject).

    The first segment is looked up as a module (or the builtins
    namespace); the remaining segments are resolved as attributes.
    Returns `None` when any segment cannot be resolved.
    """
    attributes = code.split('.')
    pyname = None
    if attributes[0] in ('__builtin__', '__builtins__'):
        # Stub exposing builtin names through `get_attribute`.
        # NOTE(review): the stub defines no `__getitem__`, yet the loop
        # below subscripts with `pyobject[attribute]` — confirm this
        # path works for dotted builtin names.
        class _BuiltinsStub(object):
            def get_attribute(self, name):
                return builtins.builtins[name]
        pyobject = _BuiltinsStub()
    else:
        pyobject = self.pycore.get_module(attributes[0])
    for attribute in attributes[1:]:
        pyname = pyobject[attribute]
        if pyname is None:
            return None
        pyobject = pyname.get_object()
    return pyname if is_pyname else pyobject
def replace(code, pattern, goal):
    """Replace occurrences of `pattern` in `code` with `goal`.

    Used by other refactorings.  Returns `code` unchanged when nothing
    matched.
    """
    finder = similarfinder.RawSimilarFinder(code)
    found_matches = list(finder.get_matches(pattern))
    patched = patchedast.get_patched_ast(code)
    line_adapter = codeanalyze.SourceLinesAdapter(code)
    goal_template = similarfinder.CodeTemplate(goal)
    computer = _ChangeComputer(code, patched, line_adapter,
                               goal_template, found_matches)
    changed = computer.get_changed()
    return code if changed is None else changed
class _ChangeComputer(object):
    """Compute the rewritten source for a set of similar-finder matches.

    `goal` is a `CodeTemplate` describing the replacement; `matches` are
    the match objects found by a similar finder over `code`/`ast`.
    """

    def __init__(self, code, ast, lines, goal, matches):
        self.source = code
        self.goal = goal
        self.matches = matches
        self.ast = ast
        self.lines = lines
        # Maps a matched AST node to its match, for nested substitution.
        self.matched_asts = {}
        # Cache for _get_nearest_roots.
        self._nearest_roots = {}
        if self._is_expression():
            for match in self.matches:
                self.matched_asts[match.ast] = match

    def get_changed(self):
        """Return the changed source, or `None` when nothing changed."""
        if self._is_expression():
            # Expression matches may nest; rewrite the whole tree.
            result = self._get_node_text(self.ast)
            if result == self.source:
                return None
            return result
        else:
            collector = codeanalyze.ChangeCollector(self.source)
            last_end = -1
            for match in self.matches:
                start, end = match.get_region()
                if start < last_end:
                    # NOTE(review): _is_expression() is always False in
                    # this branch, so overlapping matches are always
                    # skipped here; the inner check is effectively dead.
                    if not self._is_expression():
                        continue
                last_end = end
                replacement = self._get_matched_text(match)
                collector.add_change(start, end, replacement)
            return collector.get_changed()

    def _is_expression(self):
        # True when matches are expression matches (empty match list is
        # falsy, so this also guards against no matches at all).
        return self.matches and isinstance(self.matches[0],
                                           similarfinder.ExpressionMatch)

    def _get_matched_text(self, match):
        """Render the goal template for one match, re-indented in place."""
        mapping = {}
        for name in self.goal.get_names():
            node = match.get_ast(name)
            if node is None:
                raise similarfinder.BadNameInCheckError(
                    'Unknown name <%s>' % name)
            # `force` avoids infinite recursion when a wildcard maps to
            # the matched expression itself.
            force = self._is_expression() and match.ast == node
            mapping[name] = self._get_node_text(node, force)
        unindented = self.goal.substitute(mapping)
        return self._auto_indent(match.get_region()[0], unindented)

    def _get_node_text(self, node, force=False):
        """Source text for `node`, with nested matches rewritten."""
        if not force and node in self.matched_asts:
            return self._get_matched_text(self.matched_asts[node])
        start, end = patchedast.node_region(node)
        main_text = self.source[start:end]
        collector = codeanalyze.ChangeCollector(main_text)
        # Rewrite the nearest matched descendants inside this node.
        for node in self._get_nearest_roots(node):
            sub_start, sub_end = patchedast.node_region(node)
            collector.add_change(sub_start - start, sub_end - start,
                                 self._get_node_text(node))
        result = collector.get_changed()
        if result is None:
            return main_text
        return result

    def _auto_indent(self, offset, text):
        """Indent every non-first, non-blank line of `text` like `offset`'s line."""
        lineno = self.lines.get_line_number(offset)
        indents = sourceutils.get_indents(self.lines, lineno)
        result = []
        for index, line in enumerate(text.splitlines(True)):
            if index != 0 and line.strip():
                result.append(' ' * indents)
            result.append(line)
        return ''.join(result)

    def _get_nearest_roots(self, node):
        """Topmost descendants of `node` that are themselves matches (cached)."""
        if node not in self._nearest_roots:
            result = []
            for child in ast.get_child_nodes(node):
                if child in self.matched_asts:
                    result.append(child)
                else:
                    result.extend(self._get_nearest_roots(child))
            self._nearest_roots[node] = result
        return self._nearest_roots[node]
import re
import rope.base.pynames
from rope.base import pynames, pyobjects, codeanalyze, evaluate, exceptions, utils, worder
class Finder(object):
    """For finding occurrences of a name

    The constructor takes a `filters` argument.  It should be a list
    of functions that take a single argument.  For each possible
    occurrence, these functions are called in order with an
    instance of `Occurrence`:

    * If it returns `None` other filters are tried.
    * If it returns `True`, the occurrence will be a match.
    * If it returns `False`, the occurrence will be skipped.
    * If all of the filters return `None`, it is skipped also.

    """

    def __init__(self, pycore, name, filters=None, docs=False):
        # The previous mutable default argument (`filters=[lambda ...]`)
        # was replaced by a `None` sentinel; the default behavior
        # (accept every occurrence) is unchanged.
        if filters is None:
            filters = [lambda o: True]
        self.pycore = pycore
        self.name = name
        self.docs = docs
        self.filters = filters
        self._textual_finder = _TextualFinder(name, docs=docs)

    def find_occurrences(self, resource=None, pymodule=None):
        """Generate `Occurrence` instances"""
        tools = _OccurrenceToolsCreator(self.pycore, resource=resource,
                                        pymodule=pymodule, docs=self.docs)
        for offset in self._textual_finder.find_offsets(tools.source_code):
            occurrence = Occurrence(tools, offset)
            for filter in self.filters:
                result = filter(occurrence)
                if result is None:
                    # Undecided: ask the next filter.
                    continue
                if result:
                    yield occurrence
                # A filter decided (True or False): stop filtering.
                break
def create_finder(pycore, name, pyname, only_calls=False, imports=True,
                  unsure=None, docs=False, instance=None, in_hierarchy=False):
    """A factory for `Finder`

    Based on the arguments it creates a list of filters.  `instance`
    argument is needed only when you want implicit interfaces to be
    considered.

    """
    # Renamed from `pynames` to stop shadowing the `rope.base.pynames`
    # module imported at the top of this file.
    candidate_pynames = set([pyname])
    filters = []
    if only_calls:
        filters.append(CallsFilter())
    if not imports:
        filters.append(NoImportsFilter())
    if isinstance(instance, rope.base.pynames.ParameterName):
        # Implicit interfaces: also match the attribute on each object
        # the parameter may reference.
        for pyobject in instance.get_objects():
            try:
                candidate_pynames.add(pyobject[name])
            except exceptions.AttributeNotFoundError:
                pass
    for pyname in candidate_pynames:
        filters.append(PyNameFilter(pyname))
        if in_hierarchy:
            filters.append(InHierarchyFilter(pyname))
    if unsure:
        filters.append(UnsureFilter(unsure))
    return Finder(pycore, name, filters=filters, docs=docs)
class Occurrence(object):
    """One candidate occurrence of a name at `offset` in a module.

    Most queries are memoized with `utils.saveit`, since filters may
    ask the same question repeatedly for one occurrence.
    """

    def __init__(self, tools, offset):
        self.tools = tools
        self.offset = offset
        self.resource = tools.resource

    @utils.saveit
    def get_word_range(self):
        return self.tools.word_finder.get_word_range(self.offset)

    @utils.saveit
    def get_primary_range(self):
        return self.tools.word_finder.get_primary_range(self.offset)

    @utils.saveit
    def get_pyname(self):
        # Implicitly returns None for unresolvable identifiers.
        try:
            return self.tools.name_finder.get_pyname_at(self.offset)
        except exceptions.BadIdentifierError:
            pass

    @utils.saveit
    def get_primary_and_pyname(self):
        # Implicitly returns None for unresolvable identifiers.
        try:
            return self.tools.name_finder.get_primary_and_pyname_at(self.offset)
        except exceptions.BadIdentifierError:
            pass

    @utils.saveit
    def is_in_import_statement(self):
        return (self.tools.word_finder.is_from_statement(self.offset) or
                self.tools.word_finder.is_import_statement(self.offset))

    def is_called(self):
        return self.tools.word_finder.is_a_function_being_called(self.offset)

    def is_defined(self):
        return self.tools.word_finder.is_a_class_or_function_name_in_header(self.offset)

    def is_a_fixed_primary(self):
        # True for names that cannot be replaced as a whole primary:
        # def/class headers and names after `from ... import`.
        return self.tools.word_finder.is_a_class_or_function_name_in_header(self.offset) or \
            self.tools.word_finder.is_a_name_after_from_import(self.offset)

    def is_written(self):
        return self.tools.word_finder.is_assigned_here(self.offset)

    def is_unsure(self):
        return unsure_pyname(self.get_pyname())

    @property
    @utils.saveit
    def lineno(self):
        # Line number of the start of the occurrence's word.
        offset = self.get_word_range()[0]
        return self.tools.pymodule.lines.get_line_number(offset)
def same_pyname(expected, pyname):
    """Check whether `expected` and `pyname` are the same"""
    if expected is None or pyname is None:
        return False
    if expected == pyname:
        return True
    # Only imported modules/names may still be "the same" without
    # comparing equal; for any other pyname types the answer is no.
    if type(expected) not in (pynames.ImportedModule, pynames.ImportedName) and \
       type(pyname) not in (pynames.ImportedModule, pynames.ImportedName):
        return False
    # Imported names are the same when they point at the same definition
    # location and object.
    return expected.get_definition_location() == pyname.get_definition_location() and \
        expected.get_object() == pyname.get_object()
def unsure_pyname(pyname, unbound=True):
    """Return `True` if we don't know what this name references"""
    if pyname is None:
        return True
    if unbound and not isinstance(pyname, pynames.UnboundName):
        return False
    if pyname.get_object() == pyobjects.get_unknown():
        return True
    # NOTE: implicitly returns None (falsy) when the object is known;
    # callers treat the result as a boolean.
class PyNameFilter(object):
    """Accept occurrences that resolve to a given pyname."""

    def __init__(self, pyname):
        self.pyname = pyname

    def __call__(self, occurrence):
        # True accepts the occurrence; None leaves the decision to the
        # remaining filters.
        if not same_pyname(self.pyname, occurrence.get_pyname()):
            return None
        return True
class InHierarchyFilter(object):
    """Accept occurrences of a method name anywhere in its class hierarchy.

    Two occurrences match when the classes defining them share a common
    "root" class that first introduced the name.
    """

    def __init__(self, pyname, implementations_only=False):
        self.pyname = pyname
        self.impl_only = implementations_only
        self.pyclass = self._get_containing_class(pyname)
        if self.pyclass is not None:
            self.name = pyname.get_object().get_name()
            self.roots = self._get_root_classes(self.pyclass, self.name)
        else:
            # Not a method: this filter never decides (always None).
            self.roots = None

    def __call__(self, occurrence):
        if self.roots is None:
            return
        pyclass = self._get_containing_class(occurrence.get_pyname())
        if pyclass is not None:
            roots = self._get_root_classes(pyclass, self.name)
            # Match when both hierarchies share at least one root class.
            if self.roots.intersection(roots):
                return True

    def _get_containing_class(self, pyname):
        # Returns the class object whose scope defines `pyname`, or None.
        if isinstance(pyname, pynames.DefinedName):
            scope = pyname.get_object().get_scope()
            parent = scope.parent
            if parent is not None and parent.get_kind() == 'Class':
                return parent.pyobject

    def _get_root_classes(self, pyclass, name):
        """Topmost superclasses of `pyclass` that define `name`."""
        if self.impl_only and pyclass == self.pyclass:
            return set([pyclass])
        result = set()
        for superclass in pyclass.get_superclasses():
            if name in superclass:
                result.update(self._get_root_classes(superclass, name))
        if not result:
            # No superclass defines the name: this class is a root.
            return set([pyclass])
        return result
class UnsureFilter(object):
    """Accept unsure occurrences approved by a user-supplied predicate."""

    def __init__(self, unsure):
        self.unsure = unsure

    def __call__(self, occurrence):
        # True accepts; None defers to the remaining filters.
        accepted = occurrence.is_unsure() and self.unsure(occurrence)
        return True if accepted else None
class NoImportsFilter(object):
    """Reject occurrences that appear inside import statements."""

    def __call__(self, occurrence):
        # False rejects outright; None defers to the remaining filters.
        return False if occurrence.is_in_import_statement() else None
class CallsFilter(object):
    """Reject occurrences where the name is not being called."""

    def __call__(self, occurrence):
        # False rejects outright; None defers to the remaining filters.
        return None if occurrence.is_called() else False
class _TextualFinder(object):
    """Find candidate offsets of a name in raw source text.

    When `docs` is false, a regex that also recognizes (and discards)
    comments and string literals is used, so occurrences inside them are
    ignored; when `docs` is true a plain text scan is used instead.
    """

    def __init__(self, name, docs=False):
        self.name = name
        self.docs = docs
        self.comment_pattern = _TextualFinder.any('comment', [r'#[^\n]*'])
        self.string_pattern = _TextualFinder.any(
            'string', [codeanalyze.get_string_pattern()])
        self.pattern = self._get_occurrence_pattern(self.name)

    def find_offsets(self, source):
        """Yield offsets where `self.name` occurs as a whole word."""
        # Cheap substring check first; bail out early on most files.
        if not self._fast_file_query(source):
            return
        if self.docs:
            searcher = self._normal_search
        else:
            searcher = self._re_search
        for matched in searcher(source):
            yield matched

    def _re_search(self, source):
        # Comment/string groups also match, but only 'occurrence' group
        # hits are yielded — that is how comments/strings are skipped.
        for match in self.pattern.finditer(source):
            for key, value in match.groupdict().items():
                if value and key == 'occurrence':
                    yield match.start(key)

    def _normal_search(self, source):
        # Plain scan with manual word-boundary checks (used when `docs`).
        current = 0
        while True:
            try:
                found = source.index(self.name, current)
                current = found + len(self.name)
                if (found == 0 or not self._is_id_char(source[found - 1])) and \
                   (current == len(source) or not self._is_id_char(source[current])):
                    yield found
            except ValueError:
                break

    def _is_id_char(self, c):
        return c.isalnum() or c == '_'

    def _fast_file_query(self, source):
        # True when the name occurs at all (even mid-identifier).
        try:
            source.index(self.name)
            return True
        except ValueError:
            return False

    def _get_source(self, resource, pymodule):
        # NOTE(review): appears unused within this class — verify callers
        # before removing.
        if resource is not None:
            return resource.read()
        else:
            return pymodule.source_code

    def _get_occurrence_pattern(self, name):
        occurrence_pattern = _TextualFinder.any('occurrence',
                                                ['\\b' + name + '\\b'])
        pattern = re.compile(occurrence_pattern + '|' + self.comment_pattern +
                             '|' + self.string_pattern)
        return pattern

    @staticmethod
    def any(name, list_):
        # Build a named alternation group: (?P<name>alt1|alt2|...).
        return '(?P<%s>' % name + '|'.join(list_) + ')'
class _OccurrenceToolsCreator(object):
    """Lazily create the helpers shared by all occurrences of one module.

    Exactly one of `resource`/`pymodule` is expected; the other is
    derived on demand.  All properties are memoized with `utils.saveit`.
    """

    def __init__(self, pycore, resource=None, pymodule=None, docs=False):
        self.pycore = pycore
        self.__resource = resource
        self.__pymodule = pymodule
        self.docs = docs

    @property
    @utils.saveit
    def name_finder(self):
        return evaluate.ScopeNameFinder(self.pymodule)

    @property
    @utils.saveit
    def source_code(self):
        # Prefer the on-disk content when a resource was given.
        if self.__resource is not None:
            return self.resource.read()
        else:
            return self.pymodule.source_code

    @property
    @utils.saveit
    def word_finder(self):
        return worder.Worder(self.source_code, self.docs)

    @property
    @utils.saveit
    def resource(self):
        if self.__resource is not None:
            return self.__resource
        if self.__pymodule is not None:
            return self.__pymodule.resource

    @property
    @utils.saveit
    def pymodule(self):
        if self.__pymodule is not None:
            return self.__pymodule
        return self.pycore.resource_to_pyobject(self.resource)
from rope.base import ast, codeanalyze
def get_indents(lines, lineno):
    """Return the indentation width of line `lineno` in `lines`."""
    line = lines.get_line(lineno)
    return codeanalyze.count_line_indents(line)
def find_minimum_indents(source_code):
    """Return the smallest indentation among non-blank lines.

    Whitespace-only lines are ignored.  Returns 0 when there is no
    non-blank line.  (Previously this started from a hard-coded cap of
    80, which mis-reported sources indented deeper than 80 columns and
    returned 80 for blank-only input.)
    """
    result = None
    for line in source_code.split('\n'):
        if line.strip() == '':
            continue
        indents = codeanalyze.count_line_indents(line)
        result = indents if result is None else min(result, indents)
    return 0 if result is None else result
def indent_lines(source_code, amount):
    """Shift every non-blank line of `source_code` by `amount` columns.

    Blank lines are normalized to a bare newline; a negative `amount`
    dedents, clamped at column zero.
    """
    if amount == 0:
        return source_code
    result = []
    for line in source_code.splitlines(True):
        if not line.strip():
            result.append('\n')
        elif amount < 0:
            current = codeanalyze.count_line_indents(line)
            new_indent = max(0, current + amount)
            result.append(' ' * new_indent + line.lstrip())
        else:
            result.append(' ' * amount + line)
    return ''.join(result)
def fix_indentation(code, new_indents):
    """Change the indentation of `code` to `new_indents`"""
    current_indents = find_minimum_indents(code)
    return indent_lines(code, new_indents - current_indents)
def add_methods(pymodule, class_scope, methods_sources):
    """Return `pymodule`'s source with `methods_sources` appended to the class."""
    source_code = pymodule.source_code
    lines = pymodule.lines
    insertion_line = class_scope.get_end()
    if class_scope.get_scopes():
        # Insert after the last existing method of the class.
        insertion_line = class_scope.get_scopes()[-1].get_end()
    insertion_offset = lines.get_line_end(insertion_line)
    methods = '\n\n' + '\n\n'.join(methods_sources)
    # Indent the new methods one level deeper than the class header.
    indented_methods = fix_indentation(
        methods, get_indents(lines, class_scope.get_start()) +
        get_indent(pymodule.pycore))
    result = []
    result.append(source_code[:insertion_offset])
    result.append(indented_methods)
    result.append(source_code[insertion_offset:])
    return ''.join(result)
def get_body(pyfunction):
    """Return unindented function body

    The unused local `scope` was removed; the region computation lives
    in `get_body_region`.
    """
    pymodule = pyfunction.get_module()
    start, end = get_body_region(pyfunction)
    return fix_indentation(pymodule.source_code[start:end], 0)
def get_body_region(defined):
    """Return the start and end offsets of function body"""
    scope = defined.get_scope()
    pymodule = defined.get_module()
    lines = pymodule.lines
    node = defined.get_ast()
    start_line = node.lineno
    if defined.get_doc() is None:
        # No docstring: body starts at the first statement.
        start_line = node.body[0].lineno
    elif len(node.body) > 1:
        # Skip the docstring statement.
        start_line = node.body[1].lineno
    start = lines.get_line_start(start_line)
    scope_start = pymodule.logical_lines.logical_line_in(scope.start)
    if scope_start[1] >= start_line:
        # a one-liner!
        # XXX: what if colon appears in a string
        start = pymodule.source_code.index(':', start) + 1
        while pymodule.source_code[start].isspace():
            start += 1
    end = min(lines.get_line_end(scope.end) + 1, len(pymodule.source_code))
    return start, end
def get_indent(pycore):
    """Return the project's configured indent size (defaults to 4)."""
    prefs = pycore.project.prefs
    return prefs.get('indent_size', 4)
import warnings
from rope.base import exceptions, pyobjects, pynames, taskhandle, evaluate, worder, codeanalyze
from rope.base.change import ChangeSet, ChangeContents, MoveResource
from rope.refactor import occurrences, sourceutils
class Rename(object):
    """A class for performing rename refactoring

    It can rename everything: classes, functions, modules, packages,
    methods, variables and keyword arguments.

    """

    def __init__(self, project, resource, offset=None):
        """If `offset` is None, the `resource` itself will be renamed"""
        self.project = project
        self.pycore = project.pycore
        self.resource = resource
        if offset is not None:
            # Renaming the identifier at `offset`.
            self.old_name = worder.get_name_at(self.resource, offset)
            this_pymodule = self.pycore.resource_to_pyobject(self.resource)
            self.old_instance, self.old_pyname = \
                evaluate.eval_location2(this_pymodule, offset)
            if self.old_pyname is None:
                raise exceptions.RefactoringError(
                    'Rename refactoring should be performed'
                    ' on resolvable python identifiers.')
        else:
            # Renaming a module or package; `__init__.py` stands for its
            # containing package.
            if not resource.is_folder() and resource.name == '__init__.py':
                resource = resource.parent
            dummy_pymodule = self.pycore.get_string_module('')
            self.old_instance = None
            self.old_pyname = pynames.ImportedModule(dummy_pymodule,
                                                     resource=resource)
            if resource.is_folder():
                self.old_name = resource.name
            else:
                # Strip the trailing '.py'.
                self.old_name = resource.name[:-3]

    def get_old_name(self):
        return self.old_name

    def get_changes(self, new_name, in_file=None, in_hierarchy=False,
                    unsure=None, docs=False, resources=None,
                    task_handle=taskhandle.NullTaskHandle()):
        """Get the changes needed for this refactoring

        Parameters:

        - `in_hierarchy`: when renaming a method this keyword forces
          to rename all matching methods in the hierarchy
        - `docs`: when `True` rename refactoring will rename
          occurrences in comments and strings where the name is
          visible.  Setting it will make renames faster, too.
        - `unsure`: decides what to do about unsure occurrences.
          If `None`, they are ignored.  Otherwise `unsure` is
          called with an instance of `occurrence.Occurrence` as
          parameter.  If it returns `True`, the occurrence is
          considered to be a match.
        - `resources` can be a list of `rope.base.resources.File`\s to
          apply this refactoring on.  If `None`, the restructuring
          will be applied to all python files.
        - `in_file`: this argument has been deprecated; use
          `resources` instead.

        """
        if unsure in (True, False):
            # Deprecated boolean form: wrap it in a constant predicate.
            warnings.warn(
                'unsure parameter should be a function that returns '
                'True or False', DeprecationWarning, stacklevel=2)
            def unsure_func(value=unsure):
                return value
            unsure = unsure_func
        if in_file is not None:
            warnings.warn(
                '`in_file` argument has been deprecated; use `resources` '
                'instead. ', DeprecationWarning, stacklevel=2)
            if in_file:
                resources = [self.resource]
        if _is_local(self.old_pyname):
            # Local names cannot leak outside this file.
            resources = [self.resource]
        if resources is None:
            resources = self.pycore.get_python_files()
        changes = ChangeSet('Renaming <%s> to <%s>' %
                            (self.old_name, new_name))
        finder = occurrences.create_finder(
            self.pycore, self.old_name, self.old_pyname, unsure=unsure,
            docs=docs, instance=self.old_instance,
            in_hierarchy=in_hierarchy and self.is_method())
        job_set = task_handle.create_jobset('Collecting Changes', len(resources))
        for file_ in resources:
            job_set.started_job(file_.path)
            new_content = rename_in_module(finder, new_name, resource=file_)
            if new_content is not None:
                changes.add_change(ChangeContents(file_, new_content))
            job_set.finished_job()
        if self._is_renaming_a_module():
            resource = self.old_pyname.get_object().get_resource()
            if self._is_allowed_to_move(resources, resource):
                self._rename_module(resource, new_name, changes)
        return changes

    def _is_allowed_to_move(self, resources, resource):
        # Only move the module/package when it is among the resources
        # being refactored.
        if resource.is_folder():
            try:
                return resource.get_child('__init__.py') in resources
            except exceptions.ResourceNotFoundError:
                return False
        else:
            return resource in resources

    def _is_renaming_a_module(self):
        if isinstance(self.old_pyname.get_object(), pyobjects.AbstractModule):
            return True
        return False

    def is_method(self):
        # True when the renamed name is a function defined inside a class.
        pyname = self.old_pyname
        return isinstance(pyname, pynames.DefinedName) and \
            isinstance(pyname.get_object(), pyobjects.PyFunction) and \
            isinstance(pyname.get_object().parent, pyobjects.PyClass)

    def _rename_module(self, resource, new_name, changes):
        # Rename on disk: files get '.py' appended; packages keep their name.
        if not resource.is_folder():
            new_name = new_name + '.py'
        parent_path = resource.parent.path
        if parent_path == '':
            new_location = new_name
        else:
            new_location = parent_path + '/' + new_name
        changes.add_change(MoveResource(resource, new_location))
class ChangeOccurrences(object):
    """A class for changing the occurrences of a name in a scope

    This class replaces the occurrences of a name.  Note that it only
    changes the scope containing the offset passed to the constructor.
    What's more it does not have any side-effects.  That is for
    example changing occurrences of a module does not rename the
    module; it merely replaces the occurrences of that module in a
    scope with the given expression.  This class is useful for
    performing many custom refactorings.

    """

    def __init__(self, project, resource, offset):
        self.pycore = project.pycore
        self.resource = resource
        self.offset = offset
        self.old_name = worder.get_name_at(resource, offset)
        self.pymodule = self.pycore.resource_to_pyobject(self.resource)
        self.old_pyname = evaluate.eval_location(self.pymodule, offset)

    def get_old_name(self):
        # The full primary (e.g. `a.b.c`), not just the last name part.
        word_finder = worder.Worder(self.resource.read())
        return word_finder.get_primary_at(self.offset)

    def _get_scope_offset(self):
        # Offsets of the innermost scope containing `self.offset`.
        lines = self.pymodule.lines
        scope = self.pymodule.get_scope().\
            get_inner_scope_for_line(lines.get_line_number(self.offset))
        start = lines.get_line_start(scope.get_start())
        end = lines.get_line_end(scope.get_end())
        return start, end

    def get_changes(self, new_name, only_calls=False, reads=True, writes=True):
        changes = ChangeSet('Changing <%s> occurrences to <%s>' %
                            (self.old_name, new_name))
        scope_start, scope_end = self._get_scope_offset()
        finder = occurrences.create_finder(
            self.pycore, self.old_name, self.old_pyname,
            imports=False, only_calls=only_calls)
        new_contents = rename_in_module(
            finder, new_name, pymodule=self.pymodule, replace_primary=True,
            region=(scope_start, scope_end), reads=reads, writes=writes)
        if new_contents is not None:
            changes.add_change(ChangeContents(self.resource, new_contents))
        return changes
def rename_in_module(occurrences_finder, new_name, resource=None, pymodule=None,
                     replace_primary=False, region=None, reads=True, writes=True):
    """Returns the changed source or `None` if there is no changes"""
    if resource is not None:
        source_code = resource.read()
    else:
        source_code = pymodule.source_code
    change_collector = codeanalyze.ChangeCollector(source_code)
    for occurrence in occurrences_finder.find_occurrences(resource, pymodule):
        # Fixed primaries (def/class headers, from-import names) cannot
        # be replaced as a whole primary.
        if replace_primary and occurrence.is_a_fixed_primary():
            continue
        if replace_primary:
            start, end = occurrence.get_primary_range()
        else:
            start, end = occurrence.get_word_range()
        # Honor read/write filtering of occurrences.
        if (not reads and not occurrence.is_written()) or \
           (not writes and occurrence.is_written()):
            continue
        # When a region is given, only change occurrences starting in it.
        if region is None or region[0] <= start < region[1]:
            change_collector.add_change(start, end, new_name)
    return change_collector.get_changed()
def _is_local(pyname):
    """Whether `pyname` is a name assigned inside a function scope."""
    module, lineno = pyname.get_definition_location()
    if lineno is None:
        return False
    scope = module.get_scope().get_inner_scope_for_line(lineno)
    if isinstance(pyname, pynames.DefinedName) and \
       scope.get_kind() in ('Function', 'Class'):
        # A def/class statement defines its name in the parent scope.
        scope = scope.parent
    return scope.get_kind() == 'Function' and \
        pyname in list(scope.get_names().values()) and \
        isinstance(pyname, pynames.AssignedName)
import rope.base.change
from rope.base import exceptions, evaluate, worder, codeanalyze
from rope.refactor import functionutils, sourceutils, occurrences
class IntroduceParameter(object):
    """Introduce parameter refactoring

    This refactoring adds a new parameter to a function and replaces
    references to an expression in it with the new parameter.

    The parameter finding part is different from finding similar
    pieces in extract refactorings.  In this refactoring parameters
    are found based on the object they reference to.  For instance
    in::

        class A(object):
            var = None

        class B(object):
            a = A()

        b = B()
        a = b.a

        def f(a):
            x = b.a.var + a.var

    using this refactoring on ``a.var`` with ``p`` as the new
    parameter name, will result in::

        def f(p=a.var):
            x = p + p

    """

    def __init__(self, project, resource, offset):
        self.pycore = project.pycore
        self.resource = resource
        self.offset = offset
        self.pymodule = self.pycore.resource_to_pyobject(self.resource)
        scope = self.pymodule.get_scope().get_inner_scope_for_offset(offset)
        if scope.get_kind() != 'Function':
            raise exceptions.RefactoringError(
                'Introduce parameter should be performed inside functions')
        self.pyfunction = scope.pyobject
        self.name, self.pyname = self._get_name_and_pyname()
        if self.pyname is None:
            raise exceptions.RefactoringError(
                'Cannot find the definition of <%s>' % self.name)

    def _get_primary(self):
        # The full dotted expression at the offset (e.g. `a.var`); it
        # becomes the default value of the new parameter.
        word_finder = worder.Worder(self.resource.read())
        return word_finder.get_primary_at(self.offset)

    def _get_name_and_pyname(self):
        return (worder.get_name_at(self.resource, self.offset),
                evaluate.eval_location(self.pymodule, self.offset))

    def get_changes(self, new_parameter):
        # Append `new_parameter=<primary>` to the signature and replace
        # occurrences of the expression in the body with the parameter.
        definition_info = functionutils.DefinitionInfo.read(self.pyfunction)
        definition_info.args_with_defaults.append((new_parameter,
                                                   self._get_primary()))
        collector = codeanalyze.ChangeCollector(self.resource.read())
        header_start, header_end = self._get_header_offsets()
        body_start, body_end = sourceutils.get_body_region(self.pyfunction)
        collector.add_change(header_start, header_end,
                             definition_info.to_string())
        self._change_function_occurances(collector, body_start,
                                         body_end, new_parameter)
        changes = rope.base.change.ChangeSet('Introduce parameter <%s>' %
                                             new_parameter)
        change = rope.base.change.ChangeContents(self.resource,
                                                 collector.get_changed())
        changes.add_change(change)
        return changes

    def _get_header_offsets(self):
        # Offsets of the signature text between `def ` and the final `:`.
        lines = self.pymodule.lines
        start_line = self.pyfunction.get_scope().get_start()
        end_line = self.pymodule.logical_lines.\
            logical_line_in(start_line)[1]
        start = lines.get_line_start(start_line)
        end = lines.get_line_end(end_line)
        start = self.pymodule.source_code.find('def', start) + 4
        end = self.pymodule.source_code.rfind(':', start, end)
        return start, end

    def _change_function_occurances(self, collector, function_start,
                                    function_end, new_name):
        # Replace occurrences of the name only inside the function body.
        finder = occurrences.create_finder(self.pycore, self.name, self.pyname)
        for occurrence in finder.find_occurrences(resource=self.resource):
            start, end = occurrence.get_primary_range()
            if function_start <= start < function_end:
                collector.add_change(start, end, new_name)
from rope.base import ast
def find_visible(node, lines):
    """Return the line which is visible from all `lines`"""
    suite_root = ast_suite_tree(node)
    return find_visible_for_suite(suite_root, lines)
def find_visible_for_suite(root, lines):
    """Return a line visible from every line in `lines` within suite tree `root`.

    Works pairwise and recursively: the answer for the list is the line
    visible from the first line and the answer for the rest.
    """
    if len(lines) == 1:
        return lines[0]
    line1 = lines[0]
    line2 = find_visible_for_suite(root, lines[1:])
    suite1 = root.find_suite(line1)
    suite2 = root.find_suite(line2)

    def valid(suite):
        # Ignored suites (function/class bodies) do not count.
        return suite is not None and not suite.ignored

    if valid(suite1) and not valid(suite2):
        return line1
    if not valid(suite1) and valid(suite2):
        return line2
    if not valid(suite1) and not valid(suite2):
        return None
    # Walk both suites up toward a common ancestor, tracking the line at
    # which each climbed suite started.
    while suite1 != suite2 and suite1.parent != suite2.parent:
        if suite1._get_level() < suite2._get_level():
            line2 = suite2.get_start()
            suite2 = suite2.parent
        elif suite1._get_level() > suite2._get_level():
            line1 = suite1.get_start()
            suite1 = suite1.parent
        else:
            line1 = suite1.get_start()
            line2 = suite2.get_start()
            suite1 = suite1.parent
            suite2 = suite2.parent
    if suite1 == suite2:
        return min(line1, line2)
    # Siblings: the earlier suite start is visible from both.
    return min(suite1.get_start(), suite2.get_start())
def ast_suite_tree(node):
    """Build a `Suite` tree for the body of `node`.

    Nodes without a `lineno` (e.g. a module) are rooted at line 1.
    """
    return Suite(node.body, getattr(node, 'lineno', 1))
class Suite(object):
    """A suite (statement block) in the source, arranged as a tree.

    `ignored` suites are function/class bodies, which do not count as
    visible locations for `find_visible_for_suite`.
    """

    def __init__(self, child_nodes, lineno, parent=None, ignored=False):
        self.parent = parent
        self.lineno = lineno
        self.child_nodes = child_nodes
        # Child suites are computed lazily by get_children().
        self._children = None
        self.ignored = ignored

    def get_start(self):
        if self.parent is None:
            # The root suite starts at its first statement (or line 1
            # when empty).
            if self.child_nodes:
                return self.local_start()
            else:
                return 1
        return self.lineno

    def get_children(self):
        # Lazily collect sub-suites by walking each child statement.
        if self._children is None:
            walker = _SuiteWalker(self)
            for child in self.child_nodes:
                ast.walk(child, walker)
            self._children = walker.suites
        return self._children

    def local_start(self):
        return self.child_nodes[0].lineno

    def local_end(self):
        # Last line covered by this suite, including nested suites.
        end = self.child_nodes[-1].lineno
        if self.get_children():
            end = max(end, self.get_children()[-1].local_end())
        return end

    def find_suite(self, line):
        """Return the innermost suite containing `line` (or self)."""
        if line is None:
            return None
        for child in self.get_children():
            if child.local_start() <= line <= child.local_end():
                return child.find_suite(line)
        return self

    def _get_level(self):
        # Depth of this suite in the tree; root is 0.
        if self.parent is None:
            return 0
        return self.parent._get_level() + 1
class _SuiteWalker(object):
    """AST walker that collects the sub-suites of one statement."""

    def __init__(self, suite):
        self.suite = suite
        self.suites = []

    def _If(self, node):
        self._add_if_like_node(node)

    def _For(self, node):
        self._add_if_like_node(node)

    def _While(self, node):
        self._add_if_like_node(node)

    def _With(self, node):
        self.suites.append(Suite(node.body, node.lineno, self.suite))

    def _TryFinally(self, node):
        # A try/except/finally appears as a TryFinally wrapping a
        # TryExcept as its body; merge the two in that case.
        # NOTE(review): the guard checks len(node.finalbody) == 1 before
        # inspecting node.body[0] — confirm this matches the AST shape
        # intended (body of length 1 holding the TryExcept).
        if len(node.finalbody) == 1 and \
           isinstance(node.body[0], ast.TryExcept):
            self._TryExcept(node.body[0])
        else:
            self.suites.append(Suite(node.body, node.lineno, self.suite))
        self.suites.append(Suite(node.finalbody, node.lineno, self.suite))

    def _TryExcept(self, node):
        self.suites.append(Suite(node.body, node.lineno, self.suite))
        for handler in node.handlers:
            self.suites.append(Suite(handler.body, node.lineno, self.suite))
        if node.orelse:
            self.suites.append(Suite(node.orelse, node.lineno, self.suite))

    def _add_if_like_node(self, node):
        # if/for/while: a body suite plus an optional else suite.
        self.suites.append(Suite(node.body, node.lineno, self.suite))
        if node.orelse:
            self.suites.append(Suite(node.orelse, node.lineno, self.suite))

    def _FunctionDef(self, node):
        # Function bodies are collected but ignored for visibility.
        self.suites.append(Suite(node.body, node.lineno,
                                 self.suite, ignored=True))

    def _ClassDef(self, node):
        # Class bodies are collected but ignored for visibility.
        self.suites.append(Suite(node.body, node.lineno,
                                 self.suite, ignored=True))
import re
import rope.refactor.wildcards
from rope.base import codeanalyze, evaluate, exceptions, ast, builtins
from rope.refactor import (patchedast, sourceutils, occurrences,
wildcards, importutils)
class BadNameInCheckError(exceptions.RefactoringError):
    """Raised when a goal/check refers to a wildcard name not in the pattern."""
    pass
class SimilarFinder(object):
    """`SimilarFinder` can be used to find similar pieces of code

    See the notes in the `rope.refactor.restructure` module for more
    info.

    """

    def __init__(self, pymodule, wildcards=None):
        """Construct a SimilarFinder"""
        self.source = pymodule.source_code
        self.raw_finder = RawSimilarFinder(
            pymodule.source_code, pymodule.get_ast(), self._does_match)
        self.pymodule = pymodule
        if wildcards is None:
            # Default to the standard wildcard implementation.
            self.wildcards = {}
            for wildcard in [rope.refactor.wildcards.
                             DefaultWildcard(pymodule.pycore.project)]:
                self.wildcards[wildcard.get_name()] = wildcard
        else:
            self.wildcards = wildcards

    def get_matches(self, code, args={}, start=0, end=None):
        # NOTE(review): `args` uses a mutable default; it is read-only
        # here, but passing a fresh dict per call is safer for callers.
        self.args = args
        if end is None:
            end = len(self.source)
        skip_region = None
        # The special '' key may carry a (resource, region) to skip,
        # e.g. the region being extracted.
        if 'skip' in args.get('', {}):
            resource, region = args['']['skip']
            if resource == self.pymodule.get_resource():
                skip_region = region
        return self.raw_finder.get_matches(code, start=start, end=end,
                                           skip=skip_region)

    def get_match_regions(self, *args, **kwds):
        for match in self.get_matches(*args, **kwds):
            yield match.get_region()

    def _does_match(self, node, name):
        # Delegate wildcard matching to the configured wildcard kind.
        arg = self.args.get(name, '')
        kind = 'default'
        if isinstance(arg, (tuple, list)):
            kind = arg[0]
            arg = arg[1]
        suspect = wildcards.Suspect(self.pymodule, node, name)
        return self.wildcards[kind].matches(suspect, arg)
class RawSimilarFinder(object):
    """A class for finding similar expressions and statements"""

    def __init__(self, source, node=None, does_match=None):
        if node is None:
            node = ast.parse(source)
        if does_match is None:
            self.does_match = self._simple_does_match
        else:
            self.does_match = does_match
        self._init_using_ast(node, source)

    def _simple_does_match(self, node, name):
        # Default wildcard rule: any expression or name matches.
        return isinstance(node, (ast.expr, ast.Name))

    def _init_using_ast(self, node, source):
        self.source = source
        # Cache of pattern -> matches, filled lazily by _get_matched_asts.
        self._matched_asts = {}
        if not hasattr(node, 'region'):
            # Attach source regions to AST nodes if not done already.
            patchedast.patch_ast(node, source)
        self.ast = node

    def get_matches(self, code, start=0, end=None, skip=None):
        """Search for `code` in source and return a list of `Match`\es

        `code` can contain wildcards.  ``${name}`` matches normal
        names and ``${?name} can match any expression.  You can use
        `Match.get_ast()` for getting the node that has matched a
        given pattern.

        """
        if end is None:
            end = len(self.source)
        for match in self._get_matched_asts(code):
            match_start, match_end = match.get_region()
            if start <= match_start and match_end <= end:
                # Optionally skip matches overlapping a given region.
                if skip is not None and (skip[0] < match_end and
                                         skip[1] > match_start):
                    continue
                yield match

    def _get_matched_asts(self, code):
        if code not in self._matched_asts:
            wanted = self._create_pattern(code)
            matches = _ASTMatcher(self.ast, wanted,
                                  self.does_match).find_matches()
            self._matched_asts[code] = matches
        return self._matched_asts[code]

    def _create_pattern(self, expression):
        """Parse `expression` into the AST pattern to search for."""
        expression = self._replace_wildcards(expression)
        node = ast.parse(expression)
        # Getting Module.Stmt.nodes
        nodes = node.body
        if len(nodes) == 1 and isinstance(nodes[0], ast.Expr):
            # A single expression: match on the expression node itself.
            # Getting Discard.expr
            wanted = nodes[0].value
        else:
            wanted = nodes
        return wanted

    def _replace_wildcards(self, expression):
        # Substitute ${name} placeholders with parseable marker names.
        ropevar = _RopeVariable()
        template = CodeTemplate(expression)
        mapping = {}
        for name in template.get_names():
            mapping[name] = ropevar.get_var(name)
        return template.substitute(mapping)
class _ASTMatcher(object):
    """Find occurrences of a pattern AST inside a body AST."""

    def __init__(self, body, pattern, does_match):
        """Searches the given pattern in the body AST.

        body is an AST node and pattern can be either an AST node or
        a list of ASTs nodes
        """
        self.body = body
        self.pattern = pattern
        self.matches = None
        self.ropevar = _RopeVariable()
        self.matches_callback = does_match

    def find_matches(self):
        # Results are computed once and cached in `self.matches`.
        if self.matches is None:
            self.matches = []
            ast.call_for_nodes(self.body, self._check_node, recursive=True)
        return self.matches

    def _check_node(self, node):
        # A list-valued pattern means we match statement sequences;
        # otherwise we match single expressions.
        if isinstance(self.pattern, list):
            self._check_statements(node)
        else:
            self._check_expression(node)

    def _check_expression(self, node):
        mapping = {}
        if self._match_nodes(self.pattern, node, mapping):
            self.matches.append(ExpressionMatch(node, mapping))

    def _check_statements(self, node):
        # Statement sequences live in list-valued child fields.
        for child in ast.get_children(node):
            if isinstance(child, (list, tuple)):
                self.__check_stmt_list(child)

    def __check_stmt_list(self, nodes):
        # Slide a window of len(pattern) consecutive statements.
        for index in range(len(nodes)):
            if len(nodes) - index >= len(self.pattern):
                current_stmts = nodes[index:index + len(self.pattern)]
                mapping = {}
                if self._match_stmts(current_stmts, mapping):
                    self.matches.append(StatementMatch(current_stmts, mapping))

    def _match_nodes(self, expected, node, mapping):
        # Structural recursive comparison; wildcard names in `expected`
        # capture the matched node into `mapping`.
        if isinstance(expected, ast.Name):
            if self.ropevar.is_var(expected.id):
                return self._match_wildcard(expected, node, mapping)
        if not isinstance(expected, ast.AST):
            # Plain values (identifiers, constants) compare by equality.
            return expected == node
        if expected.__class__ != node.__class__:
            return False
        children1 = self._get_children(expected)
        children2 = self._get_children(node)
        if len(children1) != len(children2):
            return False
        for child1, child2 in zip(children1, children2):
            if isinstance(child1, ast.AST):
                if not self._match_nodes(child1, child2, mapping):
                    return False
            elif isinstance(child1, (list, tuple)):
                if not isinstance(child2, (list, tuple)) or \
                   len(child1) != len(child2):
                    return False
                for c1, c2 in zip(child1, child2):
                    if not self._match_nodes(c1, c2, mapping):
                        return False
            else:
                if child1 != child2:
                    return False
        return True

    def _get_children(self, node):
        """Return not `ast.expr_context` children of `node`"""
        children = ast.get_children(node)
        return [child for child in children
                if not isinstance(child, ast.expr_context)]

    def _match_stmts(self, current_stmts, mapping):
        if len(current_stmts) != len(self.pattern):
            return False
        for stmt, expected in zip(current_stmts, self.pattern):
            if not self._match_nodes(expected, stmt, mapping):
                return False
        return True

    def _match_wildcard(self, node1, node2, mapping):
        # The first occurrence of a name binds it; later occurrences
        # must structurally match the node bound earlier.
        name = self.ropevar.get_base(node1.id)
        if name not in mapping:
            if self.matches_callback(node2, name):
                mapping[name] = node2
                return True
            return False
        else:
            return self._match_nodes(mapping[name], node2, {})
class Match(object):
    """Base class for pattern matches."""

    def __init__(self, mapping):
        # mapping: wildcard name -> matched AST node
        self.mapping = mapping

    def get_region(self):
        """Returns match region"""

    def get_ast(self, name):
        """Return the ast node that has matched rope variables"""
        return self.mapping.get(name, None)
class ExpressionMatch(Match):
    """A match consisting of a single expression node."""

    def __init__(self, ast, mapping):
        # NOTE(review): the `ast` parameter shadows the `ast` module
        # import; renaming would change the keyword interface.
        super(ExpressionMatch, self).__init__(mapping)
        self.ast = ast

    def get_region(self):
        # `region` is the (start, end) attached by patchedast.
        return self.ast.region
class StatementMatch(Match):
    """A match consisting of a sequence of statements."""

    def __init__(self, ast_list, mapping):
        super(StatementMatch, self).__init__(mapping)
        self.ast_list = ast_list

    def get_region(self):
        # From the start of the first statement to the end of the last.
        return self.ast_list[0].region[0], self.ast_list[-1].region[1]
class CodeTemplate(object):
    """A template containing ``${name}`` placeholders.

    Placeholders inside string literals and comments are ignored.
    """

    def __init__(self, template):
        self.template = template
        self._find_names()

    def _find_names(self):
        # Maps each placeholder name to its list of (start, end) spans.
        self.names = {}
        for match in CodeTemplate._get_pattern().finditer(self.template):
            # The 'name' group is always present in groupdict() (it is
            # defined in the pattern); it is None for string/comment
            # alternatives, so only the None-check is needed.
            if match.group('name') is not None:
                start, end = match.span('name')
                # Strip the surrounding '${' and '}'.
                name = self.template[start + 2:end - 1]
                self.names.setdefault(name, []).append((start, end))

    def get_names(self):
        return list(self.names.keys())

    def substitute(self, mapping):
        """Return the template with each name replaced via `mapping`."""
        collector = codeanalyze.ChangeCollector(self.template)
        for name, occurrences in self.names.items():
            for region in occurrences:
                collector.add_change(region[0], region[1], mapping[name])
        result = collector.get_changed()
        if result is None:
            # No changes collected; the template had no placeholders.
            return self.template
        return result

    _match_pattern = None

    @classmethod
    def _get_pattern(cls):
        # Compiled lazily; the comment/string alternatives come first so
        # placeholders inside them are consumed without being captured.
        if cls._match_pattern is None:
            pattern = codeanalyze.get_comment_pattern() + '|' + \
                      codeanalyze.get_string_pattern() + '|' + \
                      r'(?P<name>\$\{[^\s\$\}]*\})'
            cls._match_pattern = re.compile(pattern)
        return cls._match_pattern
class _RopeVariable(object):
    """Transform and identify rope inserted wildcards

    ``${name}`` placeholders are encoded as identifiers with one of two
    prefixes so that patterns remain parseable python; ``?``-prefixed
    (any-expression) names get `_any_prefix`, the rest `_normal_prefix`.
    """

    _normal_prefix = '__rope__variable_normal_'
    _any_prefix = '__rope__variable_any_'

    def get_var(self, name):
        """Encode wildcard `name` as a valid python identifier."""
        if name.startswith('?'):
            return self._get_any(name)
        else:
            return self._get_normal(name)

    def is_var(self, name):
        """Return True if `name` is an encoded wildcard identifier."""
        # Renamed private helper: `_is_any` now parallels `_get_any`.
        return self._is_normal(name) or self._is_any(name)

    def get_base(self, name):
        """Decode an encoded identifier back to the wildcard name."""
        if self._is_normal(name):
            return name[len(self._normal_prefix):]
        if self._is_any(name):
            return '?' + name[len(self._any_prefix):]

    def _get_normal(self, name):
        return self._normal_prefix + name

    def _get_any(self, name):
        # Drop the leading '?' before prefixing.
        return self._any_prefix + name[1:]

    def _is_normal(self, name):
        return name.startswith(self._normal_prefix)

    def _is_any(self, name):
        return name.startswith(self._any_prefix)
def make_pattern(code, variables):
    """Rewrite `Name` occurrences of `variables` in `code` as wildcards.

    Each reference to a variable becomes ``${variable}`` so the result
    can be used as a restructuring pattern.
    """
    variables = set(variables)
    collector = codeanalyze.ChangeCollector(code)

    def does_match(node, name):
        # Only plain name references of the requested variable match.
        return isinstance(node, ast.Name) and node.id == name

    finder = RawSimilarFinder(code, does_match=does_match)
    for variable in variables:
        for match in finder.get_matches('${%s}' % variable):
            start, end = match.get_region()
            collector.add_change(start, end, '${%s}' % variable)
    result = collector.get_changed()
    # ChangeCollector returns None when nothing changed.
    return result if result is not None else code
def _pydefined_to_str(pydefined):
    """Return a dotted-name string addressing a pyobject definition.

    Builtins are addressed as ``__builtins__.<name>``; everything else
    as ``<module>.<outer>.<...>.<name>``.  (Dataset residue previously
    fused onto the last line has been removed.)
    """
    if isinstance(pydefined, (builtins.BuiltinClass, builtins.BuiltinFunction)):
        return '__builtins__.' + pydefined.get_name()
    # Walk up the scope chain collecting names until the module level.
    address = []
    while pydefined.parent is not None:
        address.insert(0, pydefined.get_name())
        pydefined = pydefined.parent
    module_name = pydefined.pycore.modname(pydefined.resource)
    return '.'.join(module_name.split('.') + address)
from rope.base import ast, evaluate, builtins, pyobjects
from rope.refactor import patchedast, occurrences
class Wildcard(object):
    """Interface for restructuring wildcards."""

    def get_name(self):
        """Return the name of this wildcard"""

    def matches(self, suspect, arg):
        """Return `True` if `suspect` matches this wildcard"""
class Suspect(object):
    """A candidate wildcard match: AST `node` called `name` in `pymodule`."""

    def __init__(self, pymodule, node, name):
        self.pymodule = pymodule
        self.node = node
        self.name = name
class DefaultWildcard(object):
    """The default restructuring wildcard

    The argument passed to this wildcard is in the
    ``key1=value1,key2=value2,...`` format. Possible keys are:

    * name - for checking the reference
    * type - for checking the type
    * object - for checking the object
    * instance - for checking types but similar to builtin isinstance
    * exact - matching only occurrences with the same name as the wildcard
    * unsure - matching unsure occurrences

    """

    def __init__(self, project):
        self.project = project

    def get_name(self):
        return 'default'

    def matches(self, suspect, arg=''):
        args = parse_arg(arg)
        # Both the exactness and the object checks must pass.
        if not self._check_exact(args, suspect):
            return False
        if not self._check_object(args, suspect):
            return False
        return True

    def _check_object(self, args, suspect):
        kind = None
        expected = None
        unsure = args.get('unsure', False)
        # When several checks are given, the last one in this order wins.
        for check in ['name', 'object', 'type', 'instance']:
            if check in args:
                kind = check
                expected = args[check]
        if expected is not None:
            checker = _CheckObject(self.project, expected,
                                   kind, unsure=unsure)
            return checker(suspect.pymodule, suspect.node)
        return True

    def _check_exact(self, args, suspect):
        node = suspect.node
        if args.get('exact'):
            # Exact wildcards only match a name identical to their own.
            if not isinstance(node, ast.Name) or not node.id == suspect.name:
                return False
        else:
            # Otherwise any expression node may match.
            if not isinstance(node, ast.expr):
                return False
        return True
def parse_arg(arg):
    """Parse a ``key1=value1,key2=value2`` string into a dict.

    Tokens without ``=`` map to ``True``.  A dict argument is returned
    unchanged.
    """
    if isinstance(arg, dict):
        return arg
    result = {}
    for token in arg.split(','):
        key, sep, value = token.partition('=')
        if sep:
            result[key.strip()] = value.strip()
        else:
            result[token.strip()] = True
    return result
class _CheckObject(object):
    """Check whether a suspect node refers to an expected name/object/type.

    `kind` is one of 'name', 'object', 'type' or 'instance' (see
    `DefaultWildcard`).  (Dataset residue previously fused onto the last
    line has been removed.)
    """

    def __init__(self, project, expected, kind='object', unsure=False):
        self.project = project
        self.kind = kind
        self.unsure = unsure
        # `expected` is a dotted name; resolve it once, up front.
        self.expected = self._evaluate(expected)

    def __call__(self, pymodule, node):
        pyname = self._evaluate_node(pymodule, node)
        if pyname is None or self.expected is None:
            # Could not resolve; match only when 'unsure' was requested.
            return self.unsure
        if self._unsure_pyname(pyname, unbound=self.kind == 'name'):
            return True
        if self.kind == 'name':
            return self._same_pyname(self.expected, pyname)
        else:
            pyobject = pyname.get_object()
            if self.kind == 'object':
                objects = [pyobject]
            if self.kind == 'type':
                objects = [pyobject.get_type()]
            if self.kind == 'instance':
                # isinstance-like: also accept superclasses of the
                # object and of its type.
                objects = [pyobject]
                objects.extend(self._get_super_classes(pyobject))
                objects.extend(self._get_super_classes(pyobject.get_type()))
            for pyobject in objects:
                if self._same_pyobject(self.expected.get_object(), pyobject):
                    return True
            return False

    def _get_super_classes(self, pyobject):
        # Transitively collect all superclasses.
        result = []
        if isinstance(pyobject, pyobjects.AbstractClass):
            for superclass in pyobject.get_superclasses():
                result.append(superclass)
                result.extend(self._get_super_classes(superclass))
        return result

    def _same_pyobject(self, expected, pyobject):
        return expected == pyobject

    def _same_pyname(self, expected, pyname):
        return occurrences.same_pyname(expected, pyname)

    def _unsure_pyname(self, pyname, unbound=True):
        return self.unsure and occurrences.unsure_pyname(pyname, unbound)

    def _split_name(self, name):
        parts = name.split('.')
        expression, kind = parts[0], parts[-1]
        if len(parts) == 1:
            kind = 'name'
        return expression, kind

    def _evaluate_node(self, pymodule, node):
        scope = pymodule.get_scope().get_inner_scope_for_line(node.lineno)
        expression = node
        if isinstance(expression, ast.Name) and \
           isinstance(expression.ctx, ast.Store):
            # Assignment targets cannot be evaluated as nodes; evaluate
            # their source text instead.
            start, end = patchedast.node_region(expression)
            text = pymodule.source_code[start:end]
            return evaluate.eval_str(scope, text)
        else:
            return evaluate.eval_node(scope, expression)

    def _evaluate(self, code):
        attributes = code.split('.')
        pyname = None
        if attributes[0] in ('__builtin__', '__builtins__'):
            # Stub standing in for the builtins module.
            class _BuiltinsStub(object):
                def get_attribute(self, name):
                    return builtins.builtins[name]

                def __getitem__(self, name):
                    return builtins.builtins[name]

                def __contains__(self, name):
                    return name in builtins.builtins
            pyobject = _BuiltinsStub()
        else:
            pyobject = self.project.pycore.get_module(attributes[0])
        for attribute in attributes[1:]:
            pyname = pyobject[attribute]
            if pyname is None:
                return None
            pyobject = pyname.get_object()
        return pyname
try:
import unittest2 as unittest
except ImportError:
import unittest
import rope.base.taskhandle
import rope.refactor.introduce_parameter
import ropetest.refactor.extracttest
import ropetest.refactor.importutilstest
import ropetest.refactor.inlinetest
import ropetest.refactor.movetest
import ropetest.refactor.multiprojecttest
import ropetest.refactor.patchedasttest
import ropetest.refactor.renametest
import ropetest.refactor.restructuretest
import ropetest.refactor.suitestest
import ropetest.refactor.usefunctiontest
from rope.base.exceptions import RefactoringError, InterruptedTaskError
from rope.refactor.encapsulate_field import EncapsulateField
from rope.refactor.introduce_factory import IntroduceFactory
from rope.refactor.localtofield import LocalToField
from rope.refactor.method_object import MethodObject
from ropetest import testutils
from ropetest.refactor import change_signature_test, similarfindertest
class MethodObjectTest(unittest.TestCase):
    """Tests for the function-to-method-object refactoring.

    Uses ``assertEqual``; the ``assertEquals`` alias is deprecated and
    was removed in Python 3.12.
    """

    def setUp(self):
        super(MethodObjectTest, self).setUp()
        self.project = testutils.sample_project()
        self.pycore = self.project.pycore
        self.mod = testutils.create_module(self.project, 'mod')

    def tearDown(self):
        testutils.remove_project(self.project)
        super(MethodObjectTest, self).tearDown()

    def test_empty_method(self):
        code = 'def func():\n    pass\n'
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index('func'))
        self.assertEqual(
            'class _New(object):\n\n    def __call__(self):\n        pass\n',
            replacer.get_new_class('_New'))

    def test_trivial_return(self):
        code = 'def func():\n    return 1\n'
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index('func'))
        self.assertEqual(
            'class _New(object):\n\n    def __call__(self):'
            '\n        return 1\n',
            replacer.get_new_class('_New'))

    def test_multi_line_header(self):
        code = 'def func(\n    ):\n    return 1\n'
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index('func'))
        self.assertEqual(
            'class _New(object):\n\n    def __call__(self):'
            '\n        return 1\n',
            replacer.get_new_class('_New'))

    def test_a_single_parameter(self):
        code = 'def func(param):\n    return 1\n'
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index('func'))
        self.assertEqual(
            'class _New(object):\n\n'
            '    def __init__(self, param):\n        self.param = param\n\n'
            '    def __call__(self):\n        return 1\n',
            replacer.get_new_class('_New'))

    def test_self_parameter(self):
        code = 'def func(self):\n    return 1\n'
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index('func'))
        self.assertEqual(
            'class _New(object):\n\n'
            '    def __init__(self, host):\n        self.self = host\n\n'
            '    def __call__(self):\n        return 1\n',
            replacer.get_new_class('_New'))

    def test_simple_using_passed_parameters(self):
        code = 'def func(param):\n    return param\n'
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index('func'))
        self.assertEqual(
            'class _New(object):\n\n'
            '    def __init__(self, param):\n        self.param = param\n\n'
            '    def __call__(self):\n        return self.param\n',
            replacer.get_new_class('_New'))

    def test_self_keywords_and_args_parameters(self):
        code = 'def func(arg, *args, **kwds):\n' \
               '    result = arg + args[0] + kwds[arg]\n' \
               '    return result\n'
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index('func'))
        expected = 'class _New(object):\n\n' \
                   '    def __init__(self, arg, args, kwds):\n' \
                   '        self.arg = arg\n' \
                   '        self.args = args\n' \
                   '        self.kwds = kwds\n\n' \
                   '    def __call__(self):\n' \
                   '        result = self.arg + ' \
                   'self.args[0] + self.kwds[self.arg]\n' \
                   '        return result\n'
        self.assertEqual(expected, replacer.get_new_class('_New'))

    def test_performing_on_not_a_function(self):
        code = 'my_var = 10\n'
        self.mod.write(code)
        with self.assertRaises(RefactoringError):
            MethodObject(self.project, self.mod, code.index('my_var'))

    def test_changing_the_module(self):
        code = 'def func():\n    return 1\n'
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index('func'))
        self.project.do(replacer.get_changes('_New'))
        expected = 'def func():\n' \
                   '    return _New()()\n\n\n' \
                   'class _New(object):\n\n' \
                   '    def __call__(self):\n' \
                   '        return 1\n'
        self.assertEqual(expected, self.mod.read())

    def test_changing_the_module_and_class_methods(self):
        code = 'class C(object):\n\n' \
               '    def a_func(self):\n' \
               '        return 1\n\n' \
               '    def another_func(self):\n' \
               '        pass\n'
        self.mod.write(code)
        replacer = MethodObject(self.project, self.mod, code.index('func'))
        self.project.do(replacer.get_changes('_New'))
        expected = 'class C(object):\n\n' \
                   '    def a_func(self):\n' \
                   '        return _New(self)()\n\n' \
                   '    def another_func(self):\n' \
                   '        pass\n\n\n' \
                   'class _New(object):\n\n' \
                   '    def __init__(self, host):\n' \
                   '        self.self = host\n\n' \
                   '    def __call__(self):\n' \
                   '        return 1\n'
        self.assertEqual(expected, self.mod.read())
class IntroduceFactoryTest(unittest.TestCase):
    """Tests for the introduce-factory refactoring.

    Uses ``assertEqual``; the ``assertEquals`` alias is deprecated and
    was removed in Python 3.12.
    """

    def setUp(self):
        super(IntroduceFactoryTest, self).setUp()
        self.project = testutils.sample_project()
        self.pycore = self.project.pycore

    def tearDown(self):
        testutils.remove_project(self.project)
        super(IntroduceFactoryTest, self).tearDown()

    def _introduce_factory(self, resource, offset, *args, **kwds):
        factory_introducer = IntroduceFactory(self.project,
                                              resource, offset)
        changes = factory_introducer.get_changes(*args, **kwds)
        self.project.do(changes)

    def test_adding_the_method(self):
        code = 'class AClass(object):\n    an_attr = 10\n'
        mod = testutils.create_module(self.project, 'mod')
        mod.write(code)
        expected = 'class AClass(object):\n' \
                   '    an_attr = 10\n\n' \
                   '    @staticmethod\n' \
                   '    def create(*args, **kwds):\n' \
                   '        return AClass(*args, **kwds)\n'
        self._introduce_factory(mod, mod.read().index('AClass') + 1, 'create')
        self.assertEqual(expected, mod.read())

    def test_changing_occurances_in_the_main_module(self):
        code = 'class AClass(object):\n' \
               '    an_attr = 10\n' \
               'a_var = AClass()'
        mod = testutils.create_module(self.project, 'mod')
        mod.write(code)
        expected = 'class AClass(object):\n' \
                   '    an_attr = 10\n\n' \
                   '    @staticmethod\n' \
                   '    def create(*args, **kwds):\n' \
                   '        return AClass(*args, **kwds)\n'\
                   'a_var = AClass.create()'
        self._introduce_factory(mod, mod.read().index('AClass') + 1, 'create')
        self.assertEqual(expected, mod.read())

    def test_changing_occurances_with_arguments(self):
        code = 'class AClass(object):\n' \
               '    def __init__(self, arg):\n' \
               '        pass\n' \
               'a_var = AClass(10)\n'
        mod = testutils.create_module(self.project, 'mod')
        mod.write(code)
        expected = 'class AClass(object):\n' \
                   '    def __init__(self, arg):\n' \
                   '        pass\n\n' \
                   '    @staticmethod\n' \
                   '    def create(*args, **kwds):\n' \
                   '        return AClass(*args, **kwds)\n' \
                   'a_var = AClass.create(10)\n'
        self._introduce_factory(mod, mod.read().index('AClass') + 1, 'create')
        self.assertEqual(expected, mod.read())

    def test_changing_occurances_in_other_modules(self):
        mod1 = testutils.create_module(self.project, 'mod1')
        mod2 = testutils.create_module(self.project, 'mod2')
        mod1.write('class AClass(object):\n    an_attr = 10\n')
        mod2.write('import mod1\na_var = mod1.AClass()\n')
        self._introduce_factory(mod1, mod1.read().index('AClass') + 1,
                                'create')
        expected1 = 'class AClass(object):\n' \
                    '    an_attr = 10\n\n' \
                    '    @staticmethod\n' \
                    '    def create(*args, **kwds):\n' \
                    '        return AClass(*args, **kwds)\n'
        expected2 = 'import mod1\n' \
                    'a_var = mod1.AClass.create()\n'
        self.assertEqual(expected1, mod1.read())
        self.assertEqual(expected2, mod2.read())

    def test_raising_exception_for_non_classes(self):
        mod = testutils.create_module(self.project, 'mod')
        mod.write('def a_func():\n    pass\n')
        with self.assertRaises(RefactoringError):
            self._introduce_factory(mod, mod.read().index('a_func') + 1,
                                    'create')

    def test_undoing_introduce_factory(self):
        mod1 = testutils.create_module(self.project, 'mod1')
        mod2 = testutils.create_module(self.project, 'mod2')
        code1 = 'class AClass(object):\n    an_attr = 10\n'
        mod1.write(code1)
        code2 = 'from mod1 import AClass\na_var = AClass()\n'
        mod2.write(code2)
        self._introduce_factory(mod1, mod1.read().index('AClass') + 1,
                                'create')
        self.project.history.undo()
        self.assertEqual(code1, mod1.read())
        self.assertEqual(code2, mod2.read())

    def test_using_on_an_occurance_outside_the_main_module(self):
        mod1 = testutils.create_module(self.project, 'mod1')
        mod2 = testutils.create_module(self.project, 'mod2')
        mod1.write('class AClass(object):\n    an_attr = 10\n')
        mod2.write('import mod1\na_var = mod1.AClass()\n')
        self._introduce_factory(mod2, mod2.read().index('AClass') + 1,
                                'create')
        expected1 = 'class AClass(object):\n' \
                    '    an_attr = 10\n\n' \
                    '    @staticmethod\n' \
                    '    def create(*args, **kwds):\n' \
                    '        return AClass(*args, **kwds)\n'
        expected2 = 'import mod1\n' \
                    'a_var = mod1.AClass.create()\n'
        self.assertEqual(expected1, mod1.read())
        self.assertEqual(expected2, mod2.read())

    def test_introduce_factory_in_nested_scopes(self):
        code = 'def create_var():\n'\
               '    class AClass(object):\n'\
               '        an_attr = 10\n'\
               '    return AClass()\n'
        mod = testutils.create_module(self.project, 'mod')
        mod.write(code)
        expected = 'def create_var():\n'\
                   '    class AClass(object):\n'\
                   '        an_attr = 10\n\n'\
                   '        @staticmethod\n        ' \
                   'def create(*args, **kwds):\n'\
                   '            return AClass(*args, **kwds)\n'\
                   '    return AClass.create()\n'
        self._introduce_factory(mod, mod.read().index('AClass') + 1, 'create')
        self.assertEqual(expected, mod.read())

    def test_adding_factory_for_global_factories(self):
        code = 'class AClass(object):\n    an_attr = 10\n'
        mod = testutils.create_module(self.project, 'mod')
        mod.write(code)
        expected = 'class AClass(object):\n' \
                   '    an_attr = 10\n\n' \
                   'def create(*args, **kwds):\n' \
                   '    return AClass(*args, **kwds)\n'
        self._introduce_factory(mod, mod.read().index('AClass') + 1,
                                'create', global_factory=True)
        self.assertEqual(expected, mod.read())

    def test_get_name_for_factories(self):
        code = 'class C(object):\n    pass\n'
        mod = testutils.create_module(self.project, 'mod')
        mod.write(code)
        factory = IntroduceFactory(self.project, mod,
                                   mod.read().index('C') + 1)
        self.assertEqual('C', factory.get_name())

    def test_raising_exception_for_global_factory_for_nested_classes(self):
        code = 'def create_var():\n'\
               '    class AClass(object):\n'\
               '        an_attr = 10\n'\
               '    return AClass()\n'
        mod = testutils.create_module(self.project, 'mod')
        mod.write(code)
        with self.assertRaises(RefactoringError):
            self._introduce_factory(mod, mod.read().index('AClass') + 1,
                                    'create', global_factory=True)

    def test_changing_occurances_in_the_main_module_for_global_factories(self):
        code = 'class AClass(object):\n' \
               '    an_attr = 10\n' \
               'a_var = AClass()'
        mod = testutils.create_module(self.project, 'mod')
        mod.write(code)
        expected = 'class AClass(object):\n    an_attr = 10\n\n' \
                   'def create(*args, **kwds):\n' \
                   '    return AClass(*args, **kwds)\n'\
                   'a_var = create()'
        self._introduce_factory(mod, mod.read().index('AClass') + 1,
                                'create', global_factory=True)
        self.assertEqual(expected, mod.read())

    def test_changing_occurances_in_other_modules_for_global_factories(self):
        mod1 = testutils.create_module(self.project, 'mod1')
        mod2 = testutils.create_module(self.project, 'mod2')
        mod1.write('class AClass(object):\n    an_attr = 10\n')
        mod2.write('import mod1\na_var = mod1.AClass()\n')
        self._introduce_factory(mod1, mod1.read().index('AClass') + 1,
                                'create', global_factory=True)
        expected1 = 'class AClass(object):\n' \
                    '    an_attr = 10\n\n' \
                    'def create(*args, **kwds):\n' \
                    '    return AClass(*args, **kwds)\n'
        expected2 = 'import mod1\n' \
                    'a_var = mod1.create()\n'
        self.assertEqual(expected1, mod1.read())
        self.assertEqual(expected2, mod2.read())

    def test_import_if_necessary_in_other_mods_for_global_factories(self):
        mod1 = testutils.create_module(self.project, 'mod1')
        mod2 = testutils.create_module(self.project, 'mod2')
        mod1.write('class AClass(object):\n    an_attr = 10\n')
        mod2.write('from mod1 import AClass\npair = AClass(), AClass\n')
        self._introduce_factory(mod1, mod1.read().index('AClass') + 1,
                                'create', global_factory=True)
        expected1 = 'class AClass(object):\n' \
                    '    an_attr = 10\n\n' \
                    'def create(*args, **kwds):\n' \
                    '    return AClass(*args, **kwds)\n'
        expected2 = 'from mod1 import AClass, create\n' \
                    'pair = create(), AClass\n'
        self.assertEqual(expected1, mod1.read())
        self.assertEqual(expected2, mod2.read())

    def test_changing_occurances_for_renamed_classes(self):
        code = 'class AClass(object):\n    an_attr = 10' \
               '\na_class = AClass\na_var = a_class()'
        mod = testutils.create_module(self.project, 'mod')
        mod.write(code)
        expected = 'class AClass(object):\n' \
                   '    an_attr = 10\n\n' \
                   '    @staticmethod\n' \
                   '    def create(*args, **kwds):\n' \
                   '        return AClass(*args, **kwds)\n' \
                   'a_class = AClass\n' \
                   'a_var = a_class()'
        self._introduce_factory(mod, mod.read().index('a_class') + 1, 'create')
        self.assertEqual(expected, mod.read())

    def test_changing_occurrs_in_the_same_module_with_conflict_ranges(self):
        mod = testutils.create_module(self.project, 'mod')
        code = 'class C(object):\n' \
               '    def create(self):\n' \
               '        return C()\n'
        mod.write(code)
        self._introduce_factory(mod, mod.read().index('C'), 'create_c', True)
        expected = 'class C(object):\n' \
                   '    def create(self):\n' \
                   '        return create_c()\n'
        self.assertTrue(mod.read().startswith(expected))

    def _transform_module_to_package(self, resource):
        self.project.do(rope.refactor.ModuleToPackage(
            self.project, resource).get_changes())

    def test_transform_module_to_package(self):
        mod1 = testutils.create_module(self.project, 'mod1')
        mod1.write('import mod2\nfrom mod2 import AClass\n')
        mod2 = testutils.create_module(self.project, 'mod2')
        mod2.write('class AClass(object):\n    pass\n')
        self._transform_module_to_package(mod2)
        mod2 = self.project.get_resource('mod2')
        root_folder = self.project.root
        self.assertFalse(root_folder.has_child('mod2.py'))
        self.assertEqual('class AClass(object):\n    pass\n',
                         root_folder.get_child('mod2').
                         get_child('__init__.py').read())

    def test_transform_module_to_package_undoing(self):
        pkg = testutils.create_package(self.project, 'pkg')
        mod = testutils.create_module(self.project, 'mod', pkg)
        self._transform_module_to_package(mod)
        self.assertFalse(pkg.has_child('mod.py'))
        self.assertTrue(pkg.get_child('mod').has_child('__init__.py'))
        self.project.history.undo()
        self.assertTrue(pkg.has_child('mod.py'))
        self.assertFalse(pkg.has_child('mod'))

    def test_transform_module_to_package_with_relative_imports(self):
        pkg = testutils.create_package(self.project, 'pkg')
        mod1 = testutils.create_module(self.project, 'mod1', pkg)
        mod1.write('import mod2\nfrom mod2 import AClass\n')
        mod2 = testutils.create_module(self.project, 'mod2', pkg)
        mod2.write('class AClass(object):\n    pass\n')
        self._transform_module_to_package(mod1)
        new_init = self.project.get_resource('pkg/mod1/__init__.py')
        self.assertEqual('import pkg.mod2\nfrom pkg.mod2 import AClass\n',
                         new_init.read())

    def test_resources_parameter(self):
        code = 'class A(object):\n    an_attr = 10\n'
        code1 = 'import mod\na = mod.A()\n'
        mod = testutils.create_module(self.project, 'mod')
        mod1 = testutils.create_module(self.project, 'mod1')
        mod.write(code)
        mod1.write(code1)
        expected = 'class A(object):\n' \
                   '    an_attr = 10\n\n' \
                   '    @staticmethod\n' \
                   '    def create(*args, **kwds):\n' \
                   '        return A(*args, **kwds)\n'
        self._introduce_factory(mod, mod.read().index('A') + 1,
                                'create', resources=[mod])
        self.assertEqual(expected, mod.read())
        self.assertEqual(code1, mod1.read())
class EncapsulateFieldTest(unittest.TestCase):
def setUp(self):
super(EncapsulateFieldTest, self).setUp()
self.project = testutils.sample_project()
self.pycore = self.project.pycore
self.mod = testutils.create_module(self.project, 'mod')
self.mod1 = testutils.create_module(self.project, 'mod1')
self.a_class = 'class A(object):\n' \
' def __init__(self):\n' \
' self.attr = 1\n'
self.added_methods = '\n' \
' def get_attr(self):\n' \
' return self.attr\n\n' \
' def set_attr(self, value):\n' \
' self.attr = value\n'
self.encapsulated = self.a_class + self.added_methods
def tearDown(self):
testutils.remove_project(self.project)
super(EncapsulateFieldTest, self).tearDown()
def _encapsulate(self, resource, offset, **args):
changes = EncapsulateField(self.project, resource, offset).\
get_changes(**args)
self.project.do(changes)
def test_adding_getters_and_setters(self):
code = self.a_class
self.mod.write(code)
self._encapsulate(self.mod, code.index('attr') + 1)
self.assertEquals(self.encapsulated, self.mod.read())
def test_changing_getters_in_other_modules(self):
code = 'import mod\n' \
'a_var = mod.A()\n' \
'range(a_var.attr)\n'
self.mod1.write(code)
self.mod.write(self.a_class)
self._encapsulate(self.mod, self.mod.read().index('attr') + 1)
expected = 'import mod\n' \
'a_var = mod.A()\n' \
'range(a_var.get_attr())\n'
self.assertEquals(expected, self.mod1.read())
def test_changing_setters_in_other_modules(self):
code = 'import mod\n' \
'a_var = mod.A()\n' \
'a_var.attr = 1\n'
self.mod1.write(code)
self.mod.write(self.a_class)
self._encapsulate(self.mod, self.mod.read().index('attr') + 1)
expected = 'import mod\n' \
'a_var = mod.A()\n' \
'a_var.set_attr(1)\n'
self.assertEquals(expected, self.mod1.read())
def test_changing_getters_in_setters(self):
code = 'import mod\n' \
'a_var = mod.A()\n' \
'a_var.attr = 1 + a_var.attr\n'
self.mod1.write(code)
self.mod.write(self.a_class)
self._encapsulate(self.mod, self.mod.read().index('attr') + 1)
expected = 'import mod\n' \
'a_var = mod.A()\n' \
'a_var.set_attr(1 + a_var.get_attr())\n'
self.assertEquals(expected, self.mod1.read())
def test_appending_to_class_end(self):
self.mod1.write(self.a_class + 'a_var = A()\n')
self._encapsulate(self.mod1, self.mod1.read().index('attr') + 1)
self.assertEquals(self.encapsulated + 'a_var = A()\n',
self.mod1.read())
def test_performing_in_other_modules(self):
code = 'import mod\n' \
'a_var = mod.A()\n' \
'range(a_var.attr)\n'
self.mod1.write(code)
self.mod.write(self.a_class)
self._encapsulate(self.mod1, self.mod1.read().index('attr') + 1)
self.assertEquals(self.encapsulated, self.mod.read())
expected = 'import mod\n' \
'a_var = mod.A()\n' \
'range(a_var.get_attr())\n'
self.assertEquals(expected, self.mod1.read())
def test_changing_main_module_occurances(self):
code = self.a_class + \
'a_var = A()\n' \
'a_var.attr = a_var.attr * 2\n'
self.mod1.write(code)
self._encapsulate(self.mod1, self.mod1.read().index('attr') + 1)
expected = self.encapsulated + \
'a_var = A()\n' \
'a_var.set_attr(a_var.get_attr() * 2)\n'
self.assertEquals(expected, self.mod1.read())
def test_raising_exception_when_performed_on_non_attributes(self):
self.mod1.write('attr = 10')
with self.assertRaises(RefactoringError):
self._encapsulate(self.mod1, self.mod1.read().index('attr') + 1)
def test_raising_exception_on_tuple_assignments(self):
self.mod.write(self.a_class)
code = 'import mod\n' \
'a_var = mod.A()\n' \
'a_var.attr = 1\n' \
'a_var.attr, b = 1, 2\n'
self.mod1.write(code)
with self.assertRaises(RefactoringError):
self._encapsulate(self.mod1, self.mod1.read().index('attr') + 1)
def test_raising_exception_on_tuple_assignments2(self):
self.mod.write(self.a_class)
code = 'import mod\n' \
'a_var = mod.A()\n' \
'a_var.attr = 1\n' \
'b, a_var.attr = 1, 2\n'
self.mod1.write(code)
with self.assertRaises(RefactoringError):
self._encapsulate(self.mod1, self.mod1.read().index('attr') + 1)
def test_tuple_assignments_and_function_calls(self):
code = 'import mod\n' \
'def func(a1=0, a2=0):\n' \
' pass\n' \
'a_var = mod.A()\n' \
'func(a_var.attr, a2=2)\n'
self.mod1.write(code)
self.mod.write(self.a_class)
self._encapsulate(self.mod, self.mod.read().index('attr') + 1)
expected = 'import mod\n' \
'def func(a1=0, a2=0):\n' \
' pass\n' \
'a_var = mod.A()\n' \
'func(a_var.get_attr(), a2=2)\n'
self.assertEquals(expected, self.mod1.read())
def test_tuple_assignments(self):
    """Field reads on the right of a tuple assignment use the getter."""
    code = 'import mod\n' \
           'a_var = mod.A()\n' \
           'a, b = a_var.attr, 1\n'
    self.mod1.write(code)
    self.mod.write(self.a_class)
    self._encapsulate(self.mod, self.mod.read().index('attr') + 1)
    expected = 'import mod\n' \
               'a_var = mod.A()\n' \
               'a, b = a_var.get_attr(), 1\n'
    # `assertEquals` is a deprecated alias; use `assertEqual`.
    self.assertEqual(expected, self.mod1.read())
def test_changing_augmented_assignments(self):
    """`+=` on a field expands to set_attr(get_attr() + rhs)."""
    code = 'import mod\n' \
           'a_var = mod.A()\n' \
           'a_var.attr += 1\n'
    self.mod1.write(code)
    self.mod.write(self.a_class)
    self._encapsulate(self.mod, self.mod.read().index('attr') + 1)
    expected = 'import mod\n' \
               'a_var = mod.A()\n' \
               'a_var.set_attr(a_var.get_attr() + 1)\n'
    # `assertEquals` is a deprecated alias; use `assertEqual`.
    self.assertEqual(expected, self.mod1.read())
def test_changing_augmented_assignments2(self):
    """`<<=` on a field expands to set_attr(get_attr() << rhs)."""
    code = 'import mod\n' \
           'a_var = mod.A()\n' \
           'a_var.attr <<= 1\n'
    self.mod1.write(code)
    self.mod.write(self.a_class)
    self._encapsulate(self.mod, self.mod.read().index('attr') + 1)
    expected = 'import mod\n' \
               'a_var = mod.A()\n' \
               'a_var.set_attr(a_var.get_attr() << 1)\n'
    # `assertEquals` is a deprecated alias; use `assertEqual`.
    self.assertEqual(expected, self.mod1.read())
def test_changing_occurrences_inside_the_class(self):
    """Assignments through `self` inside the class use the setter."""
    new_class = self.a_class + '\n' \
        '    def a_func(self):\n' \
        '        self.attr = 1\n'
    self.mod.write(new_class)
    self._encapsulate(self.mod, self.mod.read().index('attr') + 1)
    expected = self.a_class + '\n' \
        '    def a_func(self):\n' \
        '        self.set_attr(1)\n' + \
        self.added_methods
    # `assertEquals` is a deprecated alias; use `assertEqual`.
    self.assertEqual(expected, self.mod.read())
def test_getter_and_setter_parameters(self):
    """Custom getter/setter names passed as parameters are honored."""
    self.mod.write(self.a_class)
    self._encapsulate(self.mod, self.mod.read().index('attr') + 1,
                      getter='getAttr', setter='setAttr')
    new_methods = self.added_methods.replace('get_attr', 'getAttr').\
        replace('set_attr', 'setAttr')
    expected = self.a_class + new_methods
    # `assertEquals` is a deprecated alias; use `assertEqual`.
    self.assertEqual(expected, self.mod.read())
def test_using_resources_parameter(self):
    """Only resources listed in `resources` are rewritten."""
    self.mod1.write('import mod\na = mod.A()\nvar = a.attr\n')
    self.mod.write(self.a_class)
    self._encapsulate(self.mod, self.mod.read().index('attr') + 1,
                      resources=[self.mod])
    # mod1 was not in `resources`, so it must be left untouched.
    # `assertEquals` is a deprecated alias; use `assertEqual`.
    self.assertEqual('import mod\na = mod.A()\nvar = a.attr\n',
                     self.mod1.read())
    expected = self.a_class + self.added_methods
    self.assertEqual(expected, self.mod.read())
class LocalToFieldTest(unittest.TestCase):
    """Tests for the "convert local variable to field" refactoring."""

    def setUp(self):
        super(LocalToFieldTest, self).setUp()
        self.project = testutils.sample_project()
        self.pycore = self.project.pycore
        self.mod = testutils.create_module(self.project, 'mod')

    def tearDown(self):
        testutils.remove_project(self.project)
        super(LocalToFieldTest, self).tearDown()

    def _perform_convert_local_variable_to_field(self, resource, offset):
        # Build and apply the LocalToField change set in one step.
        changes = LocalToField(
            self.project, resource, offset).get_changes()
        self.project.do(changes)

    def test_simple_local_to_field(self):
        code = 'class A(object):\n' \
               '    def a_func(self):\n' \
               '        var = 10\n'
        self.mod.write(code)
        self._perform_convert_local_variable_to_field(self.mod,
                                                      code.index('var') + 1)
        expected = 'class A(object):\n' \
                   '    def a_func(self):\n' \
                   '        self.var = 10\n'
        # `assertEquals` is a deprecated alias; use `assertEqual`.
        self.assertEqual(expected, self.mod.read())

    def test_raising_exception_when_performed_on_a_global_var(self):
        self.mod.write('var = 10\n')
        with self.assertRaises(RefactoringError):
            self._perform_convert_local_variable_to_field(
                self.mod, self.mod.read().index('var') + 1)

    def test_raising_exception_when_performed_on_field(self):
        code = 'class A(object):\n' \
               '    def a_func(self):\n' \
               '        self.var = 10\n'
        self.mod.write(code)
        with self.assertRaises(RefactoringError):
            self._perform_convert_local_variable_to_field(
                self.mod, self.mod.read().index('var') + 1)

    def test_raising_exception_when_performed_on_a_parameter(self):
        code = 'class A(object):\n' \
               '    def a_func(self, var):\n' \
               '        a = var\n'
        self.mod.write(code)
        with self.assertRaises(RefactoringError):
            self._perform_convert_local_variable_to_field(
                self.mod, self.mod.read().index('var') + 1)

    # NOTE: This situation happens a lot and is normally not an error
    #@testutils.assert_raises(RefactoringError)
    def test_not_rais_exception_when_there_is_a_field_with_the_same_name(self):
        code = 'class A(object):\n' \
               '    def __init__(self):\n' \
               '        self.var = 1\n' \
               '    def a_func(self):\n        var = 10\n'
        self.mod.write(code)
        self._perform_convert_local_variable_to_field(
            self.mod, self.mod.read().rindex('var') + 1)

    def test_local_to_field_with_self_renamed(self):
        code = 'class A(object):\n' \
               '    def a_func(myself):\n' \
               '        var = 10\n'
        self.mod.write(code)
        self._perform_convert_local_variable_to_field(self.mod,
                                                      code.index('var') + 1)
        expected = 'class A(object):\n' \
                   '    def a_func(myself):\n' \
                   '        myself.var = 10\n'
        self.assertEqual(expected, self.mod.read())
class IntroduceParameterTest(unittest.TestCase):
    """Tests for the "introduce parameter" refactoring."""

    def setUp(self):
        super(IntroduceParameterTest, self).setUp()
        self.project = testutils.sample_project()
        self.pycore = self.project.pycore
        self.mod = testutils.create_module(self.project, 'mod')

    def tearDown(self):
        testutils.remove_project(self.project)
        super(IntroduceParameterTest, self).tearDown()

    def _introduce_parameter(self, offset, name):
        rope.refactor.introduce_parameter.IntroduceParameter(
            self.project, self.mod, offset).get_changes(name).do()

    def test_simple_case(self):
        code = 'var = 1\n' \
               'def f():\n' \
               '    b = var\n'
        self.mod.write(code)
        offset = self.mod.read().rindex('var')
        self._introduce_parameter(offset, 'var')
        expected = 'var = 1\n' \
                   'def f(var=var):\n' \
                   '    b = var\n'
        # `assertEquals` is a deprecated alias; use `assertEqual`.
        self.assertEqual(expected, self.mod.read())

    def test_changing_function_body(self):
        code = 'var = 1\n' \
               'def f():\n' \
               '    b = var\n'
        self.mod.write(code)
        offset = self.mod.read().rindex('var')
        self._introduce_parameter(offset, 'p1')
        expected = 'var = 1\n' \
                   'def f(p1=var):\n' \
                   '    b = p1\n'
        self.assertEqual(expected, self.mod.read())

    def test_unknown_variables(self):
        self.mod.write('def f():\n    b = var + c\n')
        offset = self.mod.read().rindex('var')
        with self.assertRaises(RefactoringError):
            self._introduce_parameter(offset, 'p1')
            # unreachable once the exception is raised above
            self.assertEqual('def f(p1=var):\n    b = p1 + c\n',
                             self.mod.read())

    def test_failing_when_not_inside(self):
        self.mod.write('var = 10\nb = var\n')
        offset = self.mod.read().rindex('var')
        with self.assertRaises(RefactoringError):
            self._introduce_parameter(offset, 'p1')

    def test_attribute_accesses(self):
        code = 'class C(object):\n' \
               '    a = 10\n' \
               'c = C()\n' \
               'def f():\n' \
               '    b = c.a\n'
        self.mod.write(code)
        offset = self.mod.read().rindex('a')
        self._introduce_parameter(offset, 'p1')
        expected = 'class C(object):\n' \
                   '    a = 10\n' \
                   'c = C()\n' \
                   'def f(p1=c.a):\n' \
                   '    b = p1\n'
        self.assertEqual(expected, self.mod.read())

    def test_introducing_parameters_for_methods(self):
        code = 'var = 1\n' \
               'class C(object):\n' \
               '    def f(self):\n' \
               '        b = var\n'
        self.mod.write(code)
        offset = self.mod.read().rindex('var')
        self._introduce_parameter(offset, 'p1')
        expected = 'var = 1\n' \
                   'class C(object):\n' \
                   '    def f(self, p1=var):\n' \
                   '        b = p1\n'
        self.assertEqual(expected, self.mod.read())
class _MockTaskObserver(object):
def __init__(self):
self.called = 0
def __call__(self):
self.called += 1
class TaskHandleTest(unittest.TestCase):
    """Tests for `rope.base.taskhandle.TaskHandle` and its job sets."""

    def test_trivial_case(self):
        handle = rope.base.taskhandle.TaskHandle()
        self.assertFalse(handle.is_stopped())

    def test_stopping(self):
        handle = rope.base.taskhandle.TaskHandle()
        handle.stop()
        self.assertTrue(handle.is_stopped())

    def test_job_sets(self):
        handle = rope.base.taskhandle.TaskHandle()
        jobs = handle.create_jobset()
        # `assertEquals` is a deprecated alias; use `assertEqual`.
        self.assertEqual([jobs], handle.get_jobsets())

    def test_starting_and_finishing_jobs(self):
        handle = rope.base.taskhandle.TaskHandle()
        jobs = handle.create_jobset(name='test job set', count=1)
        jobs.started_job('job1')
        jobs.finished_job()

    def test_test_checking_status(self):
        handle = rope.base.taskhandle.TaskHandle()
        jobs = handle.create_jobset()
        handle.stop()
        with self.assertRaises(InterruptedTaskError):
            jobs.check_status()

    def test_test_checking_status_when_starting(self):
        handle = rope.base.taskhandle.TaskHandle()
        jobs = handle.create_jobset()
        handle.stop()
        with self.assertRaises(InterruptedTaskError):
            jobs.started_job('job1')

    def test_calling_the_observer_after_stopping(self):
        handle = rope.base.taskhandle.TaskHandle()
        observer = _MockTaskObserver()
        handle.add_observer(observer)
        handle.stop()
        self.assertEqual(1, observer.called)

    def test_calling_the_observer_after_creating_job_sets(self):
        handle = rope.base.taskhandle.TaskHandle()
        observer = _MockTaskObserver()
        handle.add_observer(observer)
        jobs = handle.create_jobset()  # noqa
        self.assertEqual(1, observer.called)

    def test_calling_the_observer_when_starting_and_finishing_jobs(self):
        handle = rope.base.taskhandle.TaskHandle()
        observer = _MockTaskObserver()
        handle.add_observer(observer)
        jobs = handle.create_jobset(name='test job set', count=1)
        jobs.started_job('job1')
        jobs.finished_job()
        # one notification per event: create, start, finish
        self.assertEqual(3, observer.called)

    def test_job_set_get_percent_done(self):
        handle = rope.base.taskhandle.TaskHandle()
        jobs = handle.create_jobset(name='test job set', count=2)
        self.assertEqual(0, jobs.get_percent_done())
        jobs.started_job('job1')
        jobs.finished_job()
        self.assertEqual(50, jobs.get_percent_done())
        jobs.started_job('job2')
        jobs.finished_job()
        self.assertEqual(100, jobs.get_percent_done())

    def test_getting_job_name(self):
        handle = rope.base.taskhandle.TaskHandle()
        jobs = handle.create_jobset(name='test job set', count=1)
        self.assertEqual('test job set', jobs.get_name())
        self.assertIsNone(jobs.get_active_job_name())
        jobs.started_job('job1')
        self.assertEqual('job1', jobs.get_active_job_name())
def suite():
    """Assemble the full refactoring test suite.

    Uses `TestLoader.loadTestsFromTestCase` instead of the deprecated
    `unittest.makeSuite` (removed in Python 3.13).
    """
    loader = unittest.defaultTestLoader
    result = unittest.TestSuite()
    result.addTests(ropetest.refactor.renametest.suite())
    result.addTests(loader.loadTestsFromTestCase(
        ropetest.refactor.extracttest.ExtractMethodTest))
    result.addTests(loader.loadTestsFromTestCase(IntroduceFactoryTest))
    result.addTests(loader.loadTestsFromTestCase(
        ropetest.refactor.movetest.MoveRefactoringTest))
    result.addTests(ropetest.refactor.inlinetest.suite())
    result.addTests(loader.loadTestsFromTestCase(
        ropetest.refactor.patchedasttest.PatchedASTTest))
    result.addTests(loader.loadTestsFromTestCase(EncapsulateFieldTest))
    result.addTests(loader.loadTestsFromTestCase(LocalToFieldTest))
    result.addTests(loader.loadTestsFromTestCase(
        change_signature_test.ChangeSignatureTest))
    result.addTests(loader.loadTestsFromTestCase(IntroduceParameterTest))
    result.addTests(ropetest.refactor.importutilstest.suite())
    result.addTests(similarfindertest.suite())
    result.addTests(loader.loadTestsFromTestCase(TaskHandleTest))
    result.addTests(loader.loadTestsFromTestCase(
        ropetest.refactor.restructuretest.RestructureTest))
    result.addTests(loader.loadTestsFromTestCase(
        ropetest.refactor.suitestest.SuiteTest))
    result.addTests(loader.loadTestsFromTestCase(
        ropetest.refactor.multiprojecttest.MultiProjectRefactoringTest))
    result.addTests(loader.loadTestsFromTestCase(
        ropetest.refactor.usefunctiontest.UseFunctionTest))
    return result
if __name__ == '__main__':
    # Fixed: stray dataset-metadata text was fused onto the last line,
    # which made the module unparsable.
    import sys
    if len(sys.argv) > 1:
        # Any extra argument delegates test selection to unittest.main().
        unittest.main()
    else:
        runner = unittest.TextTestRunner()
        result = runner.run(suite())
        # Exit non-zero when any test failed.
        sys.exit(not result.wasSuccessful())
from rope.base import ast, evaluate, pyobjects
def find_errors(project, resource):
    """Find possible bad name and attribute accesses.

    Walks the module's AST with `_BadAccessFinder` and returns a list
    of ``Error`` objects.  (Docstring rephrased: the old ```Error`\\s```
    contained an invalid ``\\s`` escape.)
    """
    pymodule = project.get_pymodule(resource)
    finder = _BadAccessFinder(pymodule)
    ast.walk(pymodule.get_ast(), finder)
    return finder.errors
class _BadAccessFinder(object):
    """AST visitor collecting unresolved / use-before-definition accesses.

    The `_Name` / `_Attribute` methods follow rope's `ast.walk` visitor
    naming convention.  Results accumulate in `self.errors`.
    """

    def __init__(self, pymodule):
        self.pymodule = pymodule
        self.scope = pymodule.get_scope()
        self.errors = []

    def _Name(self, node):
        # Only loads can be "bad"; stores and parameters define names.
        if isinstance(node.ctx, (ast.Store, ast.Param)):
            return
        scope = self.scope.get_inner_scope_for_line(node.lineno)
        pyname = scope.lookup(node.id)
        if pyname is None:
            self._add_error(node, 'Unresolved variable')
        elif self._is_defined_after(scope, pyname, node.lineno):
            self._add_error(node, 'Defined later')

    def _Attribute(self, node):
        if not isinstance(node.ctx, ast.Store):
            scope = self.scope.get_inner_scope_for_line(node.lineno)
            pyname = evaluate.eval_node(scope, node.value)
            # Only report when the receiver's type is actually known.
            if pyname is not None and \
                    pyname.get_object() != pyobjects.get_unknown():
                if node.attr not in pyname.get_object():
                    self._add_error(node, 'Unresolved attribute')
        # Keep walking into the value expression for nested accesses.
        ast.walk(node.value, self)

    def _add_error(self, node, msg):
        if isinstance(node, ast.Attribute):
            name = node.attr
        else:
            name = node.id
        # Never report `None` itself (it may parse as a plain Name).
        if name != 'None':
            error = Error(node.lineno, msg + ' ' + name)
            self.errors.append(error)

    def _is_defined_after(self, scope, pyname, lineno):
        # True when the definition line lies at/after the use but still
        # inside the same scope, i.e. use-before-definition.
        location = pyname.get_definition_location()
        if location is not None and location[1] is not None:
            if location[0] == self.pymodule and \
                    lineno <= location[1] <= scope.get_end():
                return True
class Error(object):
    """A possible bad access, reported with its line number.

    Fixed: stray dataset-metadata text was fused onto the final line,
    which made the module unparsable.
    """

    def __init__(self, lineno, error):
        self.lineno = lineno
        self.error = error

    def __str__(self):
        return '%s: %s' % (self.lineno, self.error)
import rope.base.codeanalyze
import rope.base.evaluate
import rope.base.pyobjects
from rope.base import taskhandle, exceptions, worder
from rope.contrib import fixsyntax
from rope.refactor import occurrences
def find_occurrences(project, resource, offset, unsure=False, resources=None,
                     in_hierarchy=False,
                     task_handle=taskhandle.NullTaskHandle()):
    """Return a list of `Location` objects.

    If `unsure` is `True`, possible matches are returned, too.  You can
    use `Location.unsure` to see which are unsure occurrences.

    `resources` can be a list of `rope.base.resources.File` objects that
    should be searched for occurrences; if `None` all python files in
    the project are searched.  (Docstring rephrased: the old
    ```Location`\\s``` spelling contained an invalid ``\\s`` escape.)
    """
    name = worder.get_name_at(resource, offset)
    this_pymodule = project.get_pymodule(resource)
    primary, pyname = rope.base.evaluate.eval_location2(
        this_pymodule, offset)

    def is_match(occurrence):
        # With unsure=True every candidate occurrence is accepted.
        return unsure

    finder = occurrences.create_finder(
        project, name, pyname, unsure=is_match,
        in_hierarchy=in_hierarchy, instance=primary)
    if resources is None:
        resources = project.get_python_files()
    job_set = task_handle.create_jobset('Finding Occurrences',
                                        count=len(resources))
    return _find_locations(finder, resources, job_set)
def find_implementations(project, resource, offset, resources=None,
                         task_handle=taskhandle.NullTaskHandle()):
    """Find the places a given method is overridden.

    Returns a list of `Location` objects.  Raises `BadIdentifierError`
    when `offset` is not on a resolvable method name.
    """
    name = worder.get_name_at(resource, offset)
    this_pymodule = project.get_pymodule(resource)
    pyname = rope.base.evaluate.eval_location(this_pymodule, offset)
    if pyname is not None:
        pyobject = pyname.get_object()
        if not isinstance(pyobject, rope.base.pyobjects.PyFunction) or \
                pyobject.get_kind() != 'method':
            raise exceptions.BadIdentifierError('Not a method!')
    else:
        raise exceptions.BadIdentifierError('Cannot resolve the identifier!')

    def is_defined(occurrence):
        # Returns False to reject non-definitions; falls through (None)
        # otherwise, leaving the decision to later filters.
        if not occurrence.is_defined():
            return False

    def not_self(occurrence):
        # Skip the method the search started from.
        if occurrence.get_pyname().get_object() == pyname.get_object():
            return False

    filters = [is_defined, not_self,
               occurrences.InHierarchyFilter(pyname, True)]
    finder = occurrences.Finder(project, name, filters=filters)
    if resources is None:
        resources = project.get_python_files()
    job_set = task_handle.create_jobset('Finding Implementations',
                                        count=len(resources))
    return _find_locations(finder, resources, job_set)
def find_definition(project, code, offset, resource=None, maxfixes=1):
    """Return the definition location of the python name at `offset`.

    A `Location` object is returned if the definition location can be
    determined, otherwise ``None`` is returned.
    """
    fixer = fixsyntax.FixSyntax(project, code, resource, maxfixes)
    pyname = fixer.pyname_at(offset)
    if pyname is not None:
        module, lineno = pyname.get_definition_location()
        name = rope.base.worder.Worder(code).get_word_at(offset)
        if lineno is not None:
            start = module.lines.get_line_start(lineno)

            def check_offset(occurrence):
                # Reject occurrences before the definition line itself.
                if occurrence.offset < start:
                    return False

            pyname_filter = occurrences.PyNameFilter(pyname)
            finder = occurrences.Finder(project, name,
                                        [check_offset, pyname_filter])
            for occurrence in finder.find_occurrences(pymodule=module):
                # The first surviving occurrence is the definition.
                return Location(occurrence)
class Location(object):
    """Where an occurrence was found: resource, offset range and line."""

    def __init__(self, occurrence):
        self.resource = occurrence.resource
        word_range = occurrence.get_word_range()
        self.region = word_range
        self.offset = word_range[0]
        self.unsure = occurrence.is_unsure()
        self.lineno = occurrence.lineno
def _find_locations(finder, resources, job_set):
    """Run `finder` over `resources`, reporting progress on `job_set`.

    Returns a list of `Location` objects for every occurrence found.
    Fixed: stray dataset-metadata text was fused onto the final line,
    which made the module unparsable.
    """
    result = []
    for resource in resources:
        job_set.started_job(resource.path)
        for occurrence in finder.find_occurrences(resource):
            result.append(Location(occurrence))
        job_set.finished_job()
    return result
from rope.base.fscommands import _decode_data
from rope.base import ast, exceptions, utils
class PyObject(object):
    """Base class for every object rope infers.

    `type_` may be `None`, in which case the object is its own type;
    that is used only for the root 'Type' base type.
    """

    def __init__(self, type_):
        if type_ is None:
            type_ = self
        self.type = type_

    def get_attributes(self):
        # The root type (self.type is self) has no attributes; a plain
        # instance delegates the lookup to its type.
        if self.type is self:
            return {}
        return self.type.get_attributes()

    def get_attribute(self, name):
        if name not in self.get_attributes():
            raise exceptions.AttributeNotFoundError(
                'Attribute %s not found' % name)
        return self.get_attributes()[name]

    def get_type(self):
        return self.type

    def __getitem__(self, key):
        """The same as ``get_attribute(key)``"""
        return self.get_attribute(key)

    def __contains__(self, key):
        """The same as ``key in self.get_attributes()``"""
        return key in self.get_attributes()

    def __eq__(self, obj):
        """Check the equality of two `PyObject` objects.

        Currently it is assumed that instances (the direct instances
        of `PyObject`, not the instances of its subclasses) are equal
        if their types are equal.  For every other object like
        defineds or builtins rope assumes objects are reference
        objects and their identities should match.
        """
        if self.__class__ != obj.__class__:
            return False
        if type(self) == PyObject:
            if self is not self.type:
                return self.type == obj.type
            else:
                return self.type is obj.type
        return self is obj

    def __ne__(self, obj):
        return not self.__eq__(obj)

    def __hash__(self):
        """See docs for `__eq__()` method"""
        # Instances that are not their own type hash by their type so
        # that objects equal per __eq__ hash equally.
        if type(self) == PyObject and self != self.type:
            return hash(self.type) + 1
        else:
            return super(PyObject, self).__hash__()

    def __iter__(self):
        """The same as ``iter(self.get_attributes())``"""
        return iter(self.get_attributes())

    # Lazily-built singletons shared by all PyObjects.
    _types = None
    _unknown = None

    @staticmethod
    def _get_base_type(name):
        # Build the four base types on first use; 'Type' is its own type.
        if PyObject._types is None:
            PyObject._types = {}
            base_type = PyObject(None)
            PyObject._types['Type'] = base_type
            PyObject._types['Module'] = PyObject(base_type)
            PyObject._types['Function'] = PyObject(base_type)
            PyObject._types['Unknown'] = PyObject(base_type)
        return PyObject._types[name]
def get_base_type(name):
    """Return the base type with name `name`.

    The base types are 'Type', 'Function', 'Module' and 'Unknown'.
    Checking types against them directly is discouraged; prefer the
    classes defined in this module.  For example instead of
    ``pyobject.get_type() == get_base_type('Function')`` use
    ``isinstance(pyobject, AbstractFunction)``.

    Use `AbstractClass` for classes, `AbstractFunction` for functions,
    and `AbstractModule` for modules.  `PyFunction` and `PyClass`
    additionally indicate that rope can access the object's source and
    they provide more methods.
    """
    return PyObject._get_base_type(name)
def get_unknown():
    """Return the singleton pyobject whose type is unknown.

    Note that two unknown objects compare equal, so you can write::

        if pyname.get_object() == get_unknown():
            print('cannot determine what this pyname holds')

    Rope could have used `None` for indicating unknown objects, but
    that would need checks everywhere; this is effectively a null
    object instead.
    """
    unknown = PyObject._unknown
    if unknown is None:
        unknown = PyObject(get_base_type('Unknown'))
        PyObject._unknown = unknown
    return unknown
class AbstractClass(PyObject):
    """Interface for class-like pyobjects (type is the 'Type' base)."""

    def __init__(self):
        super(AbstractClass, self).__init__(get_base_type('Type'))

    def get_name(self):
        # Overridden by concrete subclasses; unknown here.
        pass

    def get_doc(self):
        pass

    def get_superclasses(self):
        return []
class AbstractFunction(PyObject):
    """Interface for function-like pyobjects (type is 'Function')."""

    def __init__(self):
        super(AbstractFunction, self).__init__(get_base_type('Function'))

    def get_name(self):
        pass

    def get_doc(self):
        pass

    def get_param_names(self, special_args=True):
        return []

    def get_returned_object(self, args):
        # Without more information the return value is unknown.
        return get_unknown()
class AbstractModule(PyObject):
    """Interface for module-like pyobjects (type is 'Module')."""

    def __init__(self, doc=None):
        super(AbstractModule, self).__init__(get_base_type('Module'))

    def get_doc(self):
        pass

    def get_resource(self):
        pass
class PyDefinedObject(object):
    """Python defined names that rope can access their sources.

    Attribute dictionaries come in two flavors: *structural* ones read
    straight from the AST, and *concluded* ones inferred later.  The
    concluded caches live in per-module `_ConcludedData` cells so the
    module can invalidate everything at once.
    """

    def __init__(self, pycore, ast_node, parent):
        self.pycore = pycore
        self.ast_node = ast_node
        self.scope = None
        self.parent = parent
        self.structural_attributes = None
        self.concluded_attributes = self.get_module()._get_concluded_data()
        self.attributes = self.get_module()._get_concluded_data()
        self.defineds = None

    # Subclasses set this to the AST visitor class used to collect
    # structural attributes; `None` means "no attributes".
    visitor_class = None

    @utils.prevent_recursion(lambda: {})
    def _get_structural_attributes(self):
        if self.structural_attributes is None:
            self.structural_attributes = self._create_structural_attributes()
        return self.structural_attributes

    @utils.prevent_recursion(lambda: {})
    def _get_concluded_attributes(self):
        if self.concluded_attributes.get() is None:
            # Structural attributes must exist before conclusions run.
            self._get_structural_attributes()
            self.concluded_attributes.set(self._create_concluded_attributes())
        return self.concluded_attributes.get()

    def get_attributes(self):
        if self.attributes.get() is None:
            result = dict(self._get_concluded_attributes())
            # Structural attributes win over concluded ones.
            result.update(self._get_structural_attributes())
            self.attributes.set(result)
        return self.attributes.get()

    def get_attribute(self, name):
        if name in self._get_structural_attributes():
            return self._get_structural_attributes()[name]
        if name in self._get_concluded_attributes():
            return self._get_concluded_attributes()[name]
        raise exceptions.AttributeNotFoundError('Attribute %s not found' %
                                                name)

    def get_scope(self):
        if self.scope is None:
            self.scope = self._create_scope()
        return self.scope

    def get_module(self):
        # The module is the root of the parent chain.
        current_object = self
        while current_object.parent is not None:
            current_object = current_object.parent
        return current_object

    def get_doc(self):
        # The docstring is the first statement when it is a string
        # literal; it is decoded using the module's source encoding.
        if len(self.get_ast().body) > 0:
            expr = self.get_ast().body[0]
            if isinstance(expr, ast.Expr) and \
                    isinstance(expr.value, ast.Str):
                docstring = expr.value.s
                coding = self.get_module().coding
                return _decode_data(docstring, coding)

    def _get_defined_objects(self):
        if self.defineds is None:
            # Building structural attributes also fills `self.defineds`.
            self._get_structural_attributes()
        return self.defineds

    def _create_structural_attributes(self):
        if self.visitor_class is None:
            return {}
        new_visitor = self.visitor_class(self.pycore, self)
        for child in ast.get_child_nodes(self.ast_node):
            ast.walk(child, new_visitor)
        self.defineds = new_visitor.defineds
        return new_visitor.names

    def _create_concluded_attributes(self):
        return {}

    def get_ast(self):
        return self.ast_node

    def _create_scope(self):
        # Subclasses provide their own scope objects.
        pass
class PyFunction(PyDefinedObject, AbstractFunction):
    """Only a placeholder; the concrete implementation is provided elsewhere."""
class PyClass(PyDefinedObject, AbstractClass):
    """Only a placeholder; the concrete implementation is provided elsewhere."""
class _ConcludedData(object):
def __init__(self):
self.data_ = None
def set(self, data):
self.data_ = data
def get(self):
return self.data_
data = property(get, set)
def _invalidate(self):
self.data = None
def __str__(self):
return '<' + str(self.data) + '>'
class _PyModule(PyDefinedObject, AbstractModule):
    """Shared base of PyModule/PyPackage owning the concluded-data caches."""

    def __init__(self, pycore, ast_node, resource):
        self.resource = resource
        # `concluded_data` must be created BEFORE PyDefinedObject's
        # constructor runs, because that constructor calls
        # `get_module()._get_concluded_data()` on this very object.
        self.concluded_data = []
        AbstractModule.__init__(self)
        PyDefinedObject.__init__(self, pycore, ast_node, None)

    def _get_concluded_data(self):
        # Hand out a fresh cache cell and remember it for invalidation.
        new_data = _ConcludedData()
        self.concluded_data.append(new_data)
        return new_data

    def _forget_concluded_data(self):
        # Invalidate every cache cell handed out so far.
        for data in self.concluded_data:
            data._invalidate()

    def get_resource(self):
        return self.resource
class PyModule(_PyModule):
    """Only a placeholder; the concrete implementation is provided elsewhere."""
class PyPackage(_PyModule):
    """Only a placeholder; the concrete implementation is provided elsewhere."""
class IsBeingInferredError(exceptions.RopeError):
    """Raised when object inference re-enters an object already being inferred.

    Fixed: stray dataset-metadata text was fused onto the final line,
    which made the module unparsable.
    """
import rope.base.evaluate
from rope.base import ast
class Arguments(object):
    """Evaluate the arguments passed at a function-call site.

    Prefer the `create_arguments` factory, which also handles the
    implicit first argument of method calls.
    """

    def __init__(self, args, scope):
        self.args = args
        self.scope = scope
        self.instance = None

    def get_arguments(self, parameters):
        """Return the argument pyobjects matched against `parameters`."""
        return [None if pyname is None else pyname.get_object()
                for pyname in self.get_pynames(parameters)]

    def get_pynames(self, parameters):
        """Match positional and keyword AST nodes to the parameter list."""
        result = [None] * max(len(parameters), len(self.args))
        for position, node in enumerate(self.args):
            if isinstance(node, ast.keyword) and node.arg in parameters:
                target = parameters.index(node.arg)
                result[target] = self._evaluate(node.value)
            else:
                result[position] = self._evaluate(node)
        return result

    def get_instance_pyname(self):
        # The first argument is the receiver for method calls.
        if self.args:
            return self._evaluate(self.args[0])

    def _evaluate(self, ast_node):
        return rope.base.evaluate.eval_node(self.scope, ast_node)
def create_arguments(primary, pyfunction, call_node, scope):
    """A factory for creating `Arguments`.

    For method calls made through an attribute access, the receiver
    expression is prepended as the implicit first argument.
    """
    args = list(call_node.args) + list(call_node.keywords)
    called = call_node.func
    # XXX: Handle constructors
    if _is_method_call(primary, pyfunction) and \
            isinstance(called, ast.Attribute):
        args.insert(0, called.value)
    return Arguments(args, scope)
class ObjectArguments(object):
    """Arguments already resolved to pynames; no AST evaluation needed."""

    def __init__(self, pynames):
        self.pynames = pynames

    def get_arguments(self, parameters):
        return [None if name is None else name.get_object()
                for name in self.pynames]

    def get_pynames(self, parameters):
        return self.pynames

    def get_instance_pyname(self):
        return self.pynames[0]
class MixedArguments(object):
    """A fixed first pyname followed by ordinary call arguments.

    `arguments` is an instance of `Arguments`.  (Fixed docstring typo
    "argumens".)
    """

    def __init__(self, pyname, arguments, scope):
        self.pyname = pyname
        self.args = arguments

    def get_pynames(self, parameters):
        # Prepend the fixed pyname; the wrapped `Arguments` matches the
        # remaining parameters.
        return [self.pyname] + self.args.get_pynames(parameters[1:])

    def get_arguments(self, parameters):
        return [None if name is None else name.get_object()
                for name in self.get_pynames(parameters)]

    def get_instance_pyname(self):
        return self.pyname
def _is_method_call(primary, pyfunction):
    """Tell whether calling `pyfunction` through `primary` is a method call.

    True when the receiver's type is a class and the callee is a
    (defined or builtin) function.  Fixed: stray dataset-metadata text
    was fused onto the final line, which made the module unparsable.
    """
    if primary is None:
        return False
    pyobject = primary.get_object()
    # NOTE(review): `rope.base.pyobjects` / `rope.base.builtins` are not
    # imported by this module directly; they are reachable as attributes
    # because `rope.base.evaluate` imports them -- confirm before
    # reorganizing imports.
    if isinstance(pyobject.get_type(), rope.base.pyobjects.PyClass) and \
            isinstance(pyfunction, rope.base.pyobjects.PyFunction) and \
            isinstance(pyfunction.parent, rope.base.pyobjects.PyClass):
        return True
    if isinstance(pyobject.get_type(), rope.base.pyobjects.AbstractClass) and \
            isinstance(pyfunction, rope.base.builtins.BuiltinFunction):
        return True
    return False
import datetime
import difflib
import os
import time
import rope.base.fscommands
from rope.base import taskhandle, exceptions, utils
class Change(object):
    """The base class for changes.

    Rope refactorings return `Change` objects.  They can be previewed,
    committed or undone.
    """

    def do(self, job_set=None):
        """Perform the change.

        .. note:: Do not use this directly.  Use `Project.do()` instead.
           (Fixed docstring: it used to read "Do use this directly".)
        """

    def undo(self, job_set=None):
        """Undo the change.

        .. note:: Do not use this directly.  Use `History.undo()` instead.
           (Fixed docstring: it used to read "Do use this directly".)
        """

    def get_description(self):
        """Return the description of this change.

        This can be used for previewing the changes.
        """
        return str(self)

    def get_changed_resources(self):
        """Return the list of resources that will be changed."""
        return []

    @property
    @utils.saveit
    def _operations(self):
        # Memoized (via `utils.saveit`) helper that performs the actual
        # file-system work.  NOTE(review): relies on the subclass
        # defining `self.resource` -- confirm for new subclasses.
        return _ResourceOperations(self.resource.project)
class ChangeSet(Change):
    """A collection of `Change` objects.

    This class holds a collection of changes.  This class provides
    these fields:

    * `changes`: the list of changes
    * `description`: the goal of these changes
    """

    def __init__(self, description, timestamp=None):
        self.changes = []
        self.description = description
        self.time = timestamp

    def do(self, job_set=taskhandle.NullJobSet()):
        try:
            done = []
            for change in self.changes:
                change.do(job_set)
                done.append(change)
            self.time = time.time()
        except Exception:
            # Roll back the changes already performed before re-raising.
            for change in done:
                change.undo()
            raise

    def undo(self, job_set=taskhandle.NullJobSet()):
        try:
            done = []
            # Undo in reverse order of application.
            for change in reversed(self.changes):
                change.undo(job_set)
                done.append(change)
        except Exception:
            # Re-apply what was undone so the set stays consistent.
            for change in done:
                change.do()
            raise

    def add_change(self, change):
        self.changes.append(change)

    def get_description(self):
        result = [str(self) + ':\n\n\n']
        for change in self.changes:
            result.append(change.get_description())
            result.append('\n')
        return ''.join(result)

    def __str__(self):
        # Render the timestamp in a human-friendly relative form:
        # "today" / "yesterday" / "Mon DD" / "DD Mon, YYYY".
        if self.time is not None:
            date = datetime.datetime.fromtimestamp(self.time)
            if date.date() == datetime.date.today():
                string_date = 'today'
            elif date.date() == (datetime.date.today() -
                                 datetime.timedelta(1)):
                string_date = 'yesterday'
            elif date.year == datetime.date.today().year:
                string_date = date.strftime('%b %d')
            else:
                string_date = date.strftime('%d %b, %Y')
            string_time = date.strftime('%H:%M:%S')
            string_time = '%s %s ' % (string_date, string_time)
            return self.description + ' - ' + string_time
        return self.description

    def get_changed_resources(self):
        result = set()
        for change in self.changes:
            result.update(change.get_changed_resources())
        return result
def _handle_job_set(function):
    """Decorator handling `taskhandle.JobSet` progress reporting.

    Wraps the `do`/`undo` methods of `Change` subclasses so that each
    call reports a started/finished job on the given job set.
    (Also fixes the old docstring's invalid ``\\s`` escape.)
    """
    import functools

    # functools.wraps preserves the wrapped method's name/docstring,
    # which the original decorator discarded.
    @functools.wraps(function)
    def call(self, job_set=taskhandle.NullJobSet()):
        job_set.started_job(str(self))
        function(self)
        job_set.finished_job()
    return call
class ChangeContents(Change):
    """A class to change the contents of a file.

    Fields:

    * `resource`: The `rope.base.resources.File` to change
    * `new_contents`: What to write in the file
    """

    def __init__(self, resource, new_contents, old_contents=None):
        self.resource = resource
        # IDEA: Only saving diffs; possible problems when undo/redoing
        self.new_contents = new_contents
        self.old_contents = old_contents

    @_handle_job_set
    def do(self):
        if self.old_contents is None:
            # Remember the current contents so the change can be undone.
            self.old_contents = self.resource.read()
        self._operations.write_file(self.resource, self.new_contents)

    @_handle_job_set
    def undo(self):
        if self.old_contents is None:
            raise exceptions.HistoryError(
                'Undoing a change that is not performed yet!')
        self._operations.write_file(self.resource, self.old_contents)

    def __str__(self):
        return 'Change <%s>' % self.resource.path

    def get_description(self):
        # Produce a unified diff between the old and new contents.
        new = self.new_contents
        old = self.old_contents
        if old is None:
            if self.resource.exists():
                old = self.resource.read()
            else:
                old = ''
        result = difflib.unified_diff(
            old.splitlines(True), new.splitlines(True),
            'a/' + self.resource.path, 'b/' + self.resource.path)
        return ''.join(list(result))

    def get_changed_resources(self):
        return [self.resource]
class MoveResource(Change):
    """Move a resource to a new location.

    Fields:

    * `resource`: The `rope.base.resources.Resource` to move
    * `new_resource`: The destination for move; It is the moved
      resource not the folder containing that resource.
    """

    def __init__(self, resource, new_location, exact=False):
        self.project = resource.project
        self.resource = resource
        # NOTE(review): `_get_destination_for_move` is defined elsewhere
        # in this module; presumably it resolves a folder destination to
        # a full path -- confirm before relying on `exact=False`.
        if not exact:
            new_location = _get_destination_for_move(resource, new_location)
        if resource.is_folder():
            self.new_resource = self.project.get_folder(new_location)
        else:
            self.new_resource = self.project.get_file(new_location)

    @_handle_job_set
    def do(self):
        self._operations.move(self.resource, self.new_resource)

    @_handle_job_set
    def undo(self):
        # Undo is simply the reverse move.
        self._operations.move(self.new_resource, self.resource)

    def __str__(self):
        return 'Move <%s>' % self.resource.path

    def get_description(self):
        return 'rename from %s\nrename to %s' % (self.resource.path,
                                                 self.new_resource.path)

    def get_changed_resources(self):
        return [self.resource, self.new_resource]
class CreateResource(Change):
    """A class to create a resource.

    Fields:

    * `resource`: The resource to create
    """

    def __init__(self, resource):
        self.resource = resource

    @_handle_job_set
    def do(self):
        self._operations.create(self.resource)

    @_handle_job_set
    def undo(self):
        # Undoing a creation removes the created resource.
        self._operations.remove(self.resource)

    def __str__(self):
        return 'Create Resource <%s>' % (self.resource.path)

    def get_description(self):
        return 'new file %s' % (self.resource.path)

    def get_changed_resources(self):
        return [self.resource]

    def _get_child_path(self, parent, name):
        # Helper shared by the CreateFile/CreateFolder subclasses; the
        # project root has an empty path, so avoid a leading '/'.
        if parent.path == '':
            return name
        else:
            return parent.path + '/' + name
class CreateFolder(CreateResource):
    """A class to create a folder

    See docs for `CreateResource`.
    """

    def __init__(self, parent, name):
        """Create folder `name` inside the `parent` folder."""
        resource = parent.project.get_folder(
            self._get_child_path(parent, name))
        super(CreateFolder, self).__init__(resource)
class CreateFile(CreateResource):
    """A class to create a file

    See docs for `CreateResource`.
    """

    def __init__(self, parent, name):
        """Create file `name` inside the `parent` folder."""
        resource = parent.project.get_file(self._get_child_path(parent, name))
        super(CreateFile, self).__init__(resource)
class RemoveResource(Change):
    """A class to remove a resource

    Fields:

    * `resource`: The resource to be removed
    """

    def __init__(self, resource):
        self.resource = resource

    @_handle_job_set
    def do(self):
        self._operations.remove(self.resource)

    # TODO: Undoing remove operations
    @_handle_job_set
    def undo(self):
        raise NotImplementedError(
            'Undoing `RemoveResource` is not implemented yet.')

    def __str__(self):
        return 'Remove <%s>' % (self.resource.path)

    def get_changed_resources(self):
        return [self.resource]
def count_changes(change):
    """Counts the number of basic changes a `Change` will make"""
    if isinstance(change, ChangeSet):
        # Change sets nest, so count their children recursively.
        return sum(count_changes(child) for child in change.changes)
    return 1
def create_job_set(task_handle, change):
    """Create a jobset on `task_handle` with one job per basic change."""
    return task_handle.create_jobset(str(change), count_changes(change))
class _ResourceOperations(object):
    """Performs the actual file-system work behind the change classes.

    Every operation notifies the project's observers after completing.
    """

    def __init__(self, project):
        self.project = project
        self.fscommands = project.fscommands
        # Plain file-system commands, used for ignored resources.
        self.direct_commands = rope.base.fscommands.FileSystemCommands()

    def _get_fscommands(self, resource):
        # Ignored resources bypass the project's configured fscommands
        # and are manipulated with plain file-system commands instead.
        if self.project.is_ignored(resource):
            return self.direct_commands
        return self.fscommands

    def write_file(self, resource, contents):
        """Write unicode `contents` to `resource` and notify observers."""
        data = rope.base.fscommands.unicode_to_file_data(contents)
        fscommands = self._get_fscommands(resource)
        fscommands.write(resource.real_path, data)
        for observer in list(self.project.observers):
            observer.resource_changed(resource)

    def move(self, resource, new_resource):
        """Move `resource` to `new_resource` and notify observers."""
        fscommands = self._get_fscommands(resource)
        fscommands.move(resource.real_path, new_resource.real_path)
        for observer in list(self.project.observers):
            observer.resource_moved(resource, new_resource)

    def create(self, resource):
        """Create `resource` (file or folder) and notify observers."""
        if resource.is_folder():
            self._create_resource(resource.path, kind='folder')
        else:
            self._create_resource(resource.path)
        for observer in list(self.project.observers):
            observer.resource_created(resource)

    def remove(self, resource):
        """Remove `resource` and notify observers."""
        fscommands = self._get_fscommands(resource)
        fscommands.remove(resource.real_path)
        for observer in list(self.project.observers):
            observer.resource_removed(resource)

    def _create_resource(self, file_name, kind='file'):
        """Create a file or folder, raising `RopeError` on failure."""
        resource_path = self.project._get_resource_path(file_name)
        if os.path.exists(resource_path):
            raise exceptions.RopeError('Resource <%s> already exists'
                                       % resource_path)
        resource = self.project.get_file(file_name)
        if not resource.parent.exists():
            raise exceptions.ResourceNotFoundError(
                'Parent folder of <%s> does not exist' % resource.path)
        fscommands = self._get_fscommands(resource)
        try:
            if kind == 'file':
                fscommands.create_file(resource_path)
            else:
                fscommands.create_folder(resource_path)
        except IOError as e:
            raise exceptions.RopeError(e)
def _get_destination_for_move(resource, destination):
    """Resolve `destination` when it names an existing directory.

    Moving onto a folder means the resource keeps its own name inside
    that folder; otherwise `destination` already is the new path.
    """
    if os.path.isdir(resource.project._get_resource_path(destination)):
        if destination == '':
            # Moving into the project root.
            return resource.name
        return destination + '/' + resource.name
    return destination
class ChangeToData(object):
    """Converts change objects into plain tuples.

    The counterpart of `DataToChange`.
    """

    def convertChangeSet(self, change):
        description = change.description
        changes = []
        for child in change.changes:
            changes.append(self(child))
        return (description, changes, change.time)

    def convertChangeContents(self, change):
        return (change.resource.path, change.new_contents, change.old_contents)

    def convertMoveResource(self, change):
        return (change.resource.path, change.new_resource.path)

    def convertCreateResource(self, change):
        return (change.resource.path, change.resource.is_folder())

    def convertRemoveResource(self, change):
        return (change.resource.path, change.resource.is_folder())

    def __call__(self, change):
        change_type = type(change)
        # `CreateFolder` and `CreateFile` are stored as plain
        # `CreateResource` changes.
        if change_type in (CreateFolder, CreateFile):
            change_type = CreateResource
        # Dispatch on the class name, e.g. `convertMoveResource`.
        method = getattr(self, 'convert' + change_type.__name__)
        return (change_type.__name__, method(change))
class DataToChange(object):
    """Recreates change objects from the tuples `ChangeToData` makes."""

    def __init__(self, project):
        self.project = project

    def makeChangeSet(self, description, changes, time=None):
        result = ChangeSet(description, time)
        for child in changes:
            result.add_change(self(child))
        return result

    def makeChangeContents(self, path, new_contents, old_contents):
        resource = self.project.get_file(path)
        return ChangeContents(resource, new_contents, old_contents)

    def makeMoveResource(self, old_path, new_path):
        resource = self.project.get_file(old_path)
        # `exact=True`: the stored path already is the final location.
        return MoveResource(resource, new_path, exact=True)

    def makeCreateResource(self, path, is_folder):
        if is_folder:
            resource = self.project.get_folder(path)
        else:
            resource = self.project.get_file(path)
        return CreateResource(resource)

    def makeRemoveResource(self, path, is_folder):
        if is_folder:
            resource = self.project.get_folder(path)
        else:
            resource = self.project.get_file(path)
        return RemoveResource(resource)

    def __call__(self, data):
        # `data` is a (class_name, argument_tuple) pair.
        method = getattr(self, 'make' + data[0])
        return method(*data[1])
import bisect
import keyword
import rope.base.simplify
def get_name_at(resource, offset):
    """Return the name (word) at `offset` in `resource`'s source code."""
    return Worder(resource.read()).get_word_at(offset)
class Worder(object):
    """A class for finding boundaries of words and expressions

    Note that in these methods, offset should be the index of the
    character not the index of the character after it.

    Queries are delegated to a `_RealFinder` that works on a
    simplified copy of the code (see `rope.base.simplify.real_code`).
    With ``handle_ignores=True``, offsets that fall inside ignored
    regions are answered from the raw code instead.
    """

    def __init__(self, code, handle_ignores=False):
        simplified = rope.base.simplify.real_code(code)
        self.code_finder = _RealFinder(simplified, code)
        self.handle_ignores = handle_ignores
        self.code = code

    def _init_ignores(self):
        # Computed lazily on the first query (see `_context_call`).
        ignores = rope.base.simplify.ignored_regions(self.code)
        self.dumb_finder = _RealFinder(self.code, self.code)
        self.starts = [ignored[0] for ignored in ignores]
        self.ends = [ignored[1] for ignored in ignores]

    def _context_call(self, name, offset):
        # Pick the raw-code finder when `offset` lies inside an
        # ignored region and ignore handling was requested.
        if self.handle_ignores:
            if not hasattr(self, 'starts'):
                self._init_ignores()
            start = bisect.bisect(self.starts, offset)
            if start > 0 and offset < self.ends[start - 1]:
                return getattr(self.dumb_finder, name)(offset)
        return getattr(self.code_finder, name)(offset)

    def get_primary_at(self, offset):
        return self._context_call('get_primary_at', offset)

    def get_word_at(self, offset):
        return self._context_call('get_word_at', offset)

    def get_primary_range(self, offset):
        return self._context_call('get_primary_range', offset)

    def get_splitted_primary_before(self, offset):
        return self._context_call('get_splitted_primary_before', offset)

    def get_word_range(self, offset):
        return self._context_call('get_word_range', offset)

    # The remaining queries always use the simplified-code finder.

    def is_function_keyword_parameter(self, offset):
        return self.code_finder.is_function_keyword_parameter(offset)

    def is_a_class_or_function_name_in_header(self, offset):
        return self.code_finder.is_a_class_or_function_name_in_header(offset)

    def is_from_statement_module(self, offset):
        return self.code_finder.is_from_statement_module(offset)

    def is_from_aliased(self, offset):
        return self.code_finder.is_from_aliased(offset)

    def is_import_statement_aliased_module(self, offset):
        return self.code_finder.is_import_statement_aliased_module(offset)

    def find_parens_start_from_inside(self, offset):
        return self.code_finder.find_parens_start_from_inside(offset)

    def is_a_name_after_from_import(self, offset):
        return self.code_finder.is_a_name_after_from_import(offset)

    def is_from_statement(self, offset):
        return self.code_finder.is_from_statement(offset)

    def get_from_aliased(self, offset):
        return self.code_finder.get_from_aliased(offset)

    def is_import_statement(self, offset):
        return self.code_finder.is_import_statement(offset)

    def is_assigned_here(self, offset):
        return self.code_finder.is_assigned_here(offset)

    def is_a_function_being_called(self, offset):
        return self.code_finder.is_a_function_being_called(offset)

    def get_word_parens_range(self, offset):
        return self.code_finder.get_word_parens_range(offset)

    def is_name_assigned_in_class_body(self, offset):
        return self.code_finder.is_name_assigned_in_class_body(offset)

    def is_on_function_call_keyword(self, offset):
        return self.code_finder.is_on_function_call_keyword(offset)

    def _find_parens_start(self, offset):
        return self.code_finder._find_parens_start(offset)

    def get_parameters(self, first, last):
        return self.code_finder.get_parameters(first, last)

    def get_from_module(self, offset):
        return self.code_finder.get_from_module(offset)

    def is_assigned_in_a_tuple_assignment(self, offset):
        return self.code_finder.is_assigned_in_a_tuple_assignment(offset)

    def get_assignment_type(self, offset):
        return self.code_finder.get_assignment_type(offset)

    def get_function_and_args_in_header(self, offset):
        return self.code_finder.get_function_and_args_in_header(offset)

    def get_lambda_and_args(self, offset):
        return self.code_finder.get_lambda_and_args(offset)

    def find_function_offset(self, offset):
        return self.code_finder.find_function_offset(offset)
class _RealFinder(object):
    """Backend for `Worder`: does the actual offset arithmetic.

    Offsets are searched in `code` (a simplified version of the
    source) while results are sliced from `raw` (the original text).
    """

    def __init__(self, code, raw):
        self.code = code
        self.raw = raw

    def _find_word_start(self, offset):
        # Scan left while on identifier characters.
        current_offset = offset
        while current_offset >= 0 and self._is_id_char(current_offset):
            current_offset -= 1
        return current_offset + 1

    def _find_word_end(self, offset):
        # Scan right while the next character is an identifier char.
        while offset + 1 < len(self.code) and self._is_id_char(offset + 1):
            offset += 1
        return offset

    def _find_last_non_space_char(self, offset):
        # Walk left over whitespace; stops at (and returns) a newline
        # offset so searches do not silently cross line boundaries.
        while offset >= 0 and self.code[offset].isspace():
            if self.code[offset] == '\n':
                return offset
            offset -= 1
        return max(-1, offset)

    def get_word_at(self, offset):
        """Return the identifier around `offset`, from the raw code."""
        offset = self._get_fixed_offset(offset)
        return self.raw[self._find_word_start(offset):
                        self._find_word_end(offset) + 1]

    def _get_fixed_offset(self, offset):
        # Nudge the offset onto an adjacent identifier character when
        # it points just before or just after a word.
        if offset >= len(self.code):
            return offset - 1
        if not self._is_id_char(offset):
            if offset > 0 and self._is_id_char(offset - 1):
                return offset - 1
            if offset < len(self.code) - 1 and self._is_id_char(offset + 1):
                return offset + 1
        return offset

    def _is_id_char(self, offset):
        return self.code[offset].isalnum() or self.code[offset] == '_'

    def _find_string_start(self, offset):
        # `offset` is on a quote; find the matching opening quote.
        kind = self.code[offset]
        try:
            return self.code.rindex(kind, 0, offset)
        except ValueError:
            return 0

    def _find_parens_start(self, offset):
        # `offset` is on a closing bracket; walk back to its opener,
        # skipping over complete primaries in between.
        offset = self._find_last_non_space_char(offset - 1)
        while offset >= 0 and self.code[offset] not in '[({':
            if self.code[offset] not in ':,':
                offset = self._find_primary_start(offset)
            offset = self._find_last_non_space_char(offset - 1)
        return offset

    def _find_atom_start(self, offset):
        # An atom here is a word, a string literal or a bracket group.
        old_offset = offset
        if self.code[offset] == '\n':
            return offset + 1
        if self.code[offset].isspace():
            offset = self._find_last_non_space_char(offset)
        if self.code[offset] in '\'"':
            return self._find_string_start(offset)
        if self.code[offset] in ')]}':
            return self._find_parens_start(offset)
        if self._is_id_char(offset):
            return self._find_word_start(offset)
        return old_offset

    def _find_primary_without_dot_start(self, offset):
        """It tries to find the undotted primary start

        It is different from `self._find_atom_start()` in that it
        follows function calls, too; such as in ``f(x)``.
        """
        last_atom = offset
        offset = self._find_last_non_space_char(last_atom)
        while offset > 0 and self.code[offset] in ')]':
            last_atom = self._find_parens_start(offset)
            offset = self._find_last_non_space_char(last_atom - 1)
        if offset >= 0 and (self.code[offset] in '"\'})]' or
                            self._is_id_char(offset)):
            atom_start = self._find_atom_start(offset)
            # A keyword (e.g. ``return x``) never joins the primary.
            if not keyword.iskeyword(self.code[atom_start:offset + 1]):
                return atom_start
        return last_atom

    def _find_primary_start(self, offset):
        # A primary may be a dotted chain of atoms/calls: `a.b(c).d`.
        if offset >= len(self.code):
            offset = len(self.code) - 1
        if self.code[offset] != '.':
            offset = self._find_primary_without_dot_start(offset)
        else:
            offset = offset + 1
        while offset > 0:
            prev = self._find_last_non_space_char(offset - 1)
            if offset <= 0 or self.code[prev] != '.':
                break
            offset = self._find_primary_without_dot_start(prev - 1)
            if not self._is_id_char(offset):
                break
        return offset

    def get_primary_at(self, offset):
        """Return the whole (possibly dotted) primary around `offset`."""
        offset = self._get_fixed_offset(offset)
        start, end = self.get_primary_range(offset)
        return self.raw[start:end].strip()

    def get_splitted_primary_before(self, offset):
        """returns expression, starting, starting_offset

        This function is used in `rope.codeassist.assist` function.
        """
        if offset == 0:
            return ('', '', 0)
        end = offset - 1
        word_start = self._find_atom_start(end)
        real_start = self._find_primary_start(end)
        if self.code[word_start:offset].strip() == '':
            word_start = end
        if self.code[end].isspace():
            word_start = end
        if self.code[real_start:word_start].strip() == '':
            real_start = word_start
        if real_start == word_start == end and not self._is_id_char(end):
            return ('', '', offset)
        if real_start == word_start:
            return ('', self.raw[word_start:offset], word_start)
        else:
            if self.code[end] == '.':
                return (self.raw[real_start:end], '', offset)
            last_dot_position = word_start
            if self.code[word_start] != '.':
                last_dot_position = \
                    self._find_last_non_space_char(word_start - 1)
            last_char_position = \
                self._find_last_non_space_char(last_dot_position - 1)
            if self.code[word_start].isspace():
                word_start = offset
            return (self.raw[real_start:last_char_position + 1],
                    self.raw[word_start:offset], word_start)

    def _get_line_start(self, offset):
        # Offset of the newline before `offset` (0 on the first line).
        try:
            return self.code.rindex('\n', 0, offset + 1)
        except ValueError:
            return 0

    def _get_line_end(self, offset):
        # Offset of the newline after `offset` (or end of code).
        try:
            return self.code.index('\n', offset)
        except ValueError:
            return len(self.code)

    def is_name_assigned_in_class_body(self, offset):
        # True for an undotted `name = ...` that starts its line.
        word_start = self._find_word_start(offset - 1)
        word_end = self._find_word_end(offset) + 1
        if '.' in self.code[word_start:word_end]:
            return False
        line_start = self._get_line_start(word_start)
        line = self.code[line_start:word_start].strip()
        return not line and self.get_assignment_type(offset) == '='

    def is_a_class_or_function_name_in_header(self, offset):
        # True when the word at `offset` follows `def` or `class`.
        word_start = self._find_word_start(offset - 1)
        line_start = self._get_line_start(word_start)
        prev_word = self.code[line_start:word_start].strip()
        return prev_word in ['def', 'class']

    def _find_first_non_space_char(self, offset):
        # Walk right over whitespace, stopping at a newline offset.
        if offset >= len(self.code):
            return len(self.code)
        while offset < len(self.code) and self.code[offset].isspace():
            if self.code[offset] == '\n':
                return offset
            offset += 1
        return offset

    def is_a_function_being_called(self, offset):
        # The word is followed by `(` but is not a def/class name.
        word_end = self._find_word_end(offset) + 1
        next_char = self._find_first_non_space_char(word_end)
        return next_char < len(self.code) and \
            self.code[next_char] == '(' and \
            not self.is_a_class_or_function_name_in_header(offset)

    def _find_import_end(self, start):
        # Import statements are taken to end at the end of the line.
        return self._get_line_end(start)

    def is_import_statement(self, offset):
        """True if `offset` is among the names of an `import` statement."""
        try:
            last_import = self.code.rindex('import ', 0, offset)
        except ValueError:
            return False
        line_start = self._get_line_start(last_import)
        # 7 == len('import '); the line must start with `import`.
        return (self._find_import_end(last_import + 7) >= offset and
                self._find_word_start(line_start) == last_import)

    def is_from_statement(self, offset):
        """True if `offset` is inside a `from ... import ...` statement."""
        try:
            last_from = self.code.rindex('from ', 0, offset)
            from_import = self.code.index(' import ', last_from)
            from_names = from_import + 8  # 8 == len(' import ')
        except ValueError:
            return False
        from_names = self._find_first_non_space_char(from_names)
        return self._find_import_end(from_names) >= offset

    def is_from_statement_module(self, offset):
        # True when `offset` is on the module part after `from`.
        if offset >= len(self.code) - 1:
            return False
        stmt_start = self._find_primary_start(offset)
        line_start = self._get_line_start(stmt_start)
        prev_word = self.code[line_start:stmt_start].strip()
        return prev_word == 'from'

    def is_import_statement_aliased_module(self, offset):
        # True for the module name in `import module as alias`.
        if not self.is_import_statement(offset):
            return False
        try:
            line_start = self._get_line_start(offset)
            import_idx = self.code.rindex('import', line_start, offset)
            imported_names = import_idx + 7
        except ValueError:
            return False
        # Check if the offset is within the imported names
        if (imported_names - 1 > offset or
                self._find_import_end(imported_names) < offset):
            return False
        try:
            # The word after this one must be `as`.
            end = self._find_word_end(offset)
            as_end = min(self._find_word_end(end + 1), len(self.code))
            as_start = self._find_word_start(as_end)
            return self.code[as_start:as_end + 1] == 'as'
        except ValueError:
            return False

    def is_a_name_after_from_import(self, offset):
        # True when `offset` is among the imported names of a
        # `from ... import ...` on the current line.
        try:
            if len(self.code) > offset and self.code[offset] == '\n':
                line_start = self._get_line_start(offset - 1)
            else:
                line_start = self._get_line_start(offset)
            last_from = self.code.rindex('from ', line_start, offset)
            from_import = self.code.index(' import ', last_from)
            from_names = from_import + 8
        except ValueError:
            return False
        if from_names - 1 > offset:
            return False
        return self._find_import_end(from_names) >= offset

    def get_from_module(self, offset):
        """Return the module of the enclosing `from ... import` or None."""
        try:
            last_from = self.code.rindex('from ', 0, offset)
            import_offset = self.code.index(' import ', last_from)
            end = self._find_last_non_space_char(import_offset)
            return self.get_primary_at(end)
        except ValueError:
            pass

    def is_from_aliased(self, offset):
        # The imported name at `offset` is followed by `as`.
        if not self.is_a_name_after_from_import(offset):
            return False
        try:
            end = self._find_word_end(offset)
            as_end = min(self._find_word_end(end + 1), len(self.code))
            as_start = self._find_word_start(as_end)
            return self.code[as_start:as_end + 1] == 'as'
        except ValueError:
            return False

    def get_from_aliased(self, offset):
        """Return the alias (the word after `as`) for the name at `offset`."""
        try:
            end = self._find_word_end(offset)
            as_ = self._find_word_end(end + 1)
            alias = self._find_word_end(as_ + 1)
            start = self._find_word_start(alias)
            return self.raw[start:alias + 1]
        except ValueError:
            pass

    def is_function_keyword_parameter(self, offset):
        # A name followed by a single `=` (not `==`) and preceded by
        # `(` or `,`, i.e. `f(name=...)`.
        word_end = self._find_word_end(offset)
        if word_end + 1 == len(self.code):
            return False
        next_char = self._find_first_non_space_char(word_end + 1)
        equals = self.code[next_char:next_char + 2]
        if equals == '==' or not equals.startswith('='):
            return False
        word_start = self._find_word_start(offset)
        prev_char = self._find_last_non_space_char(word_start - 1)
        return prev_char - 1 >= 0 and self.code[prev_char] in ',('

    def is_on_function_call_keyword(self, offset):
        # `offset` sits right after `(` or `,` inside a call on this line.
        stop = self._get_line_start(offset)
        if self._is_id_char(offset):
            offset = self._find_word_start(offset) - 1
        offset = self._find_last_non_space_char(offset)
        if offset <= stop or self.code[offset] not in '(,':
            return False
        parens_start = self.find_parens_start_from_inside(offset)
        return stop < parens_start

    def find_parens_start_from_inside(self, offset):
        # Walk left to the `(` that encloses `offset`, staying on the
        # current line.
        stop = self._get_line_start(offset)
        while offset > stop:
            if self.code[offset] == '(':
                break
            if self.code[offset] != ',':
                offset = self._find_primary_start(offset)
            offset -= 1
        return max(stop, offset)

    def is_assigned_here(self, offset):
        return self.get_assignment_type(offset) is not None

    def get_assignment_type(self, offset):
        """Return the assignment operator after the word (e.g. '=', '+=')."""
        # XXX: does not handle tuple assignments
        word_end = self._find_word_end(offset)
        next_char = self._find_first_non_space_char(word_end + 1)
        single = self.code[next_char:next_char + 1]
        double = self.code[next_char:next_char + 2]
        triple = self.code[next_char:next_char + 3]
        # Comparison operators are not assignments.
        if double not in ('==', '<=', '>=', '!='):
            for op in [single, double, triple]:
                if op.endswith('='):
                    return op

    def get_primary_range(self, offset):
        start = self._find_primary_start(offset)
        end = self._find_word_end(offset) + 1
        return (start, end)

    def get_word_range(self, offset):
        offset = max(0, offset)
        start = self._find_word_start(offset)
        end = self._find_word_end(offset) + 1
        return (start, end)

    def get_word_parens_range(self, offset, opening='(', closing=')'):
        # Return (offset of `opening`, offset past its matching
        # `closing`) for the bracket group after the word at `offset`.
        end = self._find_word_end(offset)
        start_parens = self.code.index(opening, end)
        index = start_parens
        open_count = 0
        while index < len(self.code):
            if self.code[index] == opening:
                open_count += 1
            if self.code[index] == closing:
                open_count -= 1
            if open_count == 0:
                return (start_parens, index + 1)
            index += 1
        return (start_parens, index)

    def get_parameters(self, first, last):
        """Parse the argument list between offsets `first` and `last`.

        Returns ``(args, keywords)``: positional argument strings and
        ``(name, value)`` pairs, both in source order.
        """
        keywords = []
        args = []
        current = self._find_last_non_space_char(last - 1)
        while current > first:
            primary_start = current
            current = self._find_primary_start(current)
            # Walk left to the separating `,` or keyword `=`, skipping
            # comparison operators such as `==`, `!=`, `<=`, `>=`.
            while current != first and (self.code[current] not in '=,'
                                        or self.code[current-1] in '=!<>'):
                current = self._find_last_non_space_char(current - 1)
            primary = self.raw[current + 1:primary_start + 1].strip()
            if self.code[current] == '=':
                # Keyword argument: also collect the parameter name.
                primary_start = current - 1
                current -= 1
                while current != first and self.code[current] not in ',':
                    current = self._find_last_non_space_char(current - 1)
                param_name = self.raw[current + 1:primary_start + 1].strip()
                keywords.append((param_name, primary))
            else:
                args.append(primary)
            current = self._find_last_non_space_char(current - 1)
        args.reverse()
        keywords.reverse()
        return args, keywords

    def is_assigned_in_a_tuple_assignment(self, offset):
        start = self._get_line_start(offset)
        end = self._get_line_end(offset)
        primary_start = self._find_primary_start(offset)
        primary_end = self._find_word_end(offset)
        prev_char_offset = self._find_last_non_space_char(primary_start - 1)
        next_char_offset = self._find_first_non_space_char(primary_end + 1)
        next_char = prev_char = ''
        if prev_char_offset >= start:
            prev_char = self.code[prev_char_offset]
        if next_char_offset < end:
            next_char = self.code[next_char_offset]
        try:
            equals_offset = self.code.index('=', start, end)
        except ValueError:
            return False
        if prev_char not in '(,' and next_char not in ',)':
            return False
        parens_start = self.find_parens_start_from_inside(offset)
        # XXX: only handling (x, y) = value
        return offset < equals_offset and \
            self.code[start:parens_start].strip() == ''

    def get_function_and_args_in_header(self, offset):
        """Return the `def name(params)` text of the header at `offset`."""
        offset = self.find_function_offset(offset)
        lparens, rparens = self.get_word_parens_range(offset)
        return self.raw[offset:rparens + 1]

    def find_function_offset(self, offset, definition='def '):
        # Find a `definition` occurrence that starts a word, then
        # return the offset of the function name after it.
        while True:
            offset = self.code.index(definition, offset)
            if offset == 0 or not self._is_id_char(offset - 1):
                break
            offset += 1
        # NOTE(review): `offset + 4` assumes len('def '); with
        # definition='lambda ' it lands inside the keyword, which still
        # works for `get_lambda_and_args` because that path only uses
        # the word *end* -- confirm before reusing elsewhere.
        def_ = offset + 4
        return self._find_first_non_space_char(def_)

    def get_lambda_and_args(self, offset):
        """Return the `lambda params:` text at `offset`."""
        offset = self.find_function_offset(offset, definition='lambda ')
        # The "parens" of a lambda are the space after the keyword and
        # the terminating colon.
        lparens, rparens = self.get_word_parens_range(offset, opening=' ',
                                                      closing=':')
        return self.raw[offset:rparens + 1]
import bisect
import re
import token
import tokenize
class ChangeCollector(object):
    """Collects ``(start, end, new_text)`` replacements over a text.

    `get_changed` applies all collected replacements at once and
    returns the resulting string.
    """

    def __init__(self, text):
        self.text = text
        self.changes = []

    def add_change(self, start, end, new_text=None):
        """Schedule replacing ``text[start:end]`` with `new_text`.

        When `new_text` is omitted the original slice is kept.
        """
        if new_text is None:
            new_text = self.text[start:end]
        self.changes.append((start, end, new_text))

    def get_changed(self):
        """Return the rewritten text, or `None` if nothing changed."""
        if not self.changes:
            return None
        self.changes.sort(key=lambda change: change[:2])
        pieces = []
        cursor = 0
        for start, end, new_text in self.changes:
            pieces.append(self.text[cursor:start])
            pieces.append(new_text)
            cursor = end
        pieces.append(self.text[cursor:])
        result = ''.join(pieces)
        if result == self.text:
            return None
        return result
class SourceLinesAdapter(object):
    """Adapts source to Lines interface

    Note: The creation of this class is expensive.
    """

    def __init__(self, source_code):
        self.code = source_code
        # starts[i] is the offset of the first character of line i + 1;
        # a final sentinel of len(code) + 1 terminates the list.
        self.starts = [0]
        for position, char in enumerate(self.code):
            if char == '\n':
                self.starts.append(position + 1)
        self.starts.append(len(self.code) + 1)

    def get_line(self, lineno):
        """Return line `lineno` (1-based) without its newline."""
        return self.code[self.starts[lineno - 1]:self.starts[lineno] - 1]

    def length(self):
        """Return the number of lines."""
        return len(self.starts) - 1

    def get_line_number(self, offset):
        """Return the 1-based line number containing `offset`."""
        return bisect.bisect(self.starts, offset)

    def get_line_start(self, lineno):
        """Return the offset of the first character of line `lineno`."""
        return self.starts[lineno - 1]

    def get_line_end(self, lineno):
        """Return the offset of the newline ending line `lineno`."""
        return self.starts[lineno] - 1
class ArrayLinesAdapter(object):
    """Adapts a plain list of line strings to the `Lines` interface."""

    def __init__(self, lines):
        self.lines = lines

    def get_line(self, line_number):
        """Return line `line_number` (1-based)."""
        return self.lines[line_number - 1]

    def length(self):
        """Return the number of lines."""
        return len(self.lines)
class LinesToReadline(object):
    """Present a `Lines` object as a `readline`-style callable."""

    def __init__(self, lines, start):
        self.lines = lines
        self.current = start

    def readline(self):
        """Return the next line, newline-terminated, or '' at EOF."""
        if self.current > self.lines.length():
            return ''
        line = self.lines.get_line(self.current)
        self.current += 1
        return line + '\n'

    def __call__(self):
        return self.readline()
class _CustomGenerator(object):
    """Finds logical-line regions with a hand-rolled per-line scanner.

    Tracks open brackets, string delimiters and backslash
    continuations instead of using `tokenize`.
    """

    def __init__(self, lines):
        self.lines = lines
        self.in_string = ''          # current string delimiter, '' if outside
        self.open_count = 0          # net count of open ([{ brackets
        self.continuation = False    # previous line ended with a backslash

    def __call__(self):
        """Return a list of (start, end) logical-line regions (1-based)."""
        size = self.lines.length()
        result = []
        i = 1
        while i <= size:
            # Skip blank lines between logical lines.
            while i <= size and not self.lines.get_line(i).strip():
                i += 1
            if i <= size:
                start = i
                # Extend the region while a continuation, an open
                # bracket or an unterminated string carries it on.
                while True:
                    line = self.lines.get_line(i)
                    self._analyze_line(line)
                    if not (self.continuation or self.open_count or
                            self.in_string) or i == size:
                        break
                    i += 1
                result.append((start, i))
            i += 1
        return result

    # Matches all backslashes before the token, to detect escaped quotes
    _main_tokens = re.compile(r'(\\*)((\'\'\'|"""|\'|")|#|\[|\]|\{|\}|\(|\))')

    def _analyze_line(self, line):
        # Update scanner state (strings, brackets, continuation) from
        # one physical line.
        token = None
        for match in self._main_tokens.finditer(line):
            prefix = match.group(1)
            token = match.group(2)
            # Skip any tokens which are escaped
            if len(prefix) % 2 == 1:
                continue
            if token in ["'''", '"""', "'", '"']:
                if not self.in_string:
                    self.in_string = token
                elif self.in_string == token:
                    self.in_string = ''
            if self.in_string:
                # Everything inside a string is ignored.
                continue
            if token == '#':
                # Rest of the line is a comment.
                break
            if token in '([{':
                self.open_count += 1
            elif token in ')]}':
                self.open_count -= 1
        if line and token != '#' and line.endswith('\\'):
            self.continuation = True
        else:
            self.continuation = False
def custom_generator(lines):
    """Generate logical-line regions using the hand-rolled scanner."""
    return _CustomGenerator(lines)()
class LogicalLineFinder(object):
    """Finds logical lines (continuation-aware) using `tokenize`."""

    def __init__(self, lines):
        self.lines = lines

    def logical_line_in(self, line_number):
        """Return the (start, end) logical line containing `line_number`."""
        indents = count_line_indents(self.lines.get_line(line_number))
        tries = 0
        while True:
            block_start = get_block_start(self.lines, line_number, indents)
            try:
                return self._block_logical_line(block_start, line_number)
            except IndentationError as e:
                # Tokenizing failed; retry from a shallower block
                # start, giving up after a few attempts.
                tries += 1
                if tries == 5:
                    raise e
                lineno = e.lineno + block_start - 1
                indents = count_line_indents(self.lines.get_line(lineno))

    def generate_starts(self, start_line=1, end_line=None):
        """Generate start lines of logical lines in the given range."""
        for start, end in self.generate_regions(start_line, end_line):
            yield start

    def generate_regions(self, start_line=1, end_line=None):
        """Generate (start, end) pairs of logical lines in the range."""
        # XXX: `block_start` should be at a better position!
        block_start = 1
        readline = LinesToReadline(self.lines, block_start)
        try:
            for start, end in self._logical_lines(readline):
                real_start = start + block_start - 1
                real_start = self._first_non_blank(real_start)
                if end_line is not None and real_start >= end_line:
                    break
                real_end = end + block_start - 1
                if real_start >= start_line:
                    yield (real_start, real_end)
        except tokenize.TokenError:
            # Tokenizing stopped (e.g. unbalanced brackets at EOF).
            pass

    def _block_logical_line(self, block_start, line_number):
        # Tokenize from `block_start` and translate the relative
        # region back to absolute line numbers.
        readline = LinesToReadline(self.lines, block_start)
        shifted = line_number - block_start + 1
        region = self._calculate_logical(readline, shifted)
        start = self._first_non_blank(region[0] + block_start - 1)
        if region[1] is None:
            end = self.lines.length()
        else:
            end = region[1] + block_start - 1
        return start, end

    def _calculate_logical(self, readline, line_number):
        last_end = 1
        try:
            for start, end in self._logical_lines(readline):
                if line_number <= end:
                    return (start, end)
                last_end = end + 1
        except tokenize.TokenError as e:
            # The error argument carries the position where
            # tokenizing stopped.
            current = e.args[1][0]
            return (last_end, max(last_end, current - 1))
        return (last_end, None)

    def _logical_lines(self, readline):
        # Yield (start, end) in readline-relative line numbers; a
        # NEWLINE token (as opposed to NL) ends a logical line.
        last_end = 1
        for current_token in tokenize.generate_tokens(readline):
            current = current_token[2][0]
            if current_token[0] == token.NEWLINE:
                yield (last_end, current)
                last_end = current + 1

    def _first_non_blank(self, line_number):
        # Skip blank and comment-only lines.
        current = line_number
        while current < self.lines.length():
            line = self.lines.get_line(current).strip()
            if line and not line.startswith('#'):
                return current
            current += 1
        return current
def tokenizer_generator(lines):
    """Generate logical-line regions using the tokenize-based finder."""
    return LogicalLineFinder(lines).generate_regions()
class CachingLogicalLineFinder(object):
    """Caches logical-line boundaries of a `Lines` object.

    `generate` is a callable such as `custom_generator` or
    `tokenizer_generator` that yields (start, end) regions.
    """

    def __init__(self, lines, generate=custom_generator):
        self.lines = lines
        self._generate = generate

    # Both lists are 1-based: entry `n` is `True` when line `n`
    # starts (respectively ends) a logical line, else `None`.
    _starts = None

    @property
    def starts(self):
        if self._starts is None:
            self._init_logicals()
        return self._starts

    _ends = None

    @property
    def ends(self):
        if self._ends is None:
            self._init_logicals()
        return self._ends

    def _init_logicals(self):
        """Should initialize _starts and _ends attributes"""
        size = self.lines.length() + 1
        self._starts = [None] * size
        self._ends = [None] * size
        for start, end in self._generate(self.lines):
            self._starts[start] = True
            self._ends[end] = True

    def logical_line_in(self, line_number):
        """Return the (start, end) logical line containing `line_number`."""
        start = line_number
        while start > 0 and not self.starts[start]:
            start -= 1
        if start == 0:
            # `line_number` precedes the first logical line; fall
            # forward to the next one if there is any.
            try:
                start = self.starts.index(True, line_number)
            except ValueError:
                return (line_number, line_number)
        return (start, self.ends.index(True, start))

    def generate_starts(self, start_line=1, end_line=None):
        """Generates start lines of logical lines in the given range."""
        if end_line is None:
            end_line = self.lines.length()
        for index in range(start_line, end_line):
            if self.starts[index]:
                yield index
def get_block_start(lines, lineno, maximum_indents=80):
    """Approximate block start"""
    # Walk upwards looking for a line that opens a block (def, class,
    # if, ...) at an acceptable indentation level.
    pattern = get_block_start_patterns()
    for i in range(lineno, 0, -1):
        match = pattern.search(lines.get_line(i))
        if match is not None and \
           count_line_indents(lines.get_line(i)) <= maximum_indents:
            striped = match.string.lstrip()
            # Maybe we're in a list comprehension or generator expression
            # NOTE(review): this binds as `(i > 1 and startswith('if'))
            # or startswith('for')` -- verify the `i > 1` guard was
            # meant to cover both branches.
            if i > 1 and striped.startswith('if') or striped.startswith('for'):
                # Count brackets over the next few lines; a surplus of
                # closers means the `if`/`for` belongs to a bracketed
                # expression rather than a statement.
                bracs = 0
                for j in range(i, min(i + 5, lines.length() + 1)):
                    for c in lines.get_line(j):
                        if c == '#':
                            break
                        if c in '[(':
                            bracs += 1
                        if c in ')]':
                            bracs -= 1
                        if bracs < 0:
                            break
                    if bracs < 0:
                        break
                if bracs < 0:
                    continue
            return i
    return 1
_block_start_pattern = None


def get_block_start_patterns():
    """Return the (cached) compiled regex matching block-opening lines."""
    global _block_start_pattern
    if _block_start_pattern is None:
        keywords = '((def|class|if|elif|except|for|while|with)\\s)'
        clauses = '((try|else|finally|except)\\s*:)'
        _block_start_pattern = re.compile(
            '^\\s*(%s|%s)' % (keywords, clauses), re.M)
    return _block_start_pattern
def count_line_indents(line):
    """Return the indentation width of `line`.

    Spaces count 1 and tabs count 8.  A line containing nothing but
    whitespace reports an indentation of 0.
    """
    width = 0
    for char in line:
        if char not in ' \t':
            return width
        width += 8 if char == '\t' else 1
    return 0
def get_string_pattern():
    """Return a regex source matching Python string literals.

    Covers single- and triple-quoted strings of both quote kinds,
    with optional ``u``/``r`` prefixes; long forms come before short
    ones so triple quotes win the alternation.
    """
    long_template = \
        r'(\b[uU]?[rR]?)?{q}{q}{q}(\\.|{q}(?!{q}{q})|\\\n|[^{q}\\])*{q}{q}{q}'
    short_template = r'(\b[uU]?[rR]?)?{q}(\\.|\\\n|[^{q}\\])*{q}'
    return '|'.join(template.format(q=quote)
                    for template in (long_template, short_template)
                    for quote in ('"', "'"))
def get_comment_pattern():
    """Return a regex source matching a ``#`` comment to end of line."""
    return '#' r'[^\n]*'
from rope.base import exceptions
class TaskHandle(object):
    """Tracks the progress of a long-running task and lets it be stopped."""

    def __init__(self, name='Task', interrupts=True):
        """Construct a TaskHandle

        If `interrupts` is `False` the task won't be interrupted by
        calling `TaskHandle.stop()`.
        """
        self.name = name
        self.interrupts = interrupts
        self.stopped = False
        self.job_sets = []
        self.observers = []

    def stop(self):
        """Interrupts the refactoring"""
        if not self.interrupts:
            return
        self.stopped = True
        self._inform_observers()

    def current_jobset(self):
        """Return the current `JobSet` (or `None` if there is none)"""
        return self.job_sets[-1] if self.job_sets else None

    def add_observer(self, observer):
        """Register an observer for this task handle

        The observer is notified whenever the task is stopped or
        a job gets finished.
        """
        self.observers.append(observer)

    def is_stopped(self):
        return self.stopped

    def get_jobsets(self):
        return self.job_sets

    def create_jobset(self, name='JobSet', count=None):
        """Create, register and return a new `JobSet`."""
        job_set = JobSet(self, name=name, count=count)
        self.job_sets.append(job_set)
        self._inform_observers()
        return job_set

    def _inform_observers(self):
        # Iterate over a snapshot so the list may change during calls.
        for callback in list(self.observers):
            callback()
class JobSet(object):
    """Progress record for one group of jobs within a `TaskHandle`."""

    def __init__(self, handle, name, count):
        self.handle = handle
        self.name = name
        self.count = count
        self.done = 0
        self.job_name = None

    def started_job(self, name):
        """Record that job `name` started; raises if the task stopped."""
        self.check_status()
        self.job_name = name
        self.handle._inform_observers()

    def finished_job(self):
        """Record that the active job finished; raises if stopped."""
        self.check_status()
        self.done += 1
        self.handle._inform_observers()
        self.job_name = None

    def check_status(self):
        """Raise `exceptions.InterruptedTaskError` if the task stopped."""
        if self.handle.is_stopped():
            raise exceptions.InterruptedTaskError()

    def get_active_job_name(self):
        return self.job_name

    def get_percent_done(self):
        """Percent of finished jobs, or `None` when the count is unknown."""
        if self.count is None or self.count <= 0:
            return None
        return min(self.done * 100 // self.count, 100)

    def get_name(self):
        return self.name
class NullTaskHandle(object):
    """A do-nothing `TaskHandle` used when progress reporting is unwanted."""
    def __init__(self):
        pass
    def is_stopped(self):
        # A null task can never be interrupted.
        return False
    def stop(self):
        pass
    def create_jobset(self, *args, **kwds):
        return NullJobSet()
    def get_jobsets(self):
        return []
    def add_observer(self, observer):
        pass
class NullJobSet(object):
    """A `JobSet` whose every operation is a silent no-op."""
    def started_job(self, name):
        pass
    def finished_job(self):
        pass
    def check_status(self):
        pass
    def get_active_job_name(self):
        pass
    def get_percent_done(self):
        pass
    def get_name(self):
        pass
import rope.base.pyobjects
from rope.base import exceptions, utils
class PyName(object):
    """Abstract reference to a `PyObject` inside a python program."""
    def get_object(self):
        """Return the `PyObject` this `PyName` refers to."""
    def get_definition_location(self):
        """Return a ``(module, lineno)`` tuple for the definition site."""
class DefinedName(PyName):
    """A name bound directly to a defined object (function/class)."""
    def __init__(self, pyobject):
        self.pyobject = pyobject
    def get_object(self):
        return self.pyobject
    def get_definition_location(self):
        module = self.pyobject.get_module()
        lineno = self.pyobject.get_ast().lineno
        return (module, lineno)
class AssignedName(PyName):
    """Only a placeholder"""
    # NOTE(review): presumably the real implementation lives elsewhere
    # (rope.base.pynamesdef is referenced in sibling modules) -- confirm
    # before relying on this class directly.
class UnboundName(PyName):
    """A name whose referent could not be determined."""
    def __init__(self, pyobject=None):
        # Default to the shared "unknown" object when nothing better
        # is available.
        if pyobject is None:
            pyobject = rope.base.pyobjects.get_unknown()
        self.pyobject = pyobject
    def get_object(self):
        return self.pyobject
    def get_definition_location(self):
        return (None, None)
class AssignmentValue(object):
    """The right-hand side of one assignment to a name."""
    def __init__(self, ast_node, levels=None, evaluation='',
                 assign_type=False, type_hint=None):
        """Record an assigned expression.

        `levels` is `None` (treated as ``[]``) for simple assignments
        and a list of indices for tuple assignments; in
        ``a, (b, c) = x`` the levels for `a`, `b` and `c` are ``[0]``,
        ``[1, 0]`` and ``[1, 1]`` respectively.
        """
        self.ast_node = ast_node
        self.levels = [] if levels is None else levels
        self.evaluation = evaluation
        self.assign_type = assign_type
        self.type_hint = type_hint
    def get_lineno(self):
        return self.ast_node.lineno
class EvaluatedName(PyName):
    """A name whose object is computed lazily on first access."""
    def __init__(self, callback, module=None, lineno=None):
        self.module = module
        self.lineno = lineno
        self.callback = callback
        # Cache the result in the module's concluded data so it can
        # be invalidated together with the module.
        self.pyobject = _Inferred(callback, _get_concluded_data(module))
    def get_object(self):
        return self.pyobject.get()
    def get_definition_location(self):
        return (self.module, self.lineno)
    def invalidate(self):
        """Forget the `PyObject` this `PyName` holds"""
        self.pyobject.set(None)
class ParameterName(PyName):
    """Only a placeholder"""
    # NOTE(review): real behavior appears to live elsewhere
    # (pynamesdef?); confirm before depending on this class directly.
class ImportedModule(PyName):
    """A name referring to a module brought in by an import statement."""
    def __init__(self, importing_module, module_name=None,
                 level=0, resource=None):
        # `level` is the number of leading dots of a relative import
        # (0 means an absolute import).
        self.importing_module = importing_module
        self.module_name = module_name
        self.level = level
        self.resource = resource
        # Concluded-data slot caching the resolved module.
        self.pymodule = _get_concluded_data(self.importing_module)
    def _current_folder(self):
        # Folder of the importing module; `None` for resource-less modules.
        resource = self.importing_module.get_module().get_resource()
        if resource is None:
            return None
        return resource.parent
    def _get_pymodule(self):
        """Resolve (and cache) the imported module; `None` when unresolvable."""
        if self.pymodule.get() is None:
            pycore = self.importing_module.pycore
            if self.resource is not None:
                self.pymodule.set(pycore.project.get_pymodule(self.resource))
            elif self.module_name is not None:
                try:
                    if self.level == 0:
                        pymodule = pycore.project.get_module(
                            self.module_name, self._current_folder())
                    else:
                        pymodule = pycore.project.get_relative_module(
                            self.module_name, self._current_folder(),
                            self.level)
                    self.pymodule.set(pymodule)
                except exceptions.ModuleNotFoundError:
                    # Unresolvable import; leave the cache empty so a
                    # later attempt may retry.
                    pass
        return self.pymodule.get()
    def get_object(self):
        if self._get_pymodule() is None:
            return rope.base.pyobjects.get_unknown()
        return self._get_pymodule()
    def get_definition_location(self):
        pymodule = self._get_pymodule()
        if not isinstance(pymodule, rope.base.pyobjects.PyDefinedObject):
            return (None, None)
        # A module is "defined" at its first line.
        return (pymodule.get_module(), 1)
class ImportedName(PyName):
    """A name imported from another module (``from mod import name``)."""
    def __init__(self, imported_module, imported_name):
        self.imported_module = imported_module
        self.imported_name = imported_name
    def _get_imported_pyname(self):
        # Look the name up in the source module; fall back to an
        # unbound name when missing or self-referential.
        try:
            result = self.imported_module.get_object()[self.imported_name]
            if result != self:
                return result
        except exceptions.AttributeNotFoundError:
            pass
        return UnboundName()
    # The decorators below short-circuit recursive import chains.
    @utils.prevent_recursion(rope.base.pyobjects.get_unknown)
    def get_object(self):
        return self._get_imported_pyname().get_object()
    @utils.prevent_recursion(lambda: (None, None))
    def get_definition_location(self):
        return self._get_imported_pyname().get_definition_location()
def _get_concluded_data(module):
if module is None:
return rope.base.pyobjects._ConcludedData()
return module._get_concluded_data()
def _circular_inference():
    # Used as the `prevent_recursion` fallback for `_Inferred.get`.
    raise rope.base.pyobjects.IsBeingInferredError(
        'Circular Object Inference')
class _Inferred(object):
    """Lazily computed object with optional cross-call caching.

    When `concluded` is given, results live in that shared,
    invalidation-aware slot; otherwise they are kept in `self.temp`.
    """
    def __init__(self, get_inferred, concluded=None):
        self.get_inferred = get_inferred
        self.concluded = concluded
        if self.concluded is None:
            self.temp = None
    @utils.prevent_recursion(_circular_inference)
    def get(self, *args, **kwds):
        """Return the inferred object, computing and caching on demand."""
        if self.concluded is None or self.concluded.get() is None:
            self.set(self.get_inferred(*args, **kwds))
        if self._get() is None:
            # Inference failed; cache the shared "unknown" object so the
            # callback is not re-run for every access.
            self.set(rope.base.pyobjects.get_unknown())
        return self._get()
    def set(self, pyobject):
        if self.concluded is not None:
            self.concluded.set(pyobject)
        self.temp = pyobject
    def _get(self):
        if self.concluded is not None:
            return self.concluded.get()
        return self.temp
import os
import re
import rope.base.builtins
from rope.base import exceptions
class PyObjectToTextual(object):
    """For transforming `PyObject` to textual form

    This can be used for storing `PyObjects` in files. Use
    `TextualToPyObject` for converting back.
    """
    def __init__(self, project):
        self.project = project
    def transform(self, pyobject):
        """Transform a `PyObject` to textual form"""
        if pyobject is None:
            return ('none',)
        object_type = type(pyobject)
        try:
            # Dispatch on the concrete class name, e.g. PyFunction ->
            # PyFunction_to_textual.
            method = getattr(self, object_type.__name__ + '_to_textual')
            return method(pyobject)
        except AttributeError:
            # No handler for this type.  NOTE(review): this also swallows
            # AttributeErrors raised *inside* a handler -- confirm intended.
            return ('unknown',)
    def __call__(self, pyobject):
        return self.transform(pyobject)
    def PyObject_to_textual(self, pyobject):
        if isinstance(pyobject.get_type(), rope.base.pyobjects.AbstractClass):
            result = self.transform(pyobject.get_type())
            if result[0] == 'defined':
                # An instance of a project-defined class.
                return ('instance', result)
            return result
        return ('unknown',)
    def PyFunction_to_textual(self, pyobject):
        return self._defined_to_textual(pyobject)
    def PyClass_to_textual(self, pyobject):
        return self._defined_to_textual(pyobject)
    def _defined_to_textual(self, pyobject):
        # Encode as ('defined', <module path>, 'Outer.inner' dotted address).
        address = []
        while pyobject.parent is not None:
            address.insert(0, pyobject.get_name())
            pyobject = pyobject.parent
        return ('defined', self._get_pymodule_path(pyobject.get_module()),
                '.'.join(address))
    def PyModule_to_textual(self, pyobject):
        return ('defined', self._get_pymodule_path(pyobject))
    def PyPackage_to_textual(self, pyobject):
        return ('defined', self._get_pymodule_path(pyobject))
    def List_to_textual(self, pyobject):
        return ('builtin', 'list', self.transform(pyobject.holding))
    def Dict_to_textual(self, pyobject):
        return ('builtin', 'dict', self.transform(pyobject.keys),
                self.transform(pyobject.values))
    def Tuple_to_textual(self, pyobject):
        objects = [self.transform(holding)
                   for holding in pyobject.get_holding_objects()]
        return tuple(['builtin', 'tuple'] + objects)
    def Set_to_textual(self, pyobject):
        return ('builtin', 'set', self.transform(pyobject.holding))
    def Iterator_to_textual(self, pyobject):
        return ('builtin', 'iter', self.transform(pyobject.holding))
    def Generator_to_textual(self, pyobject):
        return ('builtin', 'generator', self.transform(pyobject.holding))
    def Str_to_textual(self, pyobject):
        return ('builtin', 'str')
    def File_to_textual(self, pyobject):
        return ('builtin', 'file')
    def BuiltinFunction_to_textual(self, pyobject):
        return ('builtin', 'function', pyobject.get_name())
    def _get_pymodule_path(self, pymodule):
        return self.resource_to_path(pymodule.get_resource())
    def resource_to_path(self, resource):
        # Project files are stored relative to the project root;
        # foreign files keep their absolute path.
        if resource.project == self.project:
            return resource.path
        else:
            return resource.real_path
class TextualToPyObject(object):
    """For transforming textual form to `PyObject`"""
    def __init__(self, project, allow_in_project_absolutes=False):
        # NOTE(review): `allow_in_project_absolutes` is accepted but never
        # stored or read in this class -- confirm whether
        # `path_to_resource` was meant to honor it.
        self.project = project
    def __call__(self, textual):
        return self.transform(textual)
    def transform(self, textual):
        """Transform an object from textual form to `PyObject`"""
        if textual is None:
            return None
        type = textual[0]
        try:
            # Dispatch on the leading tag, e.g. ('builtin', ...) goes to
            # builtin_to_pyobject.
            method = getattr(self, type + '_to_pyobject')
            return method(textual)
        except AttributeError:
            return None
    def builtin_to_pyobject(self, textual):
        # The second field names the concrete builtin handler.
        method = getattr(self, 'builtin_%s_to_pyobject' % textual[1], None)
        if method is not None:
            return method(textual)
    def builtin_str_to_pyobject(self, textual):
        return rope.base.builtins.get_str()
    def builtin_list_to_pyobject(self, textual):
        holding = self.transform(textual[2])
        return rope.base.builtins.get_list(holding)
    def builtin_dict_to_pyobject(self, textual):
        keys = self.transform(textual[2])
        values = self.transform(textual[3])
        return rope.base.builtins.get_dict(keys, values)
    def builtin_tuple_to_pyobject(self, textual):
        objects = []
        for holding in textual[2:]:
            objects.append(self.transform(holding))
        return rope.base.builtins.get_tuple(*objects)
    def builtin_set_to_pyobject(self, textual):
        holding = self.transform(textual[2])
        return rope.base.builtins.get_set(holding)
    def builtin_iter_to_pyobject(self, textual):
        holding = self.transform(textual[2])
        return rope.base.builtins.get_iterator(holding)
    def builtin_generator_to_pyobject(self, textual):
        holding = self.transform(textual[2])
        return rope.base.builtins.get_generator(holding)
    def builtin_file_to_pyobject(self, textual):
        return rope.base.builtins.get_file()
    def builtin_function_to_pyobject(self, textual):
        if textual[2] in rope.base.builtins.builtins:
            return rope.base.builtins.builtins[textual[2]].get_object()
    def unknown_to_pyobject(self, textual):
        return None
    def none_to_pyobject(self, textual):
        return None
    def _module_to_pyobject(self, textual):
        path = textual[1]
        return self._get_pymodule(path)
    def _hierarchical_defined_to_pyobject(self, textual):
        # Resolve ('defined', path, 'a.b.c') by walking nested scopes.
        path = textual[1]
        names = textual[2].split('.')
        pymodule = self._get_pymodule(path)
        pyobject = pymodule
        for name in names:
            if pyobject is None:
                return None
            if isinstance(pyobject, rope.base.pyobjects.PyDefinedObject):
                try:
                    pyobject = pyobject.get_scope()[name].get_object()
                except exceptions.NameNotFoundError:
                    return None
            else:
                return None
        return pyobject
    def defined_to_pyobject(self, textual):
        # An empty dotted name means the module/package itself.
        if len(textual) == 2 or textual[2] == '':
            return self._module_to_pyobject(textual)
        else:
            return self._hierarchical_defined_to_pyobject(textual)
    def instance_to_pyobject(self, textual):
        type = self.transform(textual[1])
        if type is not None:
            return rope.base.pyobjects.PyObject(type)
    def _get_pymodule(self, path):
        resource = self.path_to_resource(path)
        if resource is not None:
            return self.project.get_pymodule(resource)
    def path_to_resource(self, path):
        """Map a stored path back to a resource, or `None` when invalid."""
        try:
            root = self.project.address
            if not os.path.isabs(path):
                return self.project.get_resource(path)
            if path == root or path.startswith(root + os.sep):
                # INFO: This is a project file; should not be absolute
                return None
            import rope.base.project
            return rope.base.project.get_no_project().get_resource(path)
        except exceptions.ResourceNotFoundError:
            return None
class DOITextualToPyObject(TextualToPyObject):
    """For transforming textual form to `PyObject`

    The textual form DOI uses is different from rope's standard
    textual form.  The reason is that we cannot find the needed
    information by analyzing live objects.  This class can be
    used to transform DOI textual form to `PyObject` and later
    we can convert it to standard textual form using
    `TextualToPyObject` class.
    """
    def _function_to_pyobject(self, textual):
        # ('defined', path, lineno): resolve by line number, since DOI
        # only records where the function was defined.
        path = textual[1]
        lineno = int(textual[2])
        pymodule = self._get_pymodule(path)
        if pymodule is not None:
            scope = pymodule.get_scope()
            inner_scope = scope.get_inner_scope_for_line(lineno)
            return inner_scope.pyobject
    def _class_to_pyobject(self, textual):
        # ('defined', path, name): prefer a module-level name lookup,
        # falling back to scanning the source for a class statement.
        path, name = textual[1:]
        pymodule = self._get_pymodule(path)
        if pymodule is None:
            return None
        module_scope = pymodule.get_scope()
        suspected = None
        if name in module_scope.get_names():
            suspected = module_scope[name].get_object()
        if suspected is not None and \
           isinstance(suspected, rope.base.pyobjects.PyClass):
            return suspected
        else:
            lineno = self._find_occurrence(name,
                                           pymodule.get_resource().read())
            if lineno is not None:
                inner_scope = module_scope.get_inner_scope_for_line(lineno)
                return inner_scope.pyobject
    def defined_to_pyobject(self, textual):
        if len(textual) == 2:
            return self._module_to_pyobject(textual)
        else:
            # A digit third field means a function (lineno), else a class.
            if textual[2].isdigit():
                result = self._function_to_pyobject(textual)
            else:
                result = self._class_to_pyobject(textual)
            if not isinstance(result, rope.base.pyobjects.PyModule):
                return result
    def _find_occurrence(self, name, source):
        """Return the 1-based line of ``class <name>`` in `source`, or `None`."""
        pattern = re.compile(r'^\s*class\s*' + name + r'\b')
        lines = source.split('\n')
        for i in range(len(lines)):
            if pattern.match(lines[i]):
                return i + 1
    def path_to_resource(self, path):
        import rope.base.libutils
        # DOI reports absolute paths; convert to project-relative first.
        relpath = rope.base.libutils.path_relative_to_project_root(
            self.project, path)
        if relpath is not None:
            path = relpath
        return super(DOITextualToPyObject, self).path_to_resource(path)
import rope.base.builtins
import rope.base.pynames
import rope.base.pyobjects
from rope.base import evaluate, utils, arguments
from rope.base.oi.type_hinting.factory import get_type_hinting_factory
# Decorator that ignores IsBeingInferredError so that cyclic inference
# degrades gracefully instead of propagating the exception.
_ignore_inferred = utils.ignore_exception(
    rope.base.pyobjects.IsBeingInferredError)
@_ignore_inferred
def infer_returned_object(pyfunction, args):
    """Infer the `PyObject` this `PyFunction` returns after calling"""
    object_info = pyfunction.pycore.object_info
    # 1) exact dynamic information recorded for these arguments
    result = object_info.get_exact_returned(pyfunction, args)
    if result is not None:
        return result
    # 2) static inference from the function's return statements
    result = _infer_returned(pyfunction, args)
    if result is not None:
        if args and pyfunction.get_module().get_resource() is not None:
            # Remember this call so later queries can reuse the result.
            params = args.get_arguments(
                pyfunction.get_param_names(special_args=False))
            object_info.function_called(pyfunction, params, result)
        return result
    # 3) any recorded return value regardless of arguments
    result = object_info.get_returned(pyfunction, args)
    if result is not None:
        return result
    # 4) docstring / type-comment return hints
    hint_return = get_type_hinting_factory(pyfunction.pycore.project).make_return_provider()
    type_ = hint_return(pyfunction)
    if type_ is not None:
        return rope.base.pyobjects.PyObject(type_)
@_ignore_inferred
def infer_parameter_objects(pyfunction):
    """Infer the `PyObject` of each parameter of this `PyFunction`."""
    object_info = pyfunction.pycore.object_info
    # Prefer dynamically collected information; fall back to static hints.
    result = object_info.get_parameter_objects(pyfunction)
    if result is None:
        result = _parameter_objects(pyfunction)
    # Fix up the implicit first parameter of methods/classmethods.
    _handle_first_parameter(pyfunction, result)
    return result
def _handle_first_parameter(pyobject, parameters):
kind = pyobject.get_kind()
if parameters is None or kind not in ['method', 'classmethod']:
pass
if not parameters:
if not pyobject.get_param_names(special_args=False):
return
parameters.append(rope.base.pyobjects.get_unknown())
if kind == 'method':
parameters[0] = rope.base.pyobjects.PyObject(pyobject.parent)
if kind == 'classmethod':
parameters[0] = pyobject.parent
@_ignore_inferred
def infer_assigned_object(pyname):
    """Infer the object assigned to `pyname` from its assignment history."""
    if not pyname.assignments:
        return
    # Walk assignments newest-first; a NotImplemented or "unknown" result
    # aborts the walk and falls through to type hints below.
    for assignment in reversed(pyname.assignments):
        result = _infer_assignment(assignment, pyname.module)
        if isinstance(result, rope.base.builtins.BuiltinUnknown) and result.get_name() == 'NotImplementedType':
            break
        elif result == rope.base.pyobjects.get_unknown():
            break
        elif result is not None:
            return result
    hint_assignment = get_type_hinting_factory(pyname.module.pycore.project).make_assignment_provider()
    hinting_result = hint_assignment(pyname)
    if hinting_result is not None:
        return rope.base.pyobjects.PyObject(hinting_result)
    # May still be the unknown/NotImplemented marker from the loop above.
    return result
def get_passed_objects(pyfunction, parameter_index):
    """Return objects observed (or statically inferred) for one parameter."""
    object_info = pyfunction.pycore.object_info
    result = object_info.get_passed_objects(pyfunction,
                                            parameter_index)
    if not result:
        # No dynamic data recorded; fall back to static inference.
        inferred = _parameter_objects(pyfunction)
        if parameter_index < len(inferred):
            result.append(inferred[parameter_index])
    return result
def _infer_returned(pyobject, args):
    """Statically infer the returned object from `return` expressions."""
    if args:
        # HACK: Setting parameter objects manually
        # This is not thread safe and might cause problems if `args`
        # does not come from a good call site
        pyobject.get_scope().invalidate_data()
        pyobject._set_parameter_pyobjects(
            args.get_arguments(pyobject.get_param_names(special_args=False)))
    scope = pyobject.get_scope()
    if not scope._get_returned_asts():
        return
    # Only evaluate the last few return expressions, newest first.
    maxtries = 3
    for returned_node in reversed(scope._get_returned_asts()[-maxtries:]):
        try:
            resulting_pyname = evaluate.eval_node(scope, returned_node)
            if resulting_pyname is None:
                continue
            pyobject = resulting_pyname.get_object()
            if pyobject == rope.base.pyobjects.get_unknown():
                continue
            if not scope._is_generator():
                return pyobject
            else:
                # Generator functions produce a generator of the value.
                return rope.base.builtins.get_generator(pyobject)
        except rope.base.pyobjects.IsBeingInferredError:
            pass
def _parameter_objects(pyobject):
    """Build one inferred `PyObject` per (non-special) parameter."""
    names = pyobject.get_param_names(special_args=False)
    hint_param = get_type_hinting_factory(pyobject.pycore.project).make_param_provider()
    objects = []
    for name in names:
        hinted = hint_param(pyobject, name)
        if hinted is None:
            # No hint available: fall back to the shared unknown object.
            objects.append(rope.base.pyobjects.get_unknown())
        else:
            objects.append(rope.base.pyobjects.PyObject(hinted))
    return objects
# handling `rope.base.pynames.AssignmentValue`
@_ignore_inferred
def _infer_assignment(assignment, pymodule):
    """Infer the object one `AssignmentValue` produces, or `None`."""
    followed = _follow_pyname(assignment, pymodule)
    if followed is None:
        return None
    pyname, pyobject = followed
    pyobject = _follow_evaluations(assignment, pyname, pyobject)
    if pyobject is None:
        return None
    # Finally apply tuple-unpacking indices, if any.
    return _follow_levels(assignment, pyobject)
def _follow_levels(assignment, pyobject):
    """Drill into tuple/list objects following tuple-assignment indices."""
    for index in assignment.levels:
        type_ = pyobject.get_type()
        if isinstance(type_, rope.base.builtins.Tuple):
            holdings = type_.get_holding_objects()
            if holdings:
                # Clamp to the last element for out-of-range indices.
                pyobject = holdings[min(len(holdings) - 1, index)]
            else:
                pyobject = None
        elif isinstance(type_, rope.base.builtins.List):
            pyobject = type_.holding
        else:
            pyobject = None
        if pyobject is None:
            break
    return pyobject
@_ignore_inferred
def _follow_pyname(assignment, pymodule, lineno=None):
    """Evaluate the assigned AST node; return a ``(pyname, pyobject)`` pair.

    Returns `None` when the expression cannot be evaluated.
    """
    assign_node = assignment.type_hint or assignment.ast_node
    if lineno is None:
        lineno = _get_lineno_for_node(assign_node)
    holding_scope = pymodule.get_scope().get_inner_scope_for_line(lineno)
    pyname = evaluate.eval_node(holding_scope, assign_node)
    if pyname is not None:
        result = pyname.get_object()
        if isinstance(result.get_type(), rope.base.builtins.Property) and \
           holding_scope.get_kind() == 'Class':
            # A property accessed in class scope: return what the property
            # getter yields for an instance of the holding class.
            arg = rope.base.pynames.UnboundName(
                rope.base.pyobjects.PyObject(holding_scope.pyobject))
            return pyname, result.get_type().get_property_object(
                arguments.ObjectArguments([arg]))
        return pyname, result
@_ignore_inferred
def _follow_evaluations(assignment, pyname, pyobject):
    """Walk the dotted `evaluation` string (e.g. ``'attr.method()'``).

    Each token is an attribute access; a trailing ``()`` means the result
    of *calling* that attribute is followed instead.
    """
    new_pyname = pyname
    tokens = assignment.evaluation.split('.')
    for token in tokens:
        call = token.endswith('()')
        if call:
            token = token[:-2]
        if token:
            pyname = new_pyname
            new_pyname = _get_attribute(pyobject, token)
            if new_pyname is not None:
                pyobject = new_pyname.get_object()
        if pyobject is not None and call:
            if isinstance(pyobject, rope.base.pyobjects.AbstractFunction):
                args = arguments.ObjectArguments([pyname])
                pyobject = pyobject.get_returned_object(args)
            else:
                # Calling something that is not a function: give up.
                pyobject = None
        if pyobject is None:
            break
    if pyobject is not None and assignment.assign_type:
        # `assign_type` means the *type* was assigned; return an instance.
        return rope.base.pyobjects.PyObject(pyobject)
    return pyobject
def _get_lineno_for_node(assign_node):
if hasattr(assign_node, 'lineno') and \
assign_node.lineno is not None:
return assign_node.lineno
return 1
def _get_attribute(pyobject, name):
if pyobject is not None and name in pyobject:
return pyobject[name] | /ropee-0.13.3.tar.gz/ropee-0.13.3/rope/base/oi/soi.py | 0.519034 | 0.182863 | soi.py | pypi |
from rope.base.oi.type_hinting import interfaces
from rope.base.oi.type_hinting.providers import (
composite, inheritance, docstrings, numpydocstrings, pep0484_type_comments
)
from rope.base.oi.type_hinting.resolvers import composite as composite_resolvers, types
from rope.base import utils
class TypeHintingFactory(interfaces.ITypeHintingFactory):
    """Default factory assembling rope's stock type-hint providers."""
    @utils.saveit
    def make_param_provider(self):
        """Return the provider used for parameter type hints."""
        providers = [
            docstrings.ParamProvider(docstrings.DocstringParamParser(), self.make_resolver()),
            docstrings.ParamProvider(numpydocstrings.NumPyDocstringParamParser(), self.make_resolver()),
        ]
        # The inheritance wrapper also consults superclass definitions.
        return inheritance.ParamProvider(composite.ParamProvider(*providers))
    @utils.saveit
    def make_return_provider(self):
        """Return the provider used for return-type hints."""
        providers = [
            docstrings.ReturnProvider(docstrings.DocstringReturnParser(), self.make_resolver()),
        ]
        return inheritance.ReturnProvider(composite.ReturnProvider(*providers))
    @utils.saveit
    def make_assignment_provider(self):
        """Return the provider used for attribute/assignment hints."""
        providers = [
            pep0484_type_comments.AssignmentProvider(self.make_resolver()),
            docstrings.AssignmentProvider(docstrings.DocstringParamParser(), self.make_resolver()),
            docstrings.AssignmentProvider(numpydocstrings.NumPyDocstringParamParser(), self.make_resolver()),
        ]
        return inheritance.AssignmentProvider(composite.AssignmentProvider(*providers))
    @utils.saveit
    def make_resolver(self):
        """
        :rtype: rope.base.oi.type_hinting.resolvers.interfaces.IResolver
        """
        resolvers = [
            types.Resolver(),
        ]
        return composite_resolvers.Resolver(*resolvers)
# Shared module-level instance; its dotted path is the default value of the
# `type_hinting_factory` project preference.
default_type_hinting_factory = TypeHintingFactory()
class TypeHintingFactoryAccessor(object):
    """Look up the type-hinting factory configured for a project."""
    def __call__(self, project):
        """
        :type project: rope.base.project.Project
        :rtype: rope.base.oi.type_hinting.interfaces.ITypeHintingFactory
        """
        # Projects may override the factory via the `type_hinting_factory`
        # preference (a dotted import path).
        factory_location = project.get_prefs().get(
            'type_hinting_factory',
            'rope.base.oi.type_hinting.factory.default_type_hinting_factory'
        )
        return self._get_factory(factory_location)
    @utils.cached(10)
    def _get_factory(self, factory_location):
        """
        :type factory_location: str
        :rtype: rope.base.oi.type_hinting.interfaces.ITypeHintingFactory
        """
        return utils.resolve(factory_location)
# Module-level callable mapping a project to its configured factory.
get_type_hinting_factory = TypeHintingFactoryAccessor()
import re
from rope.base.oi.type_hinting import utils
from rope.base.oi.type_hinting.providers import interfaces
class ParamProvider(interfaces.IParamProvider):
    """Derive a parameter's type from a function's docstring."""
    def __init__(self, docstring_parser, resolver):
        """
        :type docstring_parser: rope.base.oi.type_hinting.providers.docstrings.IParamParser
        :type resolver: rope.base.oi.type_hinting.resolvers.interfaces.IResolver
        """
        self._parse_docstring = docstring_parser
        self._resolve = resolver
    def __call__(self, pyfunc, param_name):
        """
        :type pyfunc: rope.base.pyobjectsdef.PyFunction
        :type param_name: str
        :rtype: rope.base.pyobjects.PyDefinedObject | rope.base.pyobjects.PyObject or None
        """
        candidates = self._parse_docstring(pyfunc.get_doc(), param_name)
        if not candidates:
            return None
        # Resolve only the first documented type.
        return self._resolve(candidates[0], pyfunc)
class ReturnProvider(interfaces.IReturnProvider):
    """Derive a function's return type from its docstring."""
    def __init__(self, docstring_parser, resolver):
        """
        :type docstring_parser: rope.base.oi.type_hinting.providers.docstrings.IReturnParser
        :type resolver: rope.base.oi.type_hinting.resolvers.interfaces.IResolver
        """
        self._parse_docstring = docstring_parser
        self._resolve = resolver
    def __call__(self, pyfunc):
        """
        :type pyfunc: rope.base.pyobjectsdef.PyFunction
        :rtype: rope.base.pyobjects.PyDefinedObject | rope.base.pyobjects.PyObject or None
        """
        candidates = self._parse_docstring(pyfunc.get_doc())
        if not candidates:
            return None
        return self._resolve(candidates[0], pyfunc)
class AssignmentProvider(interfaces.IAssignmentProvider):
    """Derive an attribute's type from its owning class's docstring."""
    def __init__(self, docstring_parser, resolver):
        """
        :type docstring_parser: rope.base.oi.type_hinting.providers.docstrings.IParamParser
        :type resolver: rope.base.oi.type_hinting.resolvers.interfaces.IResolver
        """
        self._parse_docstring = docstring_parser
        self._resolve = resolver
    def __call__(self, pyname):
        """
        :type pyname: rope.base.pynamesdef.AssignedName
        :rtype: rope.base.pyobjects.PyDefinedObject | rope.base.pyobjects.PyObject or None
        """
        # The helper returns None (unpack raises TypeError) when `pyname`
        # is not an attribute of a locatable class.
        try:
            pyclass, attr_name = utils.get_class_with_attr_name(pyname)
        except TypeError:
            return None
        candidates = self._parse_docstring(pyclass.get_doc(), attr_name)
        if candidates:
            return self._resolve(candidates[0], pyclass)
class IParamParser(object):
    """Interface for extracting a parameter's type(-s) from a docstring."""
    def __call__(self, docstring, param_name):
        """
        :type docstring: str
        :type param_name: str
        """
class IReturnParser(object):
    """Interface for extracting a return type from a docstring."""
    def __call__(self, docstring):
        """
        :type docstring: str
        """
class DocstringParamParser(IParamParser):
    """Extract parameter types from Sphinx/Epydoc style docstrings."""
    DOCSTRING_PARAM_PATTERNS = [
        r'\s*:type\s+%s:\s*([^\n]+)',  # Sphinx
        r'\s*:param\s+(\w+)\s+%s:[^\n]+',  # Sphinx param with type
        r'\s*@type\s+%s:\s*([^\n]+)',  # Epydoc
    ]
    def __init__(self):
        self._strip_rst_role = RSTRoleStrip()
    def __call__(self, docstring, param_name):
        """Search `docstring` for type(-s) of `param_name`.

        >>> DocstringParamParser()(':type param: int', 'param')
        ['int']
        >>> DocstringParamParser()('@type param: int', 'param')
        ['int']
        >>> DocstringParamParser()(':type param: :class:`threading.Thread`', 'param')
        ['threading.Thread']
        >>> bool(DocstringParamParser()('no document', 'param'))
        False
        >>> DocstringParamParser()(':param int param: some description', 'param')
        ['int']
        """
        if not docstring:
            return []
        escaped = re.escape(param_name)
        # Try each pattern in declaration order; first match wins.
        for template in self.DOCSTRING_PARAM_PATTERNS:
            match = re.compile(template % escaped).search(docstring)
            if match:
                return [self._strip_rst_role(match.group(1))]
        return []
class DocstringReturnParser(IReturnParser):
    """Extract return types from Sphinx/Epydoc style docstrings."""
    DOCSTRING_RETURN_PATTERNS = [
        re.compile(r'\s*:rtype:\s*([^\n]+)', re.M),  # Sphinx
        re.compile(r'\s*@rtype:\s*([^\n]+)', re.M),  # Epydoc
    ]
    def __init__(self):
        self._strip_rst_role = RSTRoleStrip()
    def __call__(self, docstring):
        if not docstring:
            return []
        for pattern in self.DOCSTRING_RETURN_PATTERNS:
            found = pattern.search(docstring)
            if found:
                return [self._strip_rst_role(found.group(1))]
        return []
class RSTRoleStrip(object):
    """Strip a leading ReST role (``:class:`X``` becomes ``X``)."""
    RST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`')
    def __call__(self, type_str):
        """Return `type_str` without a surrounding ReST role, if any.

        >>> RSTRoleStrip()(':class:`ClassName`')  # strip off :class:
        'ClassName'
        >>> RSTRoleStrip()(':py:obj:`module.Object`')  # works with domain
        'module.Object'
        >>> RSTRoleStrip()('ClassName')  # do nothing when not ReST role
        'ClassName'

        See also:
        http://sphinx-doc.org/domains.html#cross-referencing-python-objects
        """
        match = self.RST_ROLE_PATTERN.match(type_str)
        if match is None:
            return type_str
        return match.group(1)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.