| id (string, length 3–8) | content (string, length 100–981k) |
|---|---|
127766
|
import falcon
import pytest
from falcon import testing
from poseidon_api.api import api
@pytest.fixture
def client():
return testing.TestClient(api)
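# falcon's testing.TestClient drives the WSGI app in-process, so the tests below can
# simulate requests without starting a server.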
def test_v1(client):
response = client.simulate_get('/v1')
assert response.status == falcon.HTTP_OK
def test_network(client):
response = client.simulate_get('/v1/network')
assert len(response.json) == 2
assert response.status == falcon.HTTP_OK
def test_network_by_ip(client):
response = client.simulate_get('/v1/network/10.0.0.1')
assert len(response.json['dataset']) == 0
assert response.status == falcon.HTTP_OK
def test_network_full(client):
response = client.simulate_get('/v1/network_full')
assert len(response.json) == 1
assert response.status == falcon.HTTP_OK
def test_info(client):
response = client.simulate_get('/v1/info')
assert response.status == falcon.HTTP_OK
|
127793
|
import pyredner
import torch
pyredner.set_use_gpu(torch.cuda.is_available())
position = torch.tensor([1.0, 0.0, -3.0])
look_at = torch.tensor([1.0, 0.0, 0.0])
up = torch.tensor([0.0, 1.0, 0.0])
fov = torch.tensor([45.0])
clip_near = 1e-2
# randomly generate distortion parameters
torch.manual_seed(1234)
target_distort_params = (torch.rand(8) - 0.5) * 0.05
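# These eight coefficients parameterize the camera lens distortion; drawing them from
# [-0.025, 0.025] keeps the warp of the target image mild.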
resolution = (256, 256)
cam = pyredner.Camera(position = position,
look_at = look_at,
up = up,
fov = fov,
clip_near = clip_near,
resolution = resolution,
distortion_params = target_distort_params)
checkerboard_texture = pyredner.imread('scenes/teapot.png')
if pyredner.get_use_gpu():
checkerboard_texture = checkerboard_texture.cuda(device = pyredner.get_device())
mat_checkerboard = pyredner.Material(\
diffuse_reflectance = checkerboard_texture)
mat_black = pyredner.Material(\
diffuse_reflectance = torch.tensor([0.0, 0.0, 0.0], device = pyredner.get_device()))
plane = pyredner.Object(vertices = torch.tensor([[-1.0,-1.0, 0.0],
[-1.0, 1.0, 0.0],
[ 1.0,-1.0, 0.0],
[ 1.0, 1.0, 0.0]],
device = pyredner.get_device()),
indices = torch.tensor([[0, 1, 2],
[1, 3, 2]],
dtype = torch.int32,
device = pyredner.get_device()),
uvs = torch.tensor([[0.05, 0.05],
[0.05, 0.95],
[0.95, 0.05],
[0.95, 0.95]], device = pyredner.get_device()),
material = mat_checkerboard)
scene = pyredner.Scene(camera=cam, objects=[plane])
img = pyredner.render_albedo(scene=scene)
pyredner.imwrite(img.cpu(), 'results/test_camera_distortion/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_camera_distortion/target.png')
# Read the target image we just saved.
target = pyredner.imread('results/test_camera_distortion/target.exr')
if pyredner.get_use_gpu():
target = target.cuda(device = pyredner.get_device())
cam.distortion_params = torch.zeros(8, requires_grad = True)
scene = pyredner.Scene(camera=cam, objects=[plane])
img = pyredner.render_albedo(scene=scene)
pyredner.imwrite(img.cpu(), 'results/test_camera_distortion/init.exr')
pyredner.imwrite(img.cpu(), 'results/test_camera_distortion/init.png')
# Optimize for the camera distortion parameters.
optimizer = torch.optim.Adam([cam.distortion_params], lr=1e-3)
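# Each iteration re-renders the scene with the current distortion estimate, compares it to the
# target with a pixel-wise squared error, and lets Adam update the eight coefficients.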
for t in range(200):
print('iteration:', t)
optimizer.zero_grad()
scene = pyredner.Scene(camera=cam, objects=[plane])
img = pyredner.render_albedo(scene=scene)
pyredner.imwrite(img.cpu(), 'results/test_camera_distortion/iter_{}.png'.format(t))
loss = (img - target).pow(2).sum()
print('loss:', loss.item())
loss.backward()
print('grad:', cam.distortion_params.grad)
optimizer.step()
print('distortion_params:', cam.distortion_params)
img = pyredner.render_albedo(scene=scene)
pyredner.imwrite(img.cpu(), 'results/test_camera_distortion/final.exr')
pyredner.imwrite(img.cpu(), 'results/test_camera_distortion/final.png')
# Convert the intermediate renderings to a video.
from subprocess import call
call(["ffmpeg", "-framerate", "24", "-i",
"results/test_camera_distortion/iter_%d.png", "-vb", "20M",
"results/test_camera_distortion/out.mp4"])
|
127802
|
from pathlib import Path
import pytest
from ics import Calendar, Event
from pythoncz.models.events import (preprocess_ical, find_first_url,
set_url_from_description)
def test_preprocess_ical():
path = Path(__file__).parent / 'invalid_ical.ics'
lines = preprocess_ical(path.read_text())
calendar = Calendar(lines)
assert calendar
assert calendar.events[0]
# there are two alarms in the file, one type AUDIO and one type NONE
# NONE has to be removed because ics can't parse it
# the other one should not be removed
assert len(calendar.events[0].alarms) == 1
assert calendar.events[0].alarms[0].action == "AUDIO"
@pytest.mark.parametrize('text,expected', [
(None, None),
('', None),
('lorem ipsum dolor sit amet', None),
('https://python.cz', 'https://python.cz'),
('http://python.cz', 'http://python.cz'),
('lorem ipsum https://python.cz dolor sit amet', 'https://python.cz'),
('lorem https://python.cz ipsum https://pyvo.cz', 'https://python.cz'),
])
def test_find_first_url(text, expected):
assert find_first_url(text) == expected
@pytest.mark.parametrize('event,expected_url', [
(Event(), None),
(Event(url='https://python.cz'), 'https://python.cz'),
(Event(description='https://pyvo.cz', url='https://python.cz'),
'https://python.cz'),
(Event(description='https://pyvo.cz'), 'https://pyvo.cz'),
(Event(description='''
See: https://www.meetup.com/PyData-Prague/events/257775220
Looking forward to see you!
'''),
'https://www.meetup.com/PyData-Prague/events/257775220'),
])
def test_set_url_from_description(event, expected_url):
assert set_url_from_description(event).url == expected_url
|
127807
|
import logging
from typing import Optional
log: logging.Logger = logging.getLogger(__name__)
class Object:
"""
Represents a generic Call of Duty object.
Parameters
----------
client : callofduty.Client
Client which manages communication with the Call of Duty API.
"""
_type: Optional[str] = None
def __init__(self, client):
self._client = client
@property
def type(self) -> Optional[str]:
return self._type
def __repr__(self) -> str:
return f"<{self.__class__.__name__}>"
def __str__(self) -> str:
return self.__repr__()
|
127810
|
import sys
import unittest
import pendulum
from src import (
Crypto,
CryptoCommandService,
)
from minos.networks import (
InMemoryRequest,
Response,
)
from tests.utils import (
build_dependency_injector,
)
class TestCryptoCommandService(unittest.IsolatedAsyncioTestCase):
def setUp(self) -> None:
self.injector = build_dependency_injector()
async def asyncSetUp(self) -> None:
await self.injector.wire(modules=[sys.modules[__name__]])
async def asyncTearDown(self) -> None:
await self.injector.unwire()
def test_constructor(self):
service = CryptoCommandService()
self.assertIsInstance(service, CryptoCommandService)
async def test_remote_crypto(self):
now = pendulum.now()
now_minus_one_month = now.subtract(months=1)
service = CryptoCommandService()
response = service.call_remote("BTC/USD", now_minus_one_month.to_datetime_string())
self.assertIsInstance(service, CryptoCommandService)
if __name__ == "__main__":
unittest.main()
|
127833
|
import argparse
import xarray as xr
import numpy as np
import xesmf as xe
from glob import glob
import os
import shutil
def add_2d(
ds,
):
"""
Regrid horizontally.
:param ds: Input xarray dataset
"""
ds['lat2d'] = ds.lat.expand_dims({'lon': ds.lon}).transpose()
ds['lon2d'] = ds.lon.expand_dims({'lat': ds.lat})
return ds
def convert_z_to_orography(ds):
"""
Convert geopotential of surface to height in meters
Args:
ds: Input dataset
Returns:
ds: Same dataset with orography instead of z
"""
ds['z'] = ds.z / 9.80665
ds = ds.rename({'z': 'orography'})
ds.orography.attrs['units'] = 'm'
return ds
def main(
input_fns,
custom_fn=None,
):
"""
:param input_fns: Input files. Can use *. If more than one, loop over them
:param custom_fn: If not None, use custom file name. Otherwise infer from parameters.
"""
# Get files for starred expressions
if '*' in input_fns[0]:
input_fns = sorted(glob(input_fns[0]))
# Loop over input files
for fn in input_fns:
print(f'Extracting from file: {fn}')
ds = xr.open_dataset(fn).isel(time=0).drop('time')
ds = convert_z_to_orography(add_2d(ds))
fn_out = (
custom_fn or fn
)
print(f"Saving file: {fn_out}")
ds.to_netcdf(fn_out+'.tmp')
ds.close()
shutil.move(fn_out+'.tmp', fn_out)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_fns',
type=str,
nargs='+',
help="Input files (full path). Can use *. If more than one, loop over them",
required=True
)
parser.add_argument(
'--custom_fn',
type=str,
help="If not None, use custom file name. Otherwise infer from parameters.",
default=None
)
args = parser.parse_args()
main(
input_fns=args.input_fns,
custom_fn=args.custom_fn,
)
|
127862
|
from setuptools import find_packages, setup
from beacon_api import __license__, __version__, __author__, __description__
setup(
name="beacon_api",
version=__version__,
url="https://beacon-python.rtfd.io/",
project_urls={
"Source": "https://github.com/CSCfi/beacon-python",
},
license=__license__,
author=__author__,
author_email="",
description=__description__,
long_description="",
packages=find_packages(exclude=["tests", "docs"]),
# If any package contains *.json, or config in *.ini, include them:
package_data={"": ["*.json", "*.ini"]},
include_package_data=True,
entry_points={
"console_scripts": [
"beacon=beacon_api.app:main",
"beacon_init=beacon_api.utils.db_load:main",
]
},
platforms="any",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Information Technology",
"Topic :: Internet :: WWW/HTTP :: HTTP Servers",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
install_requires=[
"asyncpg==0.25.0",
"aiohttp==3.8.1",
"Authlib==0.15.5",
"aiohttp-cors==0.7.0",
"jsonschema==4.3.3",
"gunicorn==20.1.0",
"uvloop==0.16.0",
"cyvcf2==0.30.14",
"aiocache==0.11.1",
"ujson==5.1.0",
],
extras_require={
"vcf": [
"numpy==1.22.0",
"cyvcf2==0.30.14",
"Cython==0.29.26",
],
"test": [
"coverage==6.2",
"pytest<6.3",
"pytest-cov==3.0.0",
"testfixtures==6.18.3",
"tox==3.24.5",
"flake8==4.0.1",
"flake8-docstrings==1.6.0",
"aioresponses==0.7.2",
"black==21.12b0",
],
"docs": ["sphinx >= 1.4", "sphinx_rtd_theme==1.0.0"],
},
)
|
127886
|
import zengl
class Context:
context = None
main_uniform_buffer = None
main_uniform_buffer_data = bytearray(b'\x00' * 64)
@classmethod
def initialize(cls):
ctx = zengl.context()
cls.context = ctx
cls.main_uniform_buffer = ctx.buffer(size=64)
ctx.includes['main_uniform_buffer'] = '''
layout (std140) uniform MainUniformBuffer {
mat4 mvp;
};
'''
@classmethod
def update_camera(cls, eye, target, aspect, fov):
cls.main_uniform_buffer_data[0:64] = zengl.camera(eye, target, aspect=aspect, fov=fov)
@classmethod
def flush_uniform_buffer(cls):
cls.main_uniform_buffer.write(cls.main_uniform_buffer_data)
|
127920
|
import re
from collections import defaultdict, Counter
from numbers import Number
from typing import Optional, List, Dict
import numpy as np
import torch
from allennlp.common import FromParams, Registrable
from dataclasses import dataclass, replace
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.cider.cider import Cider
import third_party.detection_metrics.lib.Evaluator as det_evaluator
from gpv2.data.dataset import VqaExample, ClsExample, WebQaExample, LocalizationExample, \
CaptioningExample
from gpv2.data.gpv_datasets import COCO_CATEGORIES
from gpv2.data.synonyms import SYNONYMS
from gpv2.model.model import GPVExampleOutput
from gpv2.train.vqa2_eval_data import punct, periodStrip, commaStrip, manualMap, articles, \
contractions
from gpv2.utils import py_utils
from gpv2.utils.image_utils import get_image_size
from gpv2.utils.quiet_ptbtokenizer import QuitePTBTokenizer
def vqa_score(answer, ground_truth_answer_counts):
gt_answers = {k.lower(): v for k, v in ground_truth_answer_counts.items()}
return min(gt_answers.get(answer, 0) / 3, 1)
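# Standard VQA-style accuracy: the score is capped at 1 once at least three annotators gave
# the predicted answer. Illustrative example: if 2 of the ground-truth answers match, the
# score is min(2/3, 1) ~= 0.67.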
@dataclass(frozen=True)
class ResultKey(FromParams):
"""Key for a result from a model"""
metric_name: str
subset_name: Optional[str] = None
dataset_name: Optional[str] = None
def __str__(self):
out = [self.dataset_name, self.subset_name, self.metric_name]
return "/".join(x for x in out if x is not None)
def __repr__(self):
return str(self)
class Evaluator(Registrable):
"""Computes evaluations metrics"""
def evaluate(
self, examples: List, predictions: Dict[str, GPVExampleOutput],
allow_partial=False, subset_mapping=None
) -> Dict[ResultKey, Number]:
"""Computes corpus wide metrics
:param examples: List of source examples
:param predictions: example key -> model output
:param allow_partial: Allow the predictions to only cover a subset of `examples`,
in which only those predictions should be evaluated
:param subset_mapping: Function that maps example -> list of strings, names of the subsets that
example is part of
"""
raise NotImplementedError()
class PerExampleEvaluator(Evaluator):
"""Computes per-examples evaluations metrics"""
def evaluate_examples(self, examples: List, predictions: Dict[str, GPVExampleOutput])-> List[Dict[str, Number]]:
raise NotImplementedError()
def evaluate(
self,
examples: List,
predictions: Dict[str, GPVExampleOutput],
allow_partial=False,
mean=True,
subset_mapping=None
) -> Dict[ResultKey, Number]:
examples_with_predictions = [x for x in examples if x.get_gpv_id() in predictions]
if not allow_partial and (len(examples) != len(examples_with_predictions)):
raise ValueError(f"Only {len(examples_with_predictions)}/{len(examples)} "
f"of examples have predictions")
examples = examples_with_predictions
per_example_scores = self.evaluate_examples(examples, predictions)
per_metric_scores = py_utils.transpose_list_of_dicts(per_example_scores)
subsets = defaultdict(list)
all_ids = [x.get_gpv_id() for x in examples]
id_to_ix = {k: i for i, k in enumerate(all_ids)}
subsets[None] = list(range(len(all_ids)))
if subset_mapping is not None:
for example in examples:
example_id = id_to_ix[example.get_gpv_id()]
for subset in subset_mapping(example):
subsets[subset].append(example_id)
out = {}
for metric_name, score in per_metric_scores.items():
score = np.array(score)
for subset_name, ixs in subsets.items():
if mean:
out[ResultKey(metric_name, subset_name)] = float(np.mean(score[ixs]))
else:
out[ResultKey(metric_name, subset_name)] = (float(np.sum(score[ixs])), len(ixs))
return out
@Evaluator.register("vqa-evaluator")
class VqaEvaluator(PerExampleEvaluator):
def evaluate_examples(self, examples: List[VqaExample],
predictions: Dict[str, GPVExampleOutput], add_scores=False):
out = []
for example in examples:
answer = predictions[example.gpv_id].text[0]
score = vqa_score(answer.lower(), example.answers)
out.append(dict(score=score))
return out
@Evaluator.register("cls-evaluator")
class ClsEvaluator(PerExampleEvaluator):
def evaluate_examples(self, examples: List[ClsExample], predictions: Dict[str, GPVExampleOutput]):
out = []
for example in examples:
answer = predictions[example.gpv_id].text[0].lower()
gt_answer = SYNONYMS[example.category]
out.append(dict(accuracy=answer in gt_answer))
return out
@Evaluator.register("webqa-evaluator")
class WebQaEvaluator(PerExampleEvaluator):
def evaluate_examples(self, examples: List[WebQaExample], predictions: Dict[str, GPVExampleOutput]):
out = []
for example in examples:
answer = predictions[example.get_gpv_id()].text[0].lower()
gt_answer = SYNONYMS[example.answer] if example.answer in SYNONYMS else [example.answer]
out.append(dict(accuracy=answer in gt_answer))
return out
@Evaluator.register("dce-cls")
class DceClsEvaluator(PerExampleEvaluator):
def __init__(self, top_k: Optional[List[int]]=(5,)):
self.top_k = top_k
def evaluate_examples(self, examples: List[ClsExample], predictions: Dict[str, GPVExampleOutput]):
out = []
for example in examples:
answers = [x.lower() for x in predictions[example.get_gpv_id()].text]
gt = [example.category]
vals = {"accuracy": answers[0] in gt}
if self.top_k is not None:
for k in self.top_k:
assert len(answers) >= k
vals[f"top{k}-acc"] = any(a in gt for a in answers[:k])
out.append(vals)
return out
def compute_vqa_accuracy(
gt_answers: List[str],
pred_answers: List[str]) -> List[float]:
ngt_answers = [preprocess_answer(ans) for ans in gt_answers]
topk_npred_answers = [preprocess_answer(ans) for ans in pred_answers]
gt_consensus = Counter(ngt_answers)
return [vqa_accuracy(ans, gt_consensus) for ans in topk_npred_answers]
def vqa_accuracy(npred_answer: str, gt_consensus: Counter):
return min(gt_consensus[npred_answer]/3,1)
def processPunctuation(inText):
outText = inText
for p in punct:
if (p + ' ' in inText or ' ' + p in inText) or (re.search(commaStrip, inText) != None):
outText = outText.replace(p, '')
else:
outText = outText.replace(p, ' ')
outText = periodStrip.sub("",outText,re.UNICODE)
return outText
def processDigitArticle(inText):
outText = []
tempText = inText.lower().split()
for word in tempText:
word = manualMap.setdefault(word, word)
if word not in articles:
outText.append(word)
else:
pass
for wordId, word in enumerate(outText):
if word in contractions:
outText[wordId] = contractions[word]
outText = ' '.join(outText)
return outText
def preprocess_answer(ans):
ans = ans.replace('\n', ' ')
ans = ans.replace('\t',' ')
ans = ans.lower().strip()
return processDigitArticle(processPunctuation(ans))
@Evaluator.register("opensce-vqa")
class DceVqaEvaluator(PerExampleEvaluator):
def __init__(self, top_k: Optional[List[int]]=(5,)):
self.top_k = top_k
if top_k is not None:
assert all(x > 0 for x in top_k)
def evaluate_examples(self, examples: List[VqaExample], predictions: Dict[str, GPVExampleOutput]):
out = []
for example in examples:
max_k = 1 if self.top_k is None else max(self.top_k)
answers = predictions[example.get_gpv_id()].text[:max_k]
gt = example.answers
scores = compute_vqa_accuracy(gt, answers)
vals = dict(acc=scores[0])
if self.top_k:
for k in self.top_k:
if k > len(scores):
raise ValueError(f"Cannot evaluate top-{k}, but only have top-{len(scores)} predictions")
vals[f"top{k}-acc"] = max(scores[:k])
out.append(vals)
return out
@Evaluator.register("localization-evaluator")
@Evaluator.register("detect-evaluator")
class LocalizationEvaluator(PerExampleEvaluator):
def __init__(self, iou_thresh=0.5):
self.iou_thresh = iou_thresh
def evaluate_examples(self, examples: List[LocalizationExample], predictions: Dict[str, GPVExampleOutput],
return_pr=False):
eval_engine = det_evaluator.Evaluator()
out = []
for i, ex in enumerate(examples):
pred = predictions[ex.gpv_id]
scores = pred.relevance
pred_boxes = pred.boxes.copy()
gt_boxes = np.array(ex.bboxes)
# Convert cx cy, w, h -> x1, y1, w, h
pred_boxes[:, 0] = pred_boxes[:, 0] - 0.5 * pred_boxes[:, 2]
pred_boxes[:, 1] = pred_boxes[:, 1] - 0.5 * pred_boxes[:, 3]
B = pred_boxes.shape[0]
all_boxes = det_evaluator.BoundingBoxes()
W, H = get_image_size(ex.image_id)
for b in range(B):
x, y, w, h = pred_boxes[b]
all_boxes.addBoundingBox(det_evaluator.BoundingBox(
imageName=ex.image_id,
classId=ex.category,
x=x,
y=y,
w=w,
h=h,
typeCoordinates=det_evaluator.CoordinatesType.Relative,
imgSize=(W, H),
bbType=det_evaluator.BBType.Detected,
classConfidence=scores[b],
format=det_evaluator.BBFormat.XYWH))
normalized_gt = all(all(val <= 1.0 for val in b) for b in gt_boxes)
if not normalized_gt:
# convert to relative coordinates
# TODO: it's a bit of a hack to detect this by checking for coordinates > 1.0,
# but we need the check for now since DCE stores relative coordinates while
# COCO uses absolute ones.
W, H = get_image_size(ex.image_id)
gt_boxes[:, 0] = gt_boxes[:, 0] / W
gt_boxes[:, 1] = gt_boxes[:, 1] / H
gt_boxes[:, 2] = gt_boxes[:, 2] / W
gt_boxes[:, 3] = gt_boxes[:, 3] / H
B = gt_boxes.shape[0]
for b in range(B):
x, y, w, h = gt_boxes[b]
all_boxes.addBoundingBox(det_evaluator.BoundingBox(
imageName=ex.image_id,
classId=ex.category,
x=x,
y=y,
w=w,
h=h,
typeCoordinates=det_evaluator.CoordinatesType.Relative,
imgSize=(W, H),
bbType=det_evaluator.BBType.GroundTruth,
format=det_evaluator.BBFormat.XYWH))
det_metrics = eval_engine.GetPascalVOCMetrics(all_boxes, self.iou_thresh)
if return_pr:
out.append(det_metrics[0])
else:
out.append({"AP": det_metrics[0]['AP']})
return out
def get_per_caption_data(examples: List[CaptioningExample], predictions):
# In per-caption evaluation the model makes one prediction for each ground-truth
# caption, each of which is still evaluated against all the captions.
caption_examples = []
caption_predictions = {}
for ex in examples:
pred = predictions[ex.gpv_id]
for cap in ex.captions:
caption_examples.append(CaptioningExample(cap.gpv_id, ex.image_id, ex.captions))
caption_predictions[cap.gpv_id] = pred
return caption_examples, caption_predictions
@Evaluator.register("cap-evaluator")
class CaptionEvaluator(Evaluator):
def __init__(self, cider=True, bleu=4, per_caption=False):
self.cider = cider
self.bleu = bleu
self.per_caption = per_caption
scorers = {}
if cider:
# from exp.ours.eval.fast_cider import FastCider
scorers["cider"] = Cider()
if bleu:
scorers["bleu"] = Bleu(bleu)
self.scorers = scorers
self.tokenizer = QuitePTBTokenizer()
def evaluate(
self,
examples: List,
predictions: Dict[str, GPVExampleOutput],
allow_partial=False,
subset_mapping=None,
):
examples_with_predictions = [x for x in examples if x.get_gpv_id() in predictions]
if not allow_partial and (len(examples) != len(examples_with_predictions)):
raise ValueError(f"Only {len(examples_with_predictions)}/{len(examples)} "
f"of examples have predictions")
examples = examples_with_predictions
if self.per_caption:
examples, predictions = get_per_caption_data(examples, predictions)
subsets = defaultdict(list)
subsets[None] = examples
if subset_mapping is not None:
for example in examples:
example_subsets = subset_mapping(example)
for subset in example_subsets:
subsets[subset].append(example)
out = {}
for subset_name, examples in subsets.items():
all_scores = self._get_scores(examples, predictions)
results = {}
for name, scorer in self.scorers.items():
corpus_scores, _ = all_scores[name]
if isinstance(scorer, Cider):
results["cider"] = corpus_scores
elif isinstance(scorer, Bleu):
scores, _ = all_scores[name]
for i, score in enumerate(corpus_scores):
results[f"bleu{i+1}"] = score
if subset_name is not None:
results["n"] = len(examples)
out.update({ResultKey(metric_name=k, subset_name=subset_name): v for k, v in results.items()})
return out
def evaluate_examples(self, examples: List[CaptioningExample], predictions: Dict[str, GPVExampleOutput]):
all_scores = self._get_scores(examples, predictions)
per_examples_scores = [{} for _ in examples]
for name, scorer in self.scorers.items():
score, scores = all_scores[name]
if isinstance(scorer, Cider):
for score, ex_scores in zip(scores, per_examples_scores):
ex_scores["cider"] = score
elif isinstance(scorer, Bleu):
scores = py_utils.transpose_lists(scores)
for score, ex_scores in zip(scores, per_examples_scores):
for i, s in enumerate(score):
ex_scores[f"bleu{i+1}"] = s
return per_examples_scores
def _get_scores(self, examples: List[CaptioningExample], predictions: Dict[str, GPVExampleOutput]):
gts = {}
res = {}
for ix, instance in enumerate(examples):
key = instance.get_gpv_id()
assert key not in res
res[key] = [predictions[instance.get_gpv_id()].text[0]]
gts[key] = [x.caption.lower() for x in instance.captions]
res = self.tokenizer.tokenize(res)
gts = self.tokenizer.tokenize(gts)
scores = {}
for name, scorer in self.scorers.items():
if isinstance(scorer, Bleu):
scores[name] = scorer.compute_score(gts, res, verbose=0)
else:
scores[name] = scorer.compute_score(gts, res)
return scores
|
127933
|
import unittest
import numpy as np
from pax import core, plugin
from pax.datastructure import Event, Peak
class TestPosRecMaxPMT(unittest.TestCase):
def setUp(self):
self.pax = core.Processor(config_names='XENON100', just_testing=True, config_dict={'pax': {
'plugin_group_names': ['test'],
'test': 'MaxPMT.PosRecMaxPMT'}})
self.plugin = self.pax.get_plugin_by_name('PosRecMaxPMT')
def tearDown(self):
delattr(self, 'pax')
delattr(self, 'plugin')
@staticmethod
def example_event(channels_with_something, area_per_channel=1):
bla = np.zeros(243)
bla[np.array(channels_with_something)] = area_per_channel
e = Event.empty_event()
e.peaks.append(Peak({'left': 5,
'right': 9,
'type': 'S2',
'detector': 'tpc',
'area_per_channel': bla}))
return e
def test_get_maxpmt_plugin(self):
self.assertIsInstance(self.plugin, plugin.TransformPlugin)
self.assertEqual(self.plugin.__class__.__name__, 'PosRecMaxPMT')
def test_posrec(self):
"""Test a hitpattern of all ones and one 2 (at PMT 42)"""
ch = 42 # Could test more locations, little point
hitp = np.ones(len(self.plugin.config['channels_top']))
hitp[ch] = 2
e = self.example_event(channels_with_something=self.plugin.config['channels_top'],
area_per_channel=hitp)
e = self.plugin.transform_event(e)
self.assertIsInstance(e, Event)
self.assertEqual(len(e.peaks), 1)
self.assertEqual(len(e.S2s()), 1)
self.assertEqual(len(e.peaks[0].reconstructed_positions), 1)
rp = e.peaks[0].reconstructed_positions[0]
self.assertEqual(rp.algorithm, self.plugin.name)
self.assertEqual(rp.x, self.plugin.config['pmts'][ch]['position']['x'])
self.assertEqual(rp.y, self.plugin.config['pmts'][ch]['position']['y'])
if __name__ == '__main__':
unittest.main()
|
127988
|
from __future__ import annotations
import pytest
from _pytest.config import Config
from docutils import __version__ as docutils_version
from sphinx import __display_version__ as sphinx_version
from sphinx.testing.path import path
pytest_plugins = "sphinx.testing.fixtures"
collect_ignore = ["roots"]
def pytest_report_header(config: Config) -> str: # noqa: U100
return f"libraries: Sphinx-{sphinx_version}, docutils-{docutils_version}"
@pytest.fixture(scope="session", name="rootdir")
def root_dir() -> path:
return path(__file__).parent.parent.abspath() / "roots"
def pytest_configure(config: Config) -> None:
config.addinivalue_line("markers", "prepare")
|
128189
|
import strawberry
from pythonit_toolkit.api.extensions import SentryExtension
from users.admin_api.mutation import Mutation
from users.admin_api.query import Query
schema = strawberry.federation.Schema(
query=Query, mutation=Mutation, extensions=[SentryExtension]
)
|
128207
|
import glob, os
import sys
import urllib
from pathlib import Path
if __name__ == "__main__":
os.chdir(os.path.dirname(sys.argv[0]))
lecture_toc_md = []
lecture_root = "../lectures"
weeks = [week for week in os.listdir(lecture_root) if week.lower().startswith('week') and not week.lower().endswith('.md')]
weeks.sort()
lecture_toc_title = "Lectures"
lecture_toc_md.append("---")
lecture_toc_md.append("layout: default")
lecture_toc_md.append(f"title: {lecture_toc_title}")
lecture_toc_md.append("nav_order: 3")
lecture_toc_md.append("has_children: true")
lecture_toc_md.append("has_toc: false")
lecture_toc_md.append("permalink: /lectures")
lecture_toc_md.append("---")
# lecture_toc_md.append("")
week_nav_order = 1
# todo : delete all md files
for week_title in weeks:
week_path = os.path.join(lecture_root, week_title)
lecture_toc_md.append("")
lecture_toc_md.append(f"## {week_title}")
lecture_toc_md.append("")
with open(week_path + '.md', 'w') as week_file:
week_md = [
f"---",
f"layout: default",
f"title: {week_title}",
f"parent: {lecture_toc_title}",
f"has_children: true",
f"nav_order: {week_nav_order}",
f"---",
f"",
]
week_file.write('\n'.join(week_md))
week_nav_order += 1
files = os.listdir(week_path)
files = [file for file in files if file.endswith('.md')]
files.sort()
file_nav_order = 1
for file in files:
lecture_md_path = os.path.join(week_title, file)
# todo : figure out why this broke
ipynb_root = "https://github.com/BrianKolowitz/data-focused-python/blob/master/lectures"
ipynb_route = os.path.join(week_title, file[:-3] + ".ipynb")
ipynb_route = urllib.parse.quote(ipynb_route)
lecture_ipynb_path = os.path.join(ipynb_root, ipynb_route)
# lecture_ipynb_path = os.path.join(week_path, file[:-3] + ".ipynb")
# lecture_md_path = urllib.parse.quote(md_path)
# lecture_ipynb_path = urllib.parse.quote(lecture_ipynb_path)
lecture_toc_md.append(f"* [{Path(file).resolve().stem.title()}]({lecture_md_path}) \([ipynb]({lecture_ipynb_path})\)")
with open(os.path.join(lecture_root, lecture_md_path), 'r+') as lecture_md_file:
lines = lecture_md_file.readlines()
header = [
f"---",
f"layout: default",
f"title: {file[:-3]}",
f"parent: {week_title}",
f"grand_parent: {lecture_toc_title}",
f"nav_order: {file_nav_order}",
f"---",
f""
]
file_nav_order += 1
lines.insert(0, '\n'.join(header))
lecture_md_file.seek(0)
lecture_md_file.writelines(lines)
with open(os.path.join(lecture_root, 'lectures.md'), 'w') as f:
f.write('\n'.join(lecture_toc_md))
|
128209
|
import os
import pytest
from deduplication.commands.search import search
from deduplication.tests.conftest import delete_output, mkdir_output, PROJECT_DIR, POTATOES_BASE_PATH, checkEqual
@pytest.mark.parametrize(
'tree_type, distance_metric, nearest_neighbors, leaf_size, parallel, batch_size, threshold, '
'image_w, image_h, query, show, expected',
[('KDTree', 'manhattan', 5, 40, False, 32, 40, 128, 128,
os.path.join(POTATOES_BASE_PATH, '2018-12-11-15-031193.png'), False,
[[8.0, 14.0, 18.0, 22.0], [1, 5, 3, 2]]),
('cKDTree', 'manhattan', 5, 40, False, 32, 40, 128, 128,
os.path.join(POTATOES_BASE_PATH, '2018-12-11-15-031193.png'), False,
[[8.0, 14.0, 18.0, 22.0], [1, 5, 3, 2]])
])
def test_search(build_potato_dataset, tree_type, distance_metric, nearest_neighbors, leaf_size,
parallel, batch_size, threshold, image_w, image_h, query, show, expected):
output_path = mkdir_output(os.path.join(str(PROJECT_DIR), "outputs"))
df_dataset, img_file_list = build_potato_dataset
distances, indices = search(df_dataset,
output_path,
tree_type,
distance_metric,
nearest_neighbors,
leaf_size,
parallel,
batch_size,
threshold,
image_w,
image_h,
query, show)
assert checkEqual(distances, expected[0])
assert checkEqual(indices, expected[1])
delete_output(output_path)
|
128282
|
from xdoctest import checker
from xdoctest import directive
# from xdoctest import utils
def test_visible_lines():
"""
pytest testing/test_checker.py
"""
got = 'this is invisible\ronly this is visible'
print(got)
want = 'only this is visible'
assert checker.check_output(got, want)
def test_visible_lines_explicit():
"""
pytest testing/test_checker.py
"""
got = 'invisible\rIS-visible'
want = 'invisible\rIS-visible'
# The got-want checker is quite permissive.
# Use asserts for non-permissive tests.
assert checker.check_output(got, want)
def test_blankline_accept():
"""
pytest testing/test_checker.py
"""
# Check that blankline is normalized away
runstate = directive.RuntimeState({'DONT_ACCEPT_BLANKLINE': False})
got = 'foo\n\nbar'
want = 'foo\n<BLANKLINE>\nbar'
assert checker.check_output(got, want, runstate)
def test_blankline_failcase():
# Check that blankline is not normalized in a "got" statement
runstate = directive.RuntimeState({'DONT_ACCEPT_BLANKLINE': False})
got = 'foo\n<BLANKLINE>\nbar'
want = 'foo\n\nbar'
assert not checker.check_output(got, want, runstate)
def test_blankline_not_accept():
# Check that blankline is not normalized away when
# DONT_ACCEPT_BLANKLINE is on
runstate = directive.RuntimeState({'DONT_ACCEPT_BLANKLINE': True})
got = 'foo\n\nbar'
want = 'foo\n<BLANKLINE>\nbar'
assert not checker.check_output(got, want, runstate)
|
128283
|
from django.db import DEFAULT_DB_ALIAS
from django.core.management import call_command
from django.core.management.base import BaseCommand
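# Applies (or reverses) the migrations listed in a file, one per line in the form
# '<app_label> <migration_name>' (illustrative line: 'auth 0001_initial').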
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'--database', default=DEFAULT_DB_ALIAS,
help='Name of database')
parser.add_argument(
'--plan', action='store_true',
help='Show a list of actions to perform')
parser.add_argument(
'--noinput', '--no-input', action='store_false',
dest='interactive',
help='Never prompt for input')
parser.add_argument(
'--reverse', action='store_true',
help='Apply migrations in reverse order')
parser.add_argument(
'filename',
help='name of file listing migrations to apply')
def handle(self, *args, **options):
migrations = []
with open(options['filename'], 'r') as f:
for l in f:
(app, name) = l.split()
migrations.append((app, name))
if options['reverse']:
migrations.reverse()
for (app, name) in migrations:
call_command('migrate', app_label=app,
migration_name=name,
database=options['database'],
plan=options['plan'],
interactive=options['interactive'],
verbosity=options['verbosity'])
|
128304
|
import pytest
from algosec.errors import EmptyFlowSearch
from library import algosec_add_single_application_flow
from tests.conftest import AnsibleExitJson, ALGOSEC_SERVER, ALGOSEC_USER, ALGOSEC_PASSWORD, ALGOSEC_CERTIFY_SSL, my_vcr
PROPER_ARGS = dict(
ip_address=ALGOSEC_SERVER,
user=ALGOSEC_USER,
password=<PASSWORD>,
certify_ssl=ALGOSEC_CERTIFY_SSL,
app_name="Payroll",
name="new-test-flow-2",
sources=["192.168.12.12", "HR Payroll server", "192.168.0.0/16"],
destinations=["192.168.3.11", "192.168.3.11"],
services=["HTTPS", "http", "tcp/80", "tcp/51"]
)
MODIFIED_FLOW_ARGS = dict(
ip_address=ALGOSEC_SERVER,
user=ALGOSEC_USER,
password=<PASSWORD>,
certify_ssl=ALGOSEC_CERTIFY_SSL,
app_name="Payroll",
name="new-test-flow-2",
sources=["192.168.12.12", "HR Payroll server", "192.168.0.0/16"],
destinations=["192.168.3.11", "192.168.3.11"],
services=["HTTPS"]
)
class TestDefineApplicationFlows(object):
ansible_module = algosec_add_single_application_flow
@staticmethod
def does_flow_exists(abf_client, app_name, flow_name):
"""Used to delete a flow on ABF to prepare for the tests"""
app_revision_id = abf_client.get_application_revision_id_by_name(app_name)
try:
abf_client.get_flow_by_name(app_revision_id, flow_name)
return True
except EmptyFlowSearch:
return False
@pytest.mark.parametrize('ansible_module_args', [PROPER_ARGS], indirect=True)
@my_vcr.use_cassette('add_single_application_flow_1.yaml')
def test_create_new_application_flow(self, abf_client, ansible_module_args, ansible_module):
app_name = ansible_module_args['app_name']
flow_name = ansible_module_args['name']
if self.does_flow_exists(abf_client, app_name, flow_name):
raise UserWarning(
"Please manually delete {} flow from {} ABF app, "
"apply any drafts and resolve change requests for this unittest to properly run.".format(
flow_name,
app_name,
)
)
# TODO: Test also creation of new network objects, by deleting some of the ones that are used
# TODO: Very similar to how the flow deletion is taking place.
with pytest.raises(AnsibleExitJson) as result:
ansible_module.main()
return_value = result.value.args[0]
assert return_value['changed'], "'changed' should be True as a new flow was created on ABF."
assert return_value['msg'] == 'Flow created/updated successfully!'
# TODO: Add to the ansible output whether the flow was created or updated
@pytest.mark.parametrize('ansible_module_args', [PROPER_ARGS], indirect=True)
@my_vcr.use_cassette('add_single_application_flow_2.yaml')
def test_not_creating_an_already_existing_flow(self, abf_client, ansible_module_args, ansible_module):
app_name = ansible_module_args['app_name']
flow_name = ansible_module_args['name']
if not self.does_flow_exists(abf_client, app_name, flow_name):
raise UserWarning(
"Please run the `test_create_new_application_flow` unittest before running this unittest "
"to make sure the flow already exists on ABF."
)
with pytest.raises(AnsibleExitJson) as result:
ansible_module.main()
return_value = result.value.args[0]
assert not return_value['changed']
assert return_value['msg'] == 'Flow already exists on AlgoSec BusinessFlow.'
@pytest.mark.parametrize('ansible_module_args', [MODIFIED_FLOW_ARGS], indirect=True)
@my_vcr.use_cassette('add_single_application_flow__changing_existing_flow.yaml')
def test_changing_existing_flow(self, abf_client, ansible_module_args, ansible_module):
app_name = ansible_module_args['app_name']
flow_name = ansible_module_args['name']
if not self.does_flow_exists(abf_client, app_name, flow_name):
raise UserWarning(
"Please run the `test_create_new_application_flow` unittest before running this unittest "
"to make sure the flow already exists on ABF."
)
with pytest.raises(AnsibleExitJson) as result:
ansible_module.main()
return_value = result.value.args[0]
assert return_value['changed']
assert return_value['msg'] == 'Flow created/updated successfully!'
# TODO: Add to the ansible output whether the flow was created or updated
|
128325
|
import numpy
from matplotlib import pyplot
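# Finite-difference approximations of f'(x0): forward and backward differences are first-order
# accurate (error proportional to h), the central difference is second-order (error proportional
# to h^2), which the log-log error plot below confirms.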
def forward_difference(f, x0, h):
return (f(x0+h) - f(x0)) / h
def backward_difference(f, x0, h):
return (f(x0) - f(x0-h)) / h
def central_difference(f, x0, h):
return (f(x0+h) - f(x0-h)) / (2*h)
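# Explicit (forward) Euler for y' = f(x, y): y_{n+1} = y_n + dx * f(x_n, y_n), first-order accurate in dx.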
def euler(f, x_end, y0, N):
x, dx = numpy.linspace(0, x_end, N+1, retstep=True)
y = numpy.zeros((len(y0),N+1))
y[:,0] = y0
for n in range(N):
y[:,n+1] = y[:,n] + dx * f(x[n], y[:,n])
return x, dx, y
if __name__=="__main__":
h = 0.5
print("Forward difference, h=",h, "y'=",
forward_difference(numpy.exp, 0, h))
print("Backward difference, h=",h, "y'=",
backward_difference(numpy.exp, 0, h))
print("Central difference, h=",h, "y'=",
central_difference(numpy.exp, 0, h))
h = 0.05
print("Forward difference, h=",h, "y'=",
forward_difference(numpy.exp, 0, h))
print("Backward difference, h=",h, "y'=",
backward_difference(numpy.exp, 0, h))
print("Central difference, h=",h, "y'=",
central_difference(numpy.exp, 0, h))
h_all = 0.5/2**numpy.arange(1,10)
errors_forward = numpy.zeros_like(h_all)
errors_backward = numpy.zeros_like(h_all)
errors_central = numpy.zeros_like(h_all)
for i, h in enumerate(h_all):
errors_forward[i] = abs(1 - forward_difference(numpy.exp, 0, h))
errors_backward[i] = abs(1 - backward_difference(numpy.exp, 0, h))
errors_central[i] = abs(1 - central_difference(numpy.exp, 0, h))
pyplot.figure(figsize=(12,6))
pyplot.loglog(h_all, errors_forward, 'kx', label="Forward")
pyplot.loglog(h_all, errors_backward, 'bo', label="Backward")
pyplot.loglog(h_all, errors_central, 'r^', label="Central")
pyplot.loglog(h_all, h_all/h_all[0]*errors_forward[0], 'b-',
label=r"$\propto h$")
pyplot.loglog(h_all, (h_all/h_all[0])**2*errors_central[0], 'g-',
label=r"$\propto h^2$")
pyplot.xlabel(r"$h$")
pyplot.ylabel("Error")
pyplot.legend(loc="upper left")
pyplot.show()
def f_sin(x, y):
return -numpy.sin(x)
print("Euler's Method")
x, dx, y = euler(f_sin, 0.5, [1], 5)
print("dx=", dx, "y(0.5)=", y[0,-1])
x, dx, y = euler(f_sin, 0.5, [1], 50)
print("dx=", dx, "y(0.5)=", y[0,-1])
Npoints = 5*2**numpy.arange(1,10)
dx_all = 0.5/Npoints
errors = numpy.zeros_like(dx_all)
for i, N in enumerate(Npoints):
x, dx, y = euler(f_sin, 0.5, [1], N)
errors[i] = abs(y[0,-1] - numpy.cos(0.5))
dx_all[i] = dx
pyplot.figure(figsize=(12,6))
pyplot.loglog(dx_all, errors, 'kx')
pyplot.loglog(dx_all, errors[0]*(dx_all/dx_all[0])**1, 'b-',
label=r"$\propto \Delta x$")
pyplot.legend(loc='upper left')
pyplot.xlabel(r"$\Delta x$")
pyplot.ylabel("Error")
pyplot.show()
def f_circle(x, y):
dydx = numpy.zeros_like(y)
dydx[0] = -y[1]
dydx[1] = y[0]
return dydx
y0 = numpy.array([1, 0])
x, dx, y = euler(f_circle, 50, y0, 500)
pyplot.figure(figsize=(8,8))
pyplot.plot(y[0,:], y[1,:])
pyplot.show()
x, dx, y = euler(f_circle, 50, y0, 5000)
pyplot.figure(figsize=(8,8))
pyplot.plot(y[0,:], y[1,:])
pyplot.show()
|
128334
|
import numpy as np
import time, re
from .. import ROOT
from .. import larcv
from iomanager import IOManager
from imagefactory import ImageFactory
# data manager helps get the producers from the ROOT file
# as well as manage factory creation of images as the user
# hits replot, next/prev event
class DataManager(object):
def __init__(self,argv):
self.iom = IOManager(argv)
self.keys ={}
self.IF = ImageFactory()
# get keys from rootfile, iterate over the enum
# and see what's in the root file
for i in xrange(larcv.kProductUnknown):
product = larcv.ProductName(i)
self.keys[product] = []
producers=self.iom.iom.producer_list(i)
for p in producers:
self.keys[product].append(p)
# run subrun and event start at zero
self.run = -1
self.subrun = -1
self.event = -1
def get_nchannels(self,ii,imgprod) :
# Sorry Vic I hacked this
# --> it's ok
self.iom.read_entry(ii)
imdata = self.iom.get_data(larcv.kProductImage2D,imgprod)
return imdata.Image2DArray().size()
def get_event_image(self,ii,imgprod,roiprod,planes, refresh=True) :
#Load data in TChain
self.iom.read_entry(ii)
# there may be no ROI
hasroi = False
roidata = None
if roiprod is not None:
roidata = self.iom.iom.get_data(larcv.kProductROI,roiprod)
roidata = roidata.ROIArray()
hasroi = True
# get the EventImage2D
imdata = self.iom.get_data(larcv.kProductImage2D,imgprod) # goes to disk
self.run = imdata.run()
self.subrun = imdata.subrun()
self.event = imdata.event()
# get the std::vector<larcv::Image2D>
imdata = imdata.Image2DArray()
if imdata.size() == 0 : return (None, False)
# hand it off to the factory, the producer name should query the
# the correct subclass of PlotImage
image = self.IF.get(imdata,roidata,planes,imgprod) # returns PlotImgae
# return it to rgbviewer
return ( image, hasroi )
# -----------------------------------------------------------------------------
# Erez, July-21, 2016 - get an image using R/S/E navigation
# -----------------------------------------------------------------------------
def get_all_images(self,imgprod,event_base_and_images,rse_map) :
for entry in range(self.iom.get_n_entries()):
read_entry = self.iom.read_entry(entry)
event_base = self.iom.get_data(larcv.kProductImage2D,imgprod)
event_base_and_images[entry] = event_base
rse = ( int(event_base.run()),int(event_base.subrun()),int(event_base.event()) )
#print rse
#rse_map[entry] = [event_base.run(),event_base.subrun(),event_base.event()]
rse_map[ rse ] = entry
# print rse_map[entry]
print "collected %d images...\nready for RSE navigation"%len(event_base_and_images)
return
# -----------------------------------------------------------------------------
# Erez, July-21, 2016 - get an image using R/S/E navigation
# -----------------------------------------------------------------------------
def get_rse_image(self,event_base_and_images,rse_map,wanted_rse,imgprod,roiprod,planes, refresh=True) :
if wanted_rse in rse_map:
return self.get_event_image(rse_map[wanted_rse],imgprod,roiprod,planes,refresh)
else:
print "i couldn't find this R/S/E..."
return None, False
ii = -1
for i in range(len(event_base_and_images)):
if rse_map[i] == wanted_rse:
ii = i
break
if (ii==-1):
print "i couldn't find this R/S/E..."
return self.get_event_image(ii,imgprod,roiprod,planes,refresh)
# -----------------------------------------------------------------------------
|
128395
|
from selenium.webdriver import Firefox
url = 'http://selenium.dunossauro.live/aula_05_c.html'
firefox = Firefox()
firefox.get(url)
def melhor_filme(browser, filme, email, telefone):
"""Preenche o formulário do melhor filme de 2020."""
browser.find_element_by_name('filme').send_keys(filme)
browser.find_element_by_name('email').send_keys(email)
browser.find_element_by_name('telefone').send_keys(telefone)
browser.find_element_by_name('enviar').click()
melhor_filme(
firefox,
'Parasita',
'<EMAIL>',
'(019)987654321'
)
firefox.quit()
|
128405
|
import os
import logging
from pathlib import Path
from collections import UserDict
import yaml
import tree_hugger.setup_logging
from tree_hugger.exceptions import QueryFileNotFoundError
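# Query is a dict-like wrapper around a YAML file of queries.
# Illustrative usage (file and key names assumed): q = Query.fromFile('queries.yml'); q['some_query_name']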
class Query(UserDict):
data = {}
def __init__(self, query_file_path: str, query_file_content: dict):
self.query_file_path = query_file_path
self.update(query_file_content)
@staticmethod
def fromFile(query_file_path: str):
if not Path(query_file_path).exists() or not Path(query_file_path).is_file():
raise QueryFileNotFoundError(f"Could not find {query_file_path}")
with open(query_file_path) as f:
query = Query(query_file_path, yaml.load(f, Loader=yaml.FullLoader))
return query
def reload(self):
with open(self.query_file_path) as f:
self.update(yaml.load(f, Loader=yaml.FullLoader))
|
128431
|
from .episode_iterator import EpisodeIterator # NOQA
from .minibatch_iterator import MinibatchIterator # NOQA
from .semisupervised_episode_iterator import SemiSupervisedEpisodeIterator # NOQA
from .sim_episode_iterator import SimEpisodeIterator # NOQA
|
128433
|
import unittest
from swmm_mpc.rpt_ele import rpt_ele
class test_rpt_ele(unittest.TestCase):
test_rpt_file = "example.rpt"
rpt = rpt_ele(test_rpt_file)
def test_total_flood(self):
true_flood_vol = 0.320
self.assertEqual(true_flood_vol, self.rpt.total_flooding)
def test_get_start_line(self):
start_text = 'Infiltration Method'
start_line = self.rpt.get_start_line(start_text)
self.assertEqual(start_line, 23)
start_text = 'Node Surcharge Summary'
start_line = self.rpt.get_start_line(start_text)
self.assertEqual(start_line, 138)
def test_get_end_line(self):
start_text = 'Node Depth Summary'
start_line = self.rpt.get_start_line(start_text)
end_line = self.rpt.get_end_line(start_line)
self.assertEqual(end_line, 118)
if __name__ == '__main__':
unittest.main()
|
128493
|
from random import randint
from retrying import retry
import apysc as ap
from apysc._display.y_interface import YInterface
from apysc._type.variable_name_interface import VariableNameInterface
from tests.testing_helper import assert_attrs
class TestAnimationY:
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test___init__(self) -> None:
target: VariableNameInterface = VariableNameInterface()
target.variable_name = 'test_animation_y'
animation_y: ap.AnimationY = ap.AnimationY(
target=target, y=100, duration=2000, delay=1000,
easing=ap.Easing.EASE_OUT_QUINT)
assert_attrs(
expected_attrs={
'_target': target,
'_y': 100,
'_duration': 2000,
'_delay': 1000,
'_easing': ap.Easing.EASE_OUT_QUINT,
},
any_obj=animation_y)
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__get_animation_func_expression(self) -> None:
target: VariableNameInterface = VariableNameInterface()
target.variable_name = 'test_animation_y'
animation_y: ap.AnimationY = ap.AnimationY(target=target, y=100)
expression: str = animation_y._get_animation_func_expression()
assert expression == f'\n .y({animation_y._y.variable_name});'
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__get_complete_event_in_handler_head_expression(self) -> None:
target: YInterface = YInterface()
target.variable_name = 'test_y_interface'
animation_y: ap.AnimationY = ap.AnimationY(target=target, y=100)
expression: str = animation_y.\
_get_complete_event_in_handler_head_expression()
assert expression == (
f'{target._y.variable_name} = '
f'{animation_y._y.variable_name};'
)
|
128509
|
import numpy as np
from scipy.spatial.distance import cdist
# Note: smiles_to_fingerprint is assumed to be provided by ..rdkit alongside smiles_list_to_fingerprints.
from ..rdkit import smiles_list_to_fingerprints, smiles_to_fingerprint, precursors_from_templates
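# Tanimoto (Jaccard) similarity between binary fingerprints: |A & B| / (|A| + |B| - |A & B|).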
def tanimoto(fp1, fp2):
a = fp1.sum()
b = fp2.sum()
c = float((fp1&fp2).sum())
return c/(a+b-c)
def pairwise_tanimoto(arr1, arr2, metric=tanimoto):
if arr1.size == 0:
return np.array([[]])
return cdist(arr1, arr2, metric=metric)
def diversity_from_smiles_list(smiles_list, fp_length=2048, fp_radius=2, nproc=1):
fps = smiles_list_to_fingerprints(smiles_list, fp_length=fp_length, fp_radius=fp_radius, nproc=nproc)
similarity = pairwise_tanimoto(fps, fps)
diversity = 1 - similarity
np.fill_diagonal(diversity, np.nan)
return diversity
def diversity(model, test_smiles, templates, topk=100, fp_length=2048, fp_radius=2, nproc=1):
div = []
for smi in test_smiles:
fp = smiles_to_fingerprint(smi, length=fp_length, radius=fp_radius)
pred = model.predict(fp.reshape(1, -1)).reshape(-1)
ind = np.argsort(-pred)[:topk]
precursors = precursors_from_templates(smi, templates[ind], nproc=nproc)
div.append(diversity_from_smiles_list(precursors, nproc=nproc))
return div
|
128518
|
import numpy as np
from Utils.Data.DatasetUtils import is_test_or_val_set, get_train_set_id_from_test_or_val_set, \
get_test_or_val_set_id_from_train
from Utils.Data.Features.Generated.TweetFeature.IsEngagementType import *
from Utils.Data.Features.MappedFeatures import MappedFeatureEngagerId, MappedFeatureCreatorId, \
MappedFeatureTweetLanguage
import time
def find_and_increase_engager(eng_id, cre_id, lang, dictionary):
# Number of times the engager has previously engaged with tweets in this language
current_count = dictionary.get((eng_id, lang), 0)
dictionary[(cre_id, lang)] = dictionary.get((cre_id, lang), 0) + 1
dictionary[(eng_id, lang)] = current_count + 1
return current_count
def find_and_increase_creator(eng_id, cre_id, lang, dictionary):
# Number of times the engager has previously engaged with tweets in this language
current_count = dictionary.get((eng_id, lang), 0)
dictionary[(cre_id, lang)] = dictionary.get((cre_id, lang), 0) + 1
return current_count
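# Both helpers keep a running count keyed by (user_id, language). find_and_increase_engager
# returns the engager's count as it stood before this engagement and then increments both the
# engager's and the creator's counts; find_and_increase_creator only increments the creator's
# count (used when building the test-set feature, where the engagement outcome is unknown).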
class EngagerFeatureNumberOfPreviousLikeEngagementWithLanguage(GeneratedFeaturePickle):
# How many previous like engagements has the engager had with tweets in this language?
def __init__(self, dataset_id: str):
super().__init__("engager_feature_number_of_previous_like_engagement_with_language",
dataset_id)
self.pck_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/number_of_previous_engagement_with_language/{self.feature_name}.pck.gz")
self.csv_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/number_of_previous_engagement_with_language/{self.feature_name}.csv.gz")
def create_feature(self):
# Check if the dataset id is train or test
if is_test_or_val_set(self.dataset_id):
train_dataset_id = get_train_set_id_from_test_or_val_set(self.dataset_id)
test_dataset_id = self.dataset_id
else:
train_dataset_id = self.dataset_id
test_dataset_id = get_test_or_val_set_id_from_train(train_dataset_id)
# Load features
creation_timestamps_feature = RawFeatureTweetTimestamp(train_dataset_id)
engagers_feature = MappedFeatureEngagerId(train_dataset_id)
creators_feature = MappedFeatureCreatorId(train_dataset_id)
language_feature = MappedFeatureTweetLanguage(train_dataset_id)
engagement_feature = TweetFeatureEngagementIsLike(train_dataset_id)
dataframe = pd.concat([
creation_timestamps_feature.load_or_create(),
engagers_feature.load_or_create(),
engagement_feature.load_or_create(),
creators_feature.load_or_create(),
language_feature.load_or_create()
], axis=1)
dataframe.sort_values(creation_timestamps_feature.feature_name, inplace=True)
# KEY: a tuple (user_id, language)
# VALUE: the number of previous engagements involving that user and that language
# If the key does not exist -> 0 times.
engagement_dict = {}
result = pd.DataFrame(
[find_and_increase_engager(eng_id, cre_id, lang, engagement_dict)
if engagement
else engagement_dict.get((eng_id, lang), 0)
for eng_id, cre_id, lang, engagement
in zip(dataframe[engagers_feature.feature_name],
dataframe[creators_feature.feature_name],
dataframe[language_feature.feature_name],
dataframe[engagement_feature.feature_name])],
index=dataframe.index
)
if not EngagerFeatureNumberOfPreviousLikeEngagementWithLanguage(train_dataset_id).has_feature():
result.sort_index(inplace=True)
EngagerFeatureNumberOfPreviousLikeEngagementWithLanguage(train_dataset_id).save_feature(result)
if not EngagerFeatureNumberOfPreviousLikeEngagementWithLanguage(test_dataset_id).has_feature():
# Load features
creation_timestamps_feature = RawFeatureTweetTimestamp(test_dataset_id)
engagers_feature = MappedFeatureEngagerId(test_dataset_id)
language_feature = MappedFeatureTweetLanguage(test_dataset_id)
creators_feature = MappedFeatureCreatorId(test_dataset_id)
dataframe = pd.concat([
creation_timestamps_feature.load_or_create(),
engagers_feature.load_or_create(),
creators_feature.load_or_create(),
language_feature.load_or_create()
], axis=1)
dataframe.sort_values(creation_timestamps_feature.feature_name, inplace=True)
result = pd.DataFrame(
[find_and_increase_creator(eng_id, cre_id, lang, engagement_dict)
for eng_id, cre_id, lang
in zip(dataframe[engagers_feature.feature_name],
dataframe[creators_feature.feature_name],
dataframe[language_feature.feature_name])],
index=dataframe.index
)
result.sort_index(inplace=True)
EngagerFeatureNumberOfPreviousLikeEngagementWithLanguage(test_dataset_id).save_feature(result)
class EngagerFeatureNumberOfPreviousRetweetEngagementWithLanguage(GeneratedFeaturePickle):
# How many previous retweet engagements has the engager had with tweets in this language?
def __init__(self, dataset_id: str):
super().__init__("engager_feature_number_of_previous_retweet_engagement_with_language",
dataset_id)
self.pck_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/number_of_previous_engagement_with_language/{self.feature_name}.pck.gz")
self.csv_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/number_of_previous_engagement_with_language/{self.feature_name}.csv.gz")
def create_feature(self):
# Check if the dataset id is train or test
if is_test_or_val_set(self.dataset_id):
train_dataset_id = get_train_set_id_from_test_or_val_set(self.dataset_id)
test_dataset_id = self.dataset_id
else:
train_dataset_id = self.dataset_id
test_dataset_id = get_test_or_val_set_id_from_train(train_dataset_id)
# Load features
creation_timestamps_feature = RawFeatureTweetTimestamp(train_dataset_id)
engagers_feature = MappedFeatureEngagerId(train_dataset_id)
creators_feature = MappedFeatureCreatorId(train_dataset_id)
language_feature = MappedFeatureTweetLanguage(train_dataset_id)
engagement_feature = TweetFeatureEngagementIsRetweet(train_dataset_id)
dataframe = pd.concat([
creation_timestamps_feature.load_or_create(),
engagers_feature.load_or_create(),
engagement_feature.load_or_create(),
creators_feature.load_or_create(),
language_feature.load_or_create()
], axis=1)
dataframe.sort_values(creation_timestamps_feature.feature_name, inplace=True)
# KEY: a tuple (user_id, language)
# VALUE: the number of previous engagements involving that user and that language
# If the key does not exist -> 0 times.
engagement_dict = {}
result = pd.DataFrame(
[find_and_increase_engager(eng_id, cre_id, lang, engagement_dict)
if engagement
else engagement_dict.get((eng_id, lang), 0)
for eng_id, cre_id, lang, engagement
in zip(dataframe[engagers_feature.feature_name],
dataframe[creators_feature.feature_name],
dataframe[language_feature.feature_name],
dataframe[engagement_feature.feature_name])],
index=dataframe.index
)
if not EngagerFeatureNumberOfPreviousRetweetEngagementWithLanguage(
train_dataset_id).has_feature():
result.sort_index(inplace=True)
EngagerFeatureNumberOfPreviousRetweetEngagementWithLanguage(train_dataset_id).save_feature(result)
if not EngagerFeatureNumberOfPreviousRetweetEngagementWithLanguage(
test_dataset_id).has_feature():
# Load features
creation_timestamps_feature = RawFeatureTweetTimestamp(test_dataset_id)
engagers_feature = MappedFeatureEngagerId(test_dataset_id)
language_feature = MappedFeatureTweetLanguage(test_dataset_id)
creators_feature = MappedFeatureCreatorId(test_dataset_id)
dataframe = pd.concat([
creation_timestamps_feature.load_or_create(),
engagers_feature.load_or_create(),
creators_feature.load_or_create(),
language_feature.load_or_create()
], axis=1)
dataframe.sort_values(creation_timestamps_feature.feature_name, inplace=True)
result = pd.DataFrame(
[find_and_increase_creator(eng_id, cre_id, lang, engagement_dict)
for eng_id, cre_id, lang
in zip(dataframe[engagers_feature.feature_name],
dataframe[creators_feature.feature_name],
dataframe[language_feature.feature_name])],
index=dataframe.index
)
result.sort_index(inplace=True)
EngagerFeatureNumberOfPreviousRetweetEngagementWithLanguage(test_dataset_id).save_feature(result)
class EngagerFeatureNumberOfPreviousReplyEngagementWithLanguage(GeneratedFeaturePickle):
# How many previous reply engagements has the engager had with tweets in this language?
def __init__(self, dataset_id: str):
super().__init__("engager_feature_number_of_previous_reply_engagement_with_language",
dataset_id)
self.pck_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/number_of_previous_engagement_with_language/{self.feature_name}.pck.gz")
self.csv_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/number_of_previous_engagement_with_language/{self.feature_name}.csv.gz")
def create_feature(self):
# Check if the dataset id is train or test
if is_test_or_val_set(self.dataset_id):
train_dataset_id = get_train_set_id_from_test_or_val_set(self.dataset_id)
test_dataset_id = self.dataset_id
else:
train_dataset_id = self.dataset_id
test_dataset_id = get_test_or_val_set_id_from_train(train_dataset_id)
# Load features
creation_timestamps_feature = RawFeatureTweetTimestamp(train_dataset_id)
engagers_feature = MappedFeatureEngagerId(train_dataset_id)
creators_feature = MappedFeatureCreatorId(train_dataset_id)
language_feature = MappedFeatureTweetLanguage(train_dataset_id)
engagement_feature = TweetFeatureEngagementIsReply(train_dataset_id)
dataframe = pd.concat([
creation_timestamps_feature.load_or_create(),
engagers_feature.load_or_create(),
engagement_feature.load_or_create(),
creators_feature.load_or_create(),
language_feature.load_or_create()
], axis=1)
dataframe.sort_values(creation_timestamps_feature.feature_name, inplace=True)
# KEY: a tuple (user_id, language)
# VALUE: the number of previous engagements involving that user and that language
# If the key does not exist -> 0 times.
engagement_dict = {}
result = pd.DataFrame(
[find_and_increase_engager(eng_id, cre_id, lang, engagement_dict)
if engagement
else engagement_dict.get((eng_id, lang), 0)
for eng_id, cre_id, lang, engagement
in zip(dataframe[engagers_feature.feature_name],
dataframe[creators_feature.feature_name],
dataframe[language_feature.feature_name],
dataframe[engagement_feature.feature_name])],
index=dataframe.index
)
if not EngagerFeatureNumberOfPreviousReplyEngagementWithLanguage(
train_dataset_id).has_feature():
result.sort_index(inplace=True)
EngagerFeatureNumberOfPreviousReplyEngagementWithLanguage(train_dataset_id).save_feature(result)
if not EngagerFeatureNumberOfPreviousReplyEngagementWithLanguage(
test_dataset_id).has_feature():
# Load features
creation_timestamps_feature = RawFeatureTweetTimestamp(test_dataset_id)
engagers_feature = MappedFeatureEngagerId(test_dataset_id)
language_feature = MappedFeatureTweetLanguage(test_dataset_id)
creators_feature = MappedFeatureCreatorId(test_dataset_id)
dataframe = pd.concat([
creation_timestamps_feature.load_or_create(),
engagers_feature.load_or_create(),
creators_feature.load_or_create(),
language_feature.load_or_create()
], axis=1)
dataframe.sort_values(creation_timestamps_feature.feature_name, inplace=True)
result = pd.DataFrame(
[find_and_increase_creator(eng_id, cre_id, lang, engagement_dict)
for eng_id, cre_id, lang
in zip(dataframe[engagers_feature.feature_name],
dataframe[creators_feature.feature_name],
dataframe[language_feature.feature_name])],
index=dataframe.index
)
result.sort_index(inplace=True)
EngagerFeatureNumberOfPreviousReplyEngagementWithLanguage(test_dataset_id).save_feature(result)
class EngagerFeatureNumberOfPreviousCommentEngagementWithLanguage(GeneratedFeaturePickle):
    # How many times has the engager previously made a comment engagement with tweets in this language?
def __init__(self, dataset_id: str):
super().__init__("engager_feature_number_of_previous_comment_engagement_with_language",
dataset_id)
self.pck_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/number_of_previous_engagement_with_language/{self.feature_name}.pck.gz")
self.csv_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/number_of_previous_engagement_with_language/{self.feature_name}.csv.gz")
def create_feature(self):
# Check if the dataset id is train or test
if is_test_or_val_set(self.dataset_id):
train_dataset_id = get_train_set_id_from_test_or_val_set(self.dataset_id)
test_dataset_id = self.dataset_id
else:
train_dataset_id = self.dataset_id
test_dataset_id = get_test_or_val_set_id_from_train(train_dataset_id)
# Load features
creation_timestamps_feature = RawFeatureTweetTimestamp(train_dataset_id)
engagers_feature = MappedFeatureEngagerId(train_dataset_id)
creators_feature = MappedFeatureCreatorId(train_dataset_id)
language_feature = MappedFeatureTweetLanguage(train_dataset_id)
engagement_feature = TweetFeatureEngagementIsComment(train_dataset_id)
dataframe = pd.concat([
creation_timestamps_feature.load_or_create(),
engagers_feature.load_or_create(),
engagement_feature.load_or_create(),
creators_feature.load_or_create(),
language_feature.load_or_create()
], axis=1)
dataframe.sort_values(creation_timestamps_feature.feature_name, inplace=True)
        # KEY: a tuple (engager, language)
        # VALUE: the number of times the engager has engaged with tweets in that language
        # If the key does not exist -> 0 times.
engagement_dict = {}
result = pd.DataFrame(
[find_and_increase_engager(eng_id, cre_id, lang, engagement_dict)
if engagement
else engagement_dict.get((eng_id, lang), 0)
for eng_id, cre_id, lang, engagement
in zip(dataframe[engagers_feature.feature_name],
dataframe[creators_feature.feature_name],
dataframe[language_feature.feature_name],
dataframe[engagement_feature.feature_name])],
index=dataframe.index
)
if not EngagerFeatureNumberOfPreviousCommentEngagementWithLanguage(
train_dataset_id).has_feature():
result.sort_index(inplace=True)
EngagerFeatureNumberOfPreviousCommentEngagementWithLanguage(train_dataset_id).save_feature(result)
if not EngagerFeatureNumberOfPreviousCommentEngagementWithLanguage(
test_dataset_id).has_feature():
# Load features
creation_timestamps_feature = RawFeatureTweetTimestamp(test_dataset_id)
engagers_feature = MappedFeatureEngagerId(test_dataset_id)
language_feature = MappedFeatureTweetLanguage(test_dataset_id)
creators_feature = MappedFeatureCreatorId(test_dataset_id)
dataframe = pd.concat([
creation_timestamps_feature.load_or_create(),
engagers_feature.load_or_create(),
creators_feature.load_or_create(),
language_feature.load_or_create()
], axis=1)
dataframe.sort_values(creation_timestamps_feature.feature_name, inplace=True)
result = pd.DataFrame(
[find_and_increase_creator(eng_id, cre_id, lang, engagement_dict)
for eng_id, cre_id, lang
in zip(dataframe[engagers_feature.feature_name],
dataframe[creators_feature.feature_name],
dataframe[language_feature.feature_name])],
index=dataframe.index
)
result.sort_index(inplace=True)
EngagerFeatureNumberOfPreviousCommentEngagementWithLanguage(test_dataset_id).save_feature(result)
class EngagerFeatureNumberOfPreviousPositiveEngagementWithLanguage(GeneratedFeaturePickle):
    # How many times has the engager previously made a positive engagement with tweets in this language?
def __init__(self, dataset_id: str):
super().__init__("engager_feature_number_of_previous_positive_engagement_with_language",
dataset_id)
self.pck_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/number_of_previous_engagement_with_language/{self.feature_name}.pck.gz")
self.csv_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/number_of_previous_engagement_with_language/{self.feature_name}.csv.gz")
def create_feature(self):
# Check if the dataset id is train or test
if is_test_or_val_set(self.dataset_id):
train_dataset_id = get_train_set_id_from_test_or_val_set(self.dataset_id)
test_dataset_id = self.dataset_id
else:
train_dataset_id = self.dataset_id
test_dataset_id = get_test_or_val_set_id_from_train(train_dataset_id)
# Load features
creation_timestamps_feature = RawFeatureTweetTimestamp(train_dataset_id)
engagers_feature = MappedFeatureEngagerId(train_dataset_id)
creators_feature = MappedFeatureCreatorId(train_dataset_id)
language_feature = MappedFeatureTweetLanguage(train_dataset_id)
engagement_feature = TweetFeatureEngagementIsPositive(train_dataset_id)
dataframe = pd.concat([
creation_timestamps_feature.load_or_create(),
engagers_feature.load_or_create(),
engagement_feature.load_or_create(),
creators_feature.load_or_create(),
language_feature.load_or_create()
], axis=1)
dataframe.sort_values(creation_timestamps_feature.feature_name, inplace=True)
        # KEY: a tuple (engager, language)
        # VALUE: the number of times the engager has engaged with tweets in that language
        # If the key does not exist -> 0 times.
engagement_dict = {}
result = pd.DataFrame(
[find_and_increase_engager(eng_id, cre_id, lang, engagement_dict)
if engagement
else engagement_dict.get((eng_id, lang), 0)
for eng_id, cre_id, lang, engagement
in zip(dataframe[engagers_feature.feature_name],
dataframe[creators_feature.feature_name],
dataframe[language_feature.feature_name],
dataframe[engagement_feature.feature_name])],
index=dataframe.index
)
if not EngagerFeatureNumberOfPreviousPositiveEngagementWithLanguage(
train_dataset_id).has_feature():
result.sort_index(inplace=True)
EngagerFeatureNumberOfPreviousPositiveEngagementWithLanguage(train_dataset_id).save_feature(result)
if not EngagerFeatureNumberOfPreviousPositiveEngagementWithLanguage(
test_dataset_id).has_feature():
# Load features
creation_timestamps_feature = RawFeatureTweetTimestamp(test_dataset_id)
engagers_feature = MappedFeatureEngagerId(test_dataset_id)
language_feature = MappedFeatureTweetLanguage(test_dataset_id)
creators_feature = MappedFeatureCreatorId(test_dataset_id)
dataframe = pd.concat([
creation_timestamps_feature.load_or_create(),
engagers_feature.load_or_create(),
creators_feature.load_or_create(),
language_feature.load_or_create()
], axis=1)
dataframe.sort_values(creation_timestamps_feature.feature_name, inplace=True)
result = pd.DataFrame(
[find_and_increase_creator(eng_id, cre_id, lang, engagement_dict)
for eng_id, cre_id, lang
in zip(dataframe[engagers_feature.feature_name],
dataframe[creators_feature.feature_name],
dataframe[language_feature.feature_name])],
index=dataframe.index
)
result.sort_index(inplace=True)
EngagerFeatureNumberOfPreviousPositiveEngagementWithLanguage(test_dataset_id).save_feature(result)
class EngagerFeatureNumberOfPreviousNegativeEngagementWithLanguage(GeneratedFeaturePickle):
    # How many times has the engager previously made a negative engagement with tweets in this language?
def __init__(self, dataset_id: str):
super().__init__("engager_feature_number_of_previous_negative_engagement_with_language",
dataset_id)
self.pck_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/number_of_previous_engagement_with_language/{self.feature_name}.pck.gz")
self.csv_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/number_of_previous_engagement_with_language/{self.feature_name}.csv.gz")
def create_feature(self):
# Check if the dataset id is train or test
if is_test_or_val_set(self.dataset_id):
train_dataset_id = get_train_set_id_from_test_or_val_set(self.dataset_id)
test_dataset_id = self.dataset_id
else:
train_dataset_id = self.dataset_id
test_dataset_id = get_test_or_val_set_id_from_train(train_dataset_id)
# Load features
creation_timestamps_feature = RawFeatureTweetTimestamp(train_dataset_id)
engagers_feature = MappedFeatureEngagerId(train_dataset_id)
creators_feature = MappedFeatureCreatorId(train_dataset_id)
language_feature = MappedFeatureTweetLanguage(train_dataset_id)
engagement_feature = TweetFeatureEngagementIsNegative(train_dataset_id)
dataframe = pd.concat([
creation_timestamps_feature.load_or_create(),
engagers_feature.load_or_create(),
engagement_feature.load_or_create(),
creators_feature.load_or_create(),
language_feature.load_or_create()
], axis=1)
dataframe.sort_values(creation_timestamps_feature.feature_name, inplace=True)
        # KEY: a tuple (engager, language)
        # VALUE: the number of times the engager has engaged with tweets in that language
        # If the key does not exist -> 0 times.
engagement_dict = {}
result = pd.DataFrame(
[find_and_increase_engager(eng_id, cre_id, lang, engagement_dict)
if engagement
else engagement_dict.get((eng_id, lang), 0)
for eng_id, cre_id, lang, engagement
in zip(dataframe[engagers_feature.feature_name],
dataframe[creators_feature.feature_name],
dataframe[language_feature.feature_name],
dataframe[engagement_feature.feature_name])],
index=dataframe.index
)
if not EngagerFeatureNumberOfPreviousNegativeEngagementWithLanguage(
train_dataset_id).has_feature():
result.sort_index(inplace=True)
EngagerFeatureNumberOfPreviousNegativeEngagementWithLanguage(train_dataset_id).save_feature(result)
if not EngagerFeatureNumberOfPreviousNegativeEngagementWithLanguage(
test_dataset_id).has_feature():
# Load features
creation_timestamps_feature = RawFeatureTweetTimestamp(test_dataset_id)
engagers_feature = MappedFeatureEngagerId(test_dataset_id)
language_feature = MappedFeatureTweetLanguage(test_dataset_id)
creators_feature = MappedFeatureCreatorId(test_dataset_id)
dataframe = pd.concat([
creation_timestamps_feature.load_or_create(),
engagers_feature.load_or_create(),
creators_feature.load_or_create(),
language_feature.load_or_create()
], axis=1)
dataframe.sort_values(creation_timestamps_feature.feature_name, inplace=True)
result = pd.DataFrame(
[find_and_increase_creator(eng_id, cre_id, lang, engagement_dict)
for eng_id, cre_id, lang
in zip(dataframe[engagers_feature.feature_name],
dataframe[creators_feature.feature_name],
dataframe[language_feature.feature_name])],
index=dataframe.index
)
result.sort_index(inplace=True)
EngagerFeatureNumberOfPreviousNegativeEngagementWithLanguage(test_dataset_id).save_feature(result)
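# NOTE: find_and_increase_engager / find_and_increase_creator are helper functions
# presumably defined earlier in this module. As a hedged, illustrative sketch only
# (not the actual implementation), the engager-side helper would be expected to
# behave roughly like this, matching the (engager, language) keys used above:
#
#     def find_and_increase_engager(engager_id, creator_id, language, engagement_dict):
#         count = engagement_dict.get((engager_id, language), 0)
#         engagement_dict[(engager_id, language)] = count + 1
#         return count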
|
128553
|
from moto import mock_lambda, mock_logs
from newrelic_lambda_cli.cli import cli, register_groups
@mock_lambda
@mock_logs
def test_subscriptions_install(aws_credentials, cli_runner):
"""
Assert that 'newrelic-lambda subscriptions install' attempts to install the
New Relic log subscription on a function.
"""
register_groups(cli)
result = cli_runner.invoke(
cli,
[
"subscriptions",
"install",
"--no-aws-permissions-check",
"--function",
"foobar",
"--aws-region",
"us-east-1",
],
env={
"AWS_ACCESS_KEY_ID": "testing",
"AWS_SECRET_ACCESS_KEY": "testing",
"AWS_SECURITY_TOKEN": "testing",
"AWS_SESSION_TOKEN": "<PASSWORD>",
},
)
assert result.exit_code == 1
assert result.stdout == ""
assert (
"Could not find 'newrelic-log-ingestion' function. "
"Is the New Relic AWS integration installed?"
) in result.stderr
result2 = cli_runner.invoke(
cli,
[
"subscriptions",
"install",
"--no-aws-permissions-check",
"--function",
"foobar",
"--function",
"barbaz",
"--aws-region",
"us-east-1",
],
env={
"AWS_ACCESS_KEY_ID": "testing",
"AWS_SECRET_ACCESS_KEY": "testing",
"AWS_SECURITY_TOKEN": "testing",
"AWS_SESSION_TOKEN": "<PASSWORD>",
},
)
assert result2.exit_code == 1
assert result2.stdout == ""
assert (
"Could not find 'newrelic-log-ingestion' function. "
"Is the New Relic AWS integration installed?"
) in result2.stderr
@mock_lambda
@mock_logs
def test_subscriptions_uninstall(aws_credentials, cli_runner):
"""
Assert that 'newrelic-lambda subscriptions uninstall' attempts to uninstall the
New Relic log subscription on a function.
"""
register_groups(cli)
result = cli_runner.invoke(
cli,
[
"subscriptions",
"uninstall",
"--no-aws-permissions-check",
"--function",
"foobar",
"--aws-region",
"us-east-1",
],
env={
"AWS_ACCESS_KEY_ID": "testing",
"AWS_SECRET_ACCESS_KEY": "testing",
"AWS_SECURITY_TOKEN": "testing",
"AWS_SESSION_TOKEN": "<PASSWORD>",
},
)
assert result.exit_code == 1
assert result.stdout == ""
result2 = cli_runner.invoke(
cli,
[
"subscriptions",
"uninstall",
"--no-aws-permissions-check",
"--function",
"foobar",
"--function",
"barbaz",
"--aws-region",
"us-east-1",
],
env={
"AWS_ACCESS_KEY_ID": "testing",
"AWS_SECRET_ACCESS_KEY": "testing",
"AWS_SECURITY_TOKEN": "testing",
"AWS_SESSION_TOKEN": "<PASSWORD>",
},
)
assert result2.exit_code == 1
assert result2.stdout == ""
|
128600
|
import setuptools
setuptools.setup(
name="devrecargar",
version="0.1.4",
url="https://github.com/scottwoodall/django-devrecargar",
author="<NAME>",
author_email="<EMAIL>",
description="""
A Django app that automatically reloads your browser when a file
(py, html, js, css) changes.
""",
long_description=open('README.md').read(),
packages=setuptools.find_packages(),
license="MIT",
install_requires=['watchdog'],
include_package_data=True,
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
)
|
128606
|
from typing import Any
import strawberry
from strawberry.types import Info
from src.api.context import Context
@strawberry.federation.type(extend=True)
class Query:
@strawberry.field
async def association_service(self, info: Info[Context, Any]) -> bool:
return True
|
128611
|
from __future__ import unicode_literals, division, print_function, absolute_import
from .simec import SimilarityEncoder, masked_mse, masked_binary_crossentropy, LastLayerReg
from .utils import center_K
|
128627
|
import logging
import requests
from .. import VERSION_STR
from ..exceptions import ConnectionError, ApiCallError
LOG = logging.getLogger(__name__)
class OpenRefsResponse:
"""Обёртка над ответом requests."""
def __init__(self, response: requests.Response):
"""
:param response:
"""
self._response = response
class HttpConnector:
"""Реализует обращение к ресурсам по средствам http(s)."""
def __init__(self, *, timeout=None):
self._timeout = timeout or 10
def _request(self, url: str, method: str = 'get', **kwargs):
timeout = self._timeout
method = getattr(requests, method)
LOG.debug('URL: %s', url)
def do_request():
request_kwargs = {
'headers': {
'User-agent': 'ruopenrefs/%s' % VERSION_STR,
},
'timeout': timeout,
'verify': False,
}
request_kwargs.update(kwargs)
try:
response = method(url, **request_kwargs) # type: requests.Response
except requests.ReadTimeout:
raise ConnectionError('Request timed out.') from None
except requests.ConnectionError:
raise ConnectionError('Unable to connect to %s.' % url)
try:
response.raise_for_status()
except requests.HTTPError:
msg = response.content
status_code = response.status_code
LOG.debug('API call error, code [%s]:\n%s', status_code, msg)
raise ApiCallError(msg, status_code)
return response
return OpenRefsResponse(response=do_request())
request = _request
|
128667
|
import unittest
from mock import Mock
from records_mover.records.schema.field.numpy import details_from_numpy_dtype
import numpy as np
class TestNumpy(unittest.TestCase):
def test_details_from_numpy_dtype(self):
tests = {
np.dtype(str): 'string',
np.dtype(int): 'integer',
np.dtype(np.datetime64): 'datetime',
}
for dtype, expected_field_type in tests.items():
mock_unique = Mock(name='unique')
actual_field_type, actual_constraints = details_from_numpy_dtype(dtype=dtype,
unique=mock_unique)
self.assertEqual(actual_field_type, expected_field_type)
|
128676
|
import time
import os
# for organization, encode parameters in dir name
def setOutDir(params):
timestamp = str(int(time.time()))
try:
jobid = os.environ['SLURM_JOBID']
except:
jobid = 'NOID'
if params['root'] is None:
root = os.path.join(os.environ['HOME'], "STM", "experiments")
else:
root = params['root']
if os.environ['IS_INTERACTIVE'] == 'true':
vers = "tmp"
else:
vers = params['version']
out_dir = os.path.abspath(os.path.join(
root,
params.get("type", "unknown_type"), # classifier/agent
"runs",
vers,
jobid + "_" + timestamp))
if params.get("type", "unknown_type") == "agent":
out_dir += "_mbs" + str(params['miniBatchSize'])
out_dir += "_tau" + str(params['tau'])
out_dir += "_es" + str(params['epsilonStart'])
out_dir += "_en" + str(params['epsilonStop'])
out_dir += "_opt" + str(params['optimizer'])
out_dir += "_lr" + str(params['learning-rate'])
if params['batchnorm']:
out_dir += "_bn"
else:
out_dir += "_noBn"
out_dir += "_rew" + str(params['reward'])
print("Number of random episodes: ", params['randomEps'])
out_dir += "_randEps" + str(params['randomEps'])
print("gamma: ", params['gamma'])
out_dir += "_gamma" + str(params['gamma'])
if params['dqnNN'] is not None:
try:
out_dir += "_init" + params['dqnNN'].split("/")[8].split("_")[0]
except:
pass
elif params['useClassNN'] is not None:
try:
out_dir += "_init" + params['classNN'].split("/")[8].split("_")[0]
except:
pass
else:
out_dir += "_initRand"
return out_dir
if params.get("type", "unknown_type") == "classifier":
print("Number of training steps: ", params['numTrainSteps'])
out_dir += "_" + str(params['numTrainSteps'])
print("miniBatchSize: ", params['miniBatchSize'])
out_dir += "_" + str(params['miniBatchSize'])
print("dropout", params['dropout'])
if params['dropout']:
out_dir += "_" + "drp" + str(params['dropout'])
else:
out_dir += "_" + "noDrp"
if params['distortBrightnessRelative'] or params['distortContrast']:
params['distorted'] = True
print("distorted", params['distorted'])
if params['distorted']:
out_dir += "_" + "augm"
delta = params['distortBrightnessRelative']
factor = params['distortContrast']
stddev = params['distortGaussian']
fracSP = params['distortSaltPepper']
if delta != 0:
out_dir += "_Br" + str(delta)
if factor != 0:
out_dir += "_Cntr-" + str(factor)
if stddev != 0:
out_dir += "_Gau-" + str(stddev)
if fracSP != 0:
out_dir += "_SP-" + str(fracSP)
else:
out_dir += "_" + "noAugm"
print("batchnorm", params['batchnorm'])
if params['batchnorm']:
out_dir += "_" + "bn-" + str(params['batchnorm-decay'])
else:
out_dir += "_" + "noBn"
out_dir += "_" + "cSz" + str(params['pxRes'])
print("weight decay", params['weight-decay'])
out_dir += "_wd" + str(params['weight-decay'])
print("learning rate", params['learning-rate'])
out_dir += "_lr" + str(params['learning-rate'])
if params["lr-decay"]:
out_dir += "Dc"
print("momentum", params['momentum'])
out_dir += "_mom" + str(params['momentum'])
print("optimizer", params['optimizer'])
out_dir += "_opt" + params['optimizer']
if params['in_dir'] is not None:
print("reading data from: ", params['in_dir'])
out_dir += params['in_dir'].split("/")[-1]
if params['aucLoss']:
out_dir += "_auc"
if params['penalizeFP']:
out_dir += "_penFP"
if params['relWeightPosSamples'] is not None:
out_dir += "_wPos" + str(params['relWeightPosSamples'])
if params['RANSAC']:
out_dir += "_ransac"
else:
out_dir += "_noRansac"
return out_dir
|
128691
|
import time
import torch
from labml import monit, logger
from labml.logger import Text
N = 10_000
def no_section():
arr = torch.zeros((1000, 1000))
for i in range(N):
for t in range(10):
arr += 1
def section():
arr = torch.zeros((1000, 1000))
for i in range(N):
with monit.section('run'):
for t in range(10):
arr += 1
def section_silent():
arr = torch.zeros((1000, 1000))
for i in range(N):
with monit.section('run', is_silent=True):
for t in range(10):
arr += 1
def main():
start = time.time()
no_section()
logger.log('No Section: ', (f'{time.time() - start}', Text.value))
start = time.time()
section()
logger.log('Section: ', (f'{time.time() - start}', Text.value))
start = time.time()
section_silent()
logger.log('Silent Section: ', (f'{time.time() - start}', Text.value))
if __name__ == '__main__':
main()
|
128705
|
from .libc import printf, scanf, localtime, asctime
from ctypes import c_int, create_string_buffer, byref, Structure
def input_pair():
key = c_int()
value = create_string_buffer(16)
printf(b"[Input a pair as int:string] ")
scanf(b"%i:%s", byref(key), byref(value))
return key, value.value
def print_a_time():
timer = c_int(12345678)
printf(asctime(localtime(byref(timer))))
|
128726
|
from django.urls import path
from .apps import AZIranianBankGatewaysConfig
from .views import callback_view, go_to_bank_gateway
app_name = AZIranianBankGatewaysConfig.name
_urlpatterns = [
path('callback/', callback_view, name='callback'),
path('go-to-bank-gateway/', go_to_bank_gateway, name='go-to-bank-gateway'),
]
def az_bank_gateways_urls():
return _urlpatterns, app_name, app_name
|
128742
|
import codecs
import yaml
from yaml.composer import Composer
from ansiblereview import Result, Error
def hunt_repeated_yaml_keys(data):
"""Parses yaml and returns a list of repeated variables and
the line on which they occur
"""
loader = yaml.Loader(data)
def compose_node(parent, index):
# the line number where the previous token has ended (plus empty lines)
line = loader.line
node = Composer.compose_node(loader, parent, index)
node.__line__ = line + 1
return node
def construct_mapping(node, deep=False):
mapping = dict()
errors = dict()
for key_node, value_node in node.value:
key = key_node.value
if key in mapping:
if key in errors:
errors[key].append(key_node.__line__)
else:
errors[key] = [mapping[key], key_node.__line__]
mapping[key] = key_node.__line__
return errors
loader.compose_node = compose_node
loader.construct_mapping = construct_mapping
data = loader.get_single_data()
return data
def repeated_vars(candidate, settings):
with codecs.open(candidate.path, 'r') as f:
errors = hunt_repeated_yaml_keys(f) or dict()
return Result(candidate, [Error(err_line, "Variable %s occurs more than once" % err_key)
for err_key in errors for err_line in errors[err_key]])
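# Hedged usage sketch of hunt_repeated_yaml_keys on an inline YAML string
# (the sample document below is illustrative):
if __name__ == "__main__":
    sample = "foo: 1\nbar: 2\nfoo: 3\n"
    # expected output: {'foo': [1, 3]} -- 'foo' is defined on lines 1 and 3
    print(hunt_repeated_yaml_keys(sample))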
|
128747
|
from mobula.utils import get_git_hash
def test_get_git_hash():
git_hash = get_git_hash()
assert type(git_hash) == str, (git_hash, type(git_hash))
assert len(git_hash) == 7 or git_hash == 'custom', git_hash
def test_edict():
from mobula.internal.edict import edict
data = edict(a=3, b=4)
assert 'a' in data
assert hasattr(data, 'a')
assert 'b' in data
assert hasattr(data, 'b')
assert len(data) == 2
assert data['a'] == 3
assert data['b'] == 4
data.a = 5
assert data['a'] == 5
data.a += 3
assert data['a'] == 8
data.update(dict(c=6))
assert 'c' in data
assert data['c'] == 6
data['c'] += 1
assert data['c'] == 7
del data.b
assert 'b' not in data
assert not hasattr(data, 'b')
assert len(data) == 2
del data['a']
assert 'a' not in data
assert len(data) == 1
|
128829
|
from sympy import acos, symbols
from sympy.physics.vector import ReferenceFrame

a, b = symbols('a b')
N = ReferenceFrame('N')
v1 = a * N.x + b * N.y + a * N.z
v2 = b * N.x + a * N.y + b * N.z
acos(v1.dot(v2) / (v1.magnitude() * v2.magnitude()))
|
128838
|
import sys
import logging
import argparse
from conda_docker.conda import (
build_docker_environment,
find_user_conda,
conda_info,
find_precs,
fetch_precs,
)
from conda_docker.logging import init_logging
def cli(args):
parser = argparse.ArgumentParser(description="Docker Environments")
subparsers = parser.add_subparsers()
init_subcommand_build(subparsers)
if len(args) == 0:
parser.print_help()
sys.exit(1)
args = parser.parse_args(args)
init_logging()
args.func(args)
def init_subcommand_build(subparser):
parser = subparser.add_parser("build", help="Docker Build Environment")
parser.add_argument(
"-b",
"--base",
type=str,
        # minimal image with glibc
default="frolvlad/alpine-glibc:latest",
help="base image:tag to use for docker build",
)
parser.add_argument(
"-i",
"--image",
type=str,
default="conda-docker:latest",
help="image:tag for output of docker envs build",
)
parser.add_argument(
"-p", "--prefix", default=None, help="prefix path to build from", dest="prefix"
)
parser.add_argument(
"-n", "--name", default=None, help="enviornment name to build from", dest="name"
)
parser.add_argument(
"--conda-exe", default=None, help="path to conda executable", dest="conda_exe"
)
parser.add_argument(
"-o", "--output", type=str, help="filename for docker image", required=True
)
parser.add_argument(
"-s",
"--solver",
default=None,
help="Which conda implementation to use as a solver. This will default to "
"mamba (if available), and the user's conda otherwise.",
)
parser.add_argument(
"--layering-strategy",
dest="layering_strategy",
default="layered",
choices={"layered", "single"},
help="The strategy to employ when adding layers to the image:\n"
"* single: put all packages into a single layer\n"
"* layered (default): try to place each package in its own layer.\n"
" noarch packages & leaf packages.",
)
parser.add_argument(
"package_specs",
nargs="*",
help="packages specs to install in image if environment or prefix not given",
)
parser.set_defaults(func=handle_conda_build)
def handle_conda_build(args):
user_conda = find_user_conda() if args.conda_exe is None else args.conda_exe
info = conda_info(user_conda)
platform = info["platform"]
download_dir = info["pkgs_dirs"][0]
default_prefix = info["default_prefix"]
channels = info.get("channels", [])
conda_default_channels = info.get("conda_default_channels", [])
channels_remap = info.get("channels_remap", [])
precs = find_precs(
user_conda,
download_dir,
channels=channels,
conda_default_channels=conda_default_channels,
channels_remap=channels_remap,
name=args.name,
prefix=args.prefix,
package_specs=args.package_specs,
solver=args.solver,
)
records = fetch_precs(download_dir, precs)
# now build image
build_docker_environment(
args.base,
args.image,
records,
args.output,
default_prefix,
download_dir,
user_conda,
channels_remap,
layering_strategy=args.layering_strategy,
)
def main(args=None):
args = sys.argv[1:] if args is None else args
try:
cli(args)
except KeyboardInterrupt:
logging.shutdown()
|
128850
|
import fileinput
import io
from html import escape
from html.entities import name2codepoint
from html.parser import HTMLParser
class MyHTMLParser(HTMLParser):
script = False
in_a_g = False
record_next_y = False
g_buffer = None
y_limit = None
g_current_y = None
g_dict = dict()
def _print(self, s):
if self.g_buffer is not None:
print(s, file=self.g_buffer)
else:
print(s)
def handle_starttag(self, tag, attrs):
formatted_attrs = ""
for k, v in attrs:
formatted_attrs += f' {k}="{v}"'
if k == "y":
y = float(v)
if self.record_next_y:
self.y_limit = y
self.record_next_y = False
if self.g_current_y is None or self.g_current_y > y:
self.g_current_y = y
if tag == "script":
self.script = True
elif tag == "g":
self.in_a_g = True
self.g_buffer = io.StringIO()
self.g_current_y = None
self._print(f"<{tag}{formatted_attrs}>")
def handle_endtag(self, tag):
self._print(f"</{tag}>")
if tag == "script":
self.script = False
elif tag == "g":
if self.y_limit and self.g_current_y <= self.y_limit:
print(self.g_buffer.getvalue())
self.in_a_g = False
self.g_buffer.close()
self.g_buffer = None
def handle_data(self, data):
if not self.script:
data = escape(data)
if "::bench " in data:
self.record_next_y = True
self._print(data)
def handle_decl(self, data):
self._print(f"<!{data}>")
def handle_pi(self, data):
self._print(f"<?{data}>")
def handle_comment(self, data):
self._print(f"<!--{data}-->")
def handle_entityref(self, name):
c = chr(name2codepoint[name])
raise NotImplementedError(f"Named ent: {c}")
def handle_charref(self, name):
if name.startswith("x"):
c = chr(int(name[1:], 16))
else:
c = chr(int(name))
raise NotImplementedError(f"Num ent: {c}")
parser = MyHTMLParser()
for line in fileinput.input():
parser.feed(line)
|
128864
|
from typing import Type, Any, Optional, overload
from .core import resource as res
from .core.internal_models import meta_v1, autoscaling_v1
__all__ = ['create_global_resource', 'create_namespaced_resource']
_created_resources = {}
def get_generic_resource(version, kind):
global _created_resources
model = _created_resources.get((version, kind))
return model[0] if model is not None else None
class Generic(dict):
@overload
def __init__(self, apiVersion: str=None, kind: str=None,
metadata: meta_v1.ObjectMeta=None, **kwargs):
pass
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def apiVersion(self) -> str:
return self.get('apiVersion')
@property
def kind(self) -> str:
return self.get('kind')
@property
def status(self) -> str:
return self.get('status')
@property
def metadata(self) -> Optional[meta_v1.ObjectMeta]:
meta = self.get('metadata')
if meta is None:
return None
elif isinstance(meta, meta_v1.ObjectMeta):
return meta
return meta_v1.ObjectMeta.from_dict(meta)
def __getattr__(self, item):
if item.startswith("_"):
raise AttributeError(f"{item} not found")
return self.get(item)
@classmethod
def from_dict(cls, d: dict, lazy=True):
return cls(d)
def to_dict(self, dict_factory=dict):
d = dict_factory(self)
if 'metadata' in d and isinstance(d['metadata'], meta_v1.ObjectMeta):
d['metadata'] = d['metadata'].to_dict(dict_factory)
return d
def create_api_info(group, version, kind, plural, verbs=None) -> res.ApiInfo:
if verbs is None:
verbs = ['delete', 'deletecollection', 'get', 'global_list', 'global_watch', 'list', 'patch',
'post', 'put', 'watch']
return res.ApiInfo(
resource=res.ResourceDef(group, version, kind),
plural=plural,
verbs=verbs
)
class GenericGlobalScale(res.GlobalSubResource, autoscaling_v1.Scale):
pass
class GenericGlobalStatus(res.GlobalSubResource, Generic):
pass
class GenericNamespacedScale(res.NamespacedResourceG, autoscaling_v1.Scale):
pass
class GenericNamespacedStatus(res.NamespacedResourceG, Generic):
pass
def _create_subresource(main_class, parent_info: res.ApiInfo, action):
class TmpName(main_class):
_api_info = res.ApiInfo(
resource=parent_info.resource if action == 'status' else res.ResourceDef('autoscaling', 'v1', 'Scale'),
parent=parent_info.resource,
plural=parent_info.plural,
verbs=['get', 'patch', 'put'],
action=action,
)
TmpName.__name__ = TmpName.__qualname__ = f"{parent_info.resource.kind}{action.capitalize()}"
return TmpName
class GenericGlobalResource(res.GlobalResource, Generic):
Scale: Type[GenericGlobalScale]
Status: Type[GenericGlobalStatus]
class GenericNamespacedResource(res.NamespacedResourceG, Generic):
Scale: Type[GenericNamespacedScale]
Status: Type[GenericNamespacedStatus]
def _create_resource(namespaced, group, version, kind, plural, verbs=None) -> Any:
global _created_resources
res_key = (f'{group}/{version}', kind)
signature = (namespaced, plural, tuple(verbs) if verbs else None)
if res_key in _created_resources:
model, curr_signature = _created_resources[res_key]
if curr_signature != signature:
raise ValueError(f"Resource {kind} already created but with different signature")
return model
if namespaced:
main, status, scale = GenericNamespacedResource, GenericNamespacedStatus, GenericNamespacedScale
else:
main, status, scale = GenericGlobalResource, GenericGlobalStatus, GenericGlobalScale
class TmpName(main):
_api_info = create_api_info(group, version, kind, plural, verbs=verbs)
Scale = _create_subresource(scale, _api_info, action='scale')
Status = _create_subresource(status, _api_info, action='status')
TmpName.__name__ = TmpName.__qualname__ = kind
_created_resources[res_key] = (TmpName, signature)
return TmpName
def create_global_resource(group: str, version: str, kind: str, plural: str, verbs=None) \
-> Type[GenericGlobalResource]:
"""Create a new class representing a global resource with the provided specifications.
**Parameters**
* **group** `str` - API group of the resource. Example `stable.example.com`.
* **version** `str` - API group version. Example `v1`.
* **kind** `str` - Resource name. Example `Job`.
* **plural** `str` - Resource collection name. Example `jobs`.
**returns** Subclass of `GenericGlobalResource`.
"""
return _create_resource(
False, group, version, kind, plural, verbs=verbs)
def create_namespaced_resource(group: str, version: str, kind: str, plural: str, verbs=None) \
-> Type[GenericNamespacedResource]:
"""Create a new class representing a namespaced resource with the provided specifications.
**Parameters**
* **group** `str` - API group of the resource. Example `stable.example.com`.
* **version** `str` - API group version. Example `v1`.
* **kind** `str` - Resource name. Example `Job`.
* **plural** `str` - Resource collection name. Example `jobs`.
**returns** Subclass of `GenericNamespacedResource`.
"""
return _create_resource(
True, group, version, kind, plural, verbs=verbs)
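# Hedged usage sketch (the resource names below are illustrative, not part of this module):
if __name__ == "__main__":
    CronTab = create_namespaced_resource(
        group="stable.example.com", version="v1", kind="CronTab", plural="crontabs")
    obj = CronTab(
        apiVersion="stable.example.com/v1", kind="CronTab",
        metadata={"name": "my-cron", "namespace": "default"},
        spec={"cronSpec": "*/5 * * * *"})
    print(obj.metadata.name, obj.spec["cronSpec"])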
|
128865
|
from http import cookiejar
from urllib import request, error
from urllib.parse import urlparse
class HtmlDownLoader(object):
def download(self, url, retry_count=3, headers=None, proxy=None, data=None):
if url is None:
return None
try:
req = request.Request(url, headers=headers, data=data)
cookie = cookiejar.CookieJar()
cookie_process = request.HTTPCookieProcessor(cookie)
opener = request.build_opener()
if proxy:
proxies = {urlparse(url).scheme: proxy}
opener.add_handler(request.ProxyHandler(proxies))
content = opener.open(req).read()
except error.URLError as e:
print('HtmlDownLoader download error:', e.reason)
content = None
if retry_count > 0:
if hasattr(e, 'code') and 500 <= e.code < 600:
                    # an HTTPError with a status code in the 5XX range indicates a server error, so retrying the download may succeed
return self.download(url, retry_count-1, headers, proxy, data)
return content
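# Hedged usage sketch (note that `headers` must be a dict, because it is passed
# straight to urllib's Request constructor):
#
#     downloader = HtmlDownLoader()
#     html = downloader.download('http://example.com', headers={'User-Agent': 'demo'})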
|
128957
|
import time
from datetime import timedelta
from django.db import transaction
from dpq.queue import AtLeastOnceQueue
from dpq.decorators import repeat
def foo(queue, job):
transaction.on_commit(lambda: 1/0)
print('foo {}'.format(job.args))
def timer(queue, job):
print(time.time() - job.args['time'])
def n_times(queue, job):
print('n_times', job.args['count'])
if job.args['count'] > 1:
queue.enqueue(job.task, {'count': job.args['count'] - 1})
@repeat(timedelta(seconds=1))
def repeater(queue, job):
print('repeat {}; eta {}'.format(job, job.execute_at))
def long_task(queue, job):
print('job started: {}'.format(job.id))
time.sleep(10)
print('job finished: {}'.format(job.id))
queue = AtLeastOnceQueue(
notify_channel='channel',
tasks={
'foo': foo,
'timer': timer,
'repeater': repeater,
'n_times': n_times,
'long_task': long_task,
},
)
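# Hedged usage sketch (run inside a configured Django project with dpq's tables
# migrated; task names match the `tasks` mapping above):
#
#     queue.enqueue('timer', {'time': time.time()})
#     queue.enqueue('n_times', {'count': 3})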
|
128967
|
from setuptools import find_packages, setup
with open("README.md", "r") as f:
long_description = f.read()
setup(
name="riot",
description="A simple Python test runner runner.",
url="https://github.com/DataDog/riot",
author="<NAME>.",
author_email="<EMAIL>",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
entry_points={"console_scripts": ["riot = riot.__main__:main"]},
long_description=long_description,
long_description_content_type="text/markdown",
license="Apache 2",
packages=find_packages(exclude=["tests*"]),
package_data={"riot": ["py.typed"]},
python_requires=">=3.6",
install_requires=[
"dataclasses; python_version<'3.7'",
"click>=7,<8",
"virtualenv",
"rich",
],
setup_requires=["setuptools_scm"],
use_scm_version=True,
# Required for mypy compatibility, see
# https://mypy.readthedocs.io/en/stable/installed_packages.html#making-pep-561-compatible-packages
zip_safe=False,
)
|
128985
|
class C:
def f(self, x):
pass
def g(self):
def f(x): #gets ignored by pytype but fixer sees it, generates warning (FIXME?)
return 1
return f
|
129010
|
import unittest
import os
import json
pymongo_missing = False
try:
import pymongo
except ImportError:
pymongo_missing = True
import logging
from reporter_config.Config import Config, Parser
class RCMultipleActionsTest(unittest.TestCase):
def setUp(self):
"""
Example message created by a conv function in a reporter
"""
with open(os.path.dirname(__file__) + '/rc_msg.json', 'r') as f:
self.msg = json.load(f)
self.client = pymongo.MongoClient("localhost", 27017)
self.collection = self.client["rc_test"]["alerts"]
# Remove the collection (just for assurance)
self.collection.drop()
def tearDown(self):
# Remove created file
os.remove("testfile.idea")
@unittest.skipIf(pymongo_missing, "missing pymongo, skipping mongodb test with elseactions")
def test_01_receive_message(self):
"""Perform multiple elseactions on matched message
If an action is matched perform these actions:
* Mark
* Mongo
* File
Load multiple_elseactions.yaml configuration file, parse it and analyze it
        This shouldn't raise any exceptions; if an action is performed, which it
        shouldn't be, it raises the DropMsg exception (uncaught here)
"""
        self.parser = Parser(os.path.dirname(__file__) + '/rc_config/multiple_elseactions.yaml')
        self.config = Config(self.parser)
self.assertNotEqual(self.config, None)
self.config.match(self.msg)
# The actions must be checked in reversed order
# Check if file created by File Action exists
self.assertTrue(os.path.exists("testfile.idea"), True)
# Find the event in DB
rec = self.collection.find_one()
self.assertTrue(rec["ID"], "e214d2d9-359b-443d-993d-3cc5637107a0")
# Check if message was marked
self.assertEqual(rec['Test'], True)
|
129023
|
from flask import Flask
from . import api, web
app = Flask(
__name__,
static_url_path='/assets',
static_folder='static',
template_folder='templates')
app.config['SECRET_KEY'] = 'secret' # this is fine if running locally
app.register_blueprint(api.bp)
app.register_blueprint(web.bp)
|
129029
|
import codecs
import os
import tempfile
from xml.dom import minidom
from six import PY2
from junit_xml import to_xml_report_file, to_xml_report_string
def serialize_and_read(test_suites, to_file=False, prettyprint=False, encoding=None):
"""writes the test suite to an XML string and then re-reads it using minidom,
returning => (test suite element, list of test case elements)"""
try:
iter(test_suites)
except TypeError:
test_suites = [test_suites]
if to_file:
fd, filename = tempfile.mkstemp(text=True)
os.close(fd)
with codecs.open(filename, mode="w", encoding=encoding) as f:
to_xml_report_file(f, test_suites, prettyprint=prettyprint, encoding=encoding)
print("Serialized XML to temp file [%s]" % filename)
xmldoc = minidom.parse(filename)
os.remove(filename)
else:
xml_string = to_xml_report_string(test_suites, prettyprint=prettyprint, encoding=encoding)
if PY2:
assert isinstance(xml_string, unicode) # noqa: F821
print("Serialized XML to string:\n%s" % xml_string)
if encoding:
xml_string = xml_string.encode(encoding)
xmldoc = minidom.parseString(xml_string)
def remove_blanks(node):
for x in node.childNodes:
if x.nodeType == minidom.Node.TEXT_NODE:
if x.nodeValue:
x.nodeValue = x.nodeValue.strip()
elif x.nodeType == minidom.Node.ELEMENT_NODE:
remove_blanks(x)
remove_blanks(xmldoc)
xmldoc.normalize()
ret = []
suites = xmldoc.getElementsByTagName("testsuites")[0]
for suite in suites.getElementsByTagName("testsuite"):
cases = suite.getElementsByTagName("testcase")
ret.append((suite, cases))
return ret
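# Hedged usage sketch (TestSuite/TestCase come from junit_xml and are not imported above):
#
#     from junit_xml import TestSuite, TestCase
#     suite = TestSuite('demo-suite', [TestCase('demo-case', classname='pkg.Mod', elapsed_sec=0.1)])
#     (suite_el, case_els), = serialize_and_read(suite, prettyprint=True)
#     assert case_els[0].getAttribute('name') == 'demo-case'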
|
129030
|
from __future__ import absolute_import
import itertools
__all__ = ["Registry"]
class Registry(object):
"""The registry of access control list."""
def __init__(self):
self._roles = {}
self._resources = {}
self._allowed = {}
self._denied = {}
# to allow additional short circuiting, track roles that only
# ever deny access
self._denial_only_roles = set()
self._children = {}
def add_role(self, role, parents=[]):
"""Add a role or append parents roles to a special role.
All added roles should be hashable.
(http://docs.python.org/glossary.html#term-hashable)
"""
self._roles.setdefault(role, set())
self._roles[role].update(parents)
for p in parents:
self._children.setdefault(p, set())
self._children[p].add(role)
# all roles start as deny-only (unless one of its parents
# isn't deny-only)
if not parents or self._roles_are_deny_only(parents):
self._denial_only_roles.add(role)
def add_resource(self, resource, parents=[]):
"""Add a resource or append parents resources to a special resource.
All added resources should be hashable.
(http://docs.python.org/glossary.html#term-hashable)
"""
self._resources.setdefault(resource, set())
self._resources[resource].update(parents)
def allow(self, role, operation, resource, assertion=None):
"""Add a allowed rule.
The added rule will allow the role and its all children roles to
operate the resource.
"""
assert not role or role in self._roles
assert not resource or resource in self._resources
self._allowed[role, operation, resource] = assertion
# since we just allowed a permission, role and any children aren't
# denied-only
for r in itertools.chain([role], get_family(self._children, role)):
self._denial_only_roles.discard(r)
def deny(self, role, operation, resource, assertion=None):
"""Add a denied rule.
The added rule will deny the role and its all children roles to
operate the resource.
"""
assert not role or role in self._roles
assert not resource or resource in self._resources
self._denied[role, operation, resource] = assertion
def is_allowed(self, role, operation, resource, check_allowed=True,
**assertion_kwargs):
"""Check the permission.
If the access is denied, this method will return False; if the access
is allowed, this method will return True; if there is not any rule
for the access, this method will return None.
"""
assert not role or role in self._roles
assert not resource or resource in self._resources
roles = set(get_family(self._roles, role))
operations = {None, operation}
resources = set(get_family(self._resources, resource))
def DefaultAssertion(*args, **kwargs):
return True
is_allowed = None
default_assertion = DefaultAssertion
for permission in itertools.product(roles, operations, resources):
if permission in self._denied:
assertion = self._denied[permission] or default_assertion
if assertion(self, role, operation, resource,
**assertion_kwargs):
return False # denied by rule immediately
if check_allowed and permission in self._allowed:
assertion = self._allowed[permission] or default_assertion
if assertion(self, role, operation, resource,
**assertion_kwargs):
is_allowed = True # allowed by rule
return is_allowed
def is_any_allowed(self, roles, operation, resource, **assertion_kwargs):
"""Check the permission with many roles."""
is_allowed = None # no matching rules
for i, role in enumerate(roles):
# if access not yet allowed and all remaining roles could
# only deny access, short-circuit and return False
if not is_allowed and self._roles_are_deny_only(roles[i:]):
return False
check_allowed = not is_allowed
# if another role gave access,
# don't bother checking if this one is allowed
is_current_allowed = self.is_allowed(role, operation, resource,
check_allowed=check_allowed,
**assertion_kwargs)
if is_current_allowed is False:
return False # denied by rule
elif is_current_allowed is True:
is_allowed = True
return is_allowed
def _roles_are_deny_only(self, roles):
return all(r in self._denial_only_roles for r in roles)
def get_family(all_parents, current):
"""Iterate current object and its all parents recursively."""
yield current
for parent in get_parents(all_parents, current):
yield parent
yield None
def get_parents(all_parents, current):
"""Iterate current object's all parents."""
for parent in all_parents.get(current, []):
yield parent
for grandparent in get_parents(all_parents, parent):
yield grandparent
|
129061
|
import random
import torch
import torch.optim as optim
from parlai.agents.dialog_evaluator.auto_evaluator import (
TorchGeneratorWithDialogEvalAgent,
CorpusSavedDictionaryAgent
)
from parlai.core.metrics import AverageMetric
from parlai.core.torch_agent import History
from parlai.core.torch_generator_agent import Output
from parlai.core.torch_generator_agent import PPLMetric
from parlai.utils.misc import round_sigfigs, warn_once, AttrDict
from parlai.utils.torch import padded_tensor, padded_3d
from .modules import DialogWAE_GMP, DialogWAE
def make_floor(n):
floor = [0 for _ in range(n)]
for i in range(0, n, 2):
floor[i] = 1
return floor
class Batch(AttrDict):
def __init__(self, text_vec=None, text_lengths=None, context_lens=None,
floors=None, label_vec=None, label_lengths=None, labels=None,
valid_indices=None, candidates=None, candidate_vecs=None,
image=None, observations=None, **kwargs):
super().__init__(
text_vec=text_vec, text_lengths=text_lengths, context_lens=context_lens,
floors=floors, label_vec=label_vec, label_lengths=label_lengths, labels=labels,
valid_indices=valid_indices,
candidates=candidates, candidate_vecs=candidate_vecs,
image=image, observations=observations,
**kwargs)
class PersonDictionaryAgent(CorpusSavedDictionaryAgent):
def __init__(self, opt, shared=None):
"""Initialize DictionaryAgent."""
super().__init__(opt, shared)
if not shared:
delimiter = opt.get('delimiter', '\n')
self.add_token(delimiter)
self.freq[delimiter] = 999999999
if DialogWaeAgent.P1_TOKEN:
self.add_token(DialogWaeAgent.P1_TOKEN)
if DialogWaeAgent.P2_TOKEN:
self.add_token(DialogWaeAgent.P2_TOKEN)
if DialogWaeAgent.P1_TOKEN:
self.freq[DialogWaeAgent.P1_TOKEN] = 999999998
if DialogWaeAgent.P2_TOKEN:
self.freq[DialogWaeAgent.P2_TOKEN] = 999999997
class MultiTurnOnOneRowHistory(History):
def update_history(self, obs, add_next=None):
"""
Update the history with the given observation.
:param add_next:
string to append to history prior to updating it with the
observation
"""
coin_flip = 0
if self.field in obs and obs[self.field] is not None:
if self.split_on_newln:
next_texts = obs[self.field].split(self.delimiter)
else:
next_texts = [obs[self.field]]
for text in next_texts:
self._update_raw_strings(text)
if self.add_person_tokens:
text = self._add_person_tokens(
text, self.p1_token if coin_flip % 2 == 0 else self.p2_token)
coin_flip += 1
# update history string
self._update_strings(text)
# update history vecs
self._update_vecs(text)
def get_history_vec(self):
"""Returns a vectorized version of the history."""
if len(self.history_vecs) == 0:
return None
# if self.vec_type == 'deque':
# history = deque(maxlen=self.max_len)
# for vec in self.history_vecs[:-1]:
# history.extend(vec)
# history.extend(self.delimiter_tok)
# history.extend(self.history_vecs[-1])
# else:
# # vec type is a list
# history = []
# for vec in self.history_vecs[:-1]:
# history += vec
# history += self.delimiter_tok
# history += self.history_vecs[-1]
history = self.history_vecs
return history
class DialogWaeAgent(TorchGeneratorWithDialogEvalAgent):
@classmethod
def history_class(cls):
"""
Return the history class that this agent expects to use.
        Can be overridden if a more complex history is required.
"""
return MultiTurnOnOneRowHistory
@staticmethod
def dictionary_class():
"""
Return the dictionary class that this agent expects to use.
        Can be overridden if a more complex dictionary is required.
"""
return PersonDictionaryAgent
@classmethod
def add_cmdline_args(cls, argparser):
"""Add command-line arguments specifically for this agent."""
agent = argparser.add_argument_group('DialogWAE Arguments')
# Model Arguments
agent.add_argument('--rnn_class', type=str, default='gru', choices=['gru', 'lstm'])
agent.add_argument('-esz', '--embeddingsize', type=int, default=300,
help='Size of all embedding layers')
agent.add_argument('--maxlen', type=int, default=60,
help='maximum utterance length')
agent.add_argument('--hiddensize', type=int, default=512,
help='number of hidden units per layer')
agent.add_argument('--numlayers', type=int, default=2,
help='number of layers')
agent.add_argument('--noise_radius', type=float, default=0.2,
help='stdev of noise for autoencoder (regularizer)')
agent.add_argument('--z_size', type=int, default=200,
help='dimension of z (300 performs worse)')
agent.add_argument('--lambda_gp', type=int, default=10,
help='Gradient penalty lambda hyperparameter.')
agent.add_argument('--temp', type=float, default=1.0,
help='softmax temperature (lower --> more discrete)')
agent.add_argument('--input_dropout', type=float, default=0.0)
agent.add_argument('--dropout', type=float, default=0.2)
agent.add_argument('--gmp', type='bool', default=False)
# -- with the following two arguments, we have model ``DialogWAE_GMP''
agent.add_argument('--n_prior_components', type=int, default=3)
agent.add_argument('--gumbel_temp', type=float, default=0.1)
        # -- if hred or vhred is set to True, this model degenerates into the vanilla HRED or VHRED
agent.add_argument('--hred', type='bool', default=False)
agent.add_argument('--vhred', type='bool', default=False)
        agent.add_argument('--bow_w', type=float, default=0.01)  # bag-of-words loss weight
# -- for HRAN
agent.add_argument('-attl', '--attention-length', default=48, type=int,
help='Length of local attention.')
agent.add_argument('-att', '--attention', default='none',
choices=['none', 'concat', 'general', 'dot', 'local'],
help='Choices: none, concat, general, local. '
'If set local, also set attention-length. '
'(see arxiv.org/abs/1508.04025)')
# Training Arguments
agent.add_argument('--n_iters_d', type=int, default=5,
help='number of discriminator iterations in training')
agent.add_argument('--lr_gan_g', type=float, default=5e-05,
help='model learning rate')
agent.add_argument('--lr_gan_d', type=float, default=1e-05,
help='critic/discriminator learning rate')
agent.add_argument('--gan_clamp', type=float, default=0.01,
help='WGAN clamp (Do not use clamp when you apply gradient penelty')
agent.add_argument('--norm_z', type='bool', default=False)
cls.dictionary_class().add_cmdline_args(argparser)
super(DialogWaeAgent, cls).add_cmdline_args(argparser)
return agent
def __init__(self, opt, shared=None):
"""Set up model."""
super().__init__(opt, shared)
self.id = 'DialogWAE'
if opt.get('hred', False) and opt.get('vhred', False):
            raise RuntimeError('The flags hred and vhred cannot be set to True simultaneously!')
if not opt.get('split_lines', False):
raise RuntimeError('"split_lines" must be True for DialogWAE!')
if not shared:
self.add_metric('loss_G', 0.0)
self.add_metric('loss_G_cnt', 0)
self.add_metric('loss_D', 0.0)
self.add_metric('loss_D_cnt', 0)
self.add_metric('kl_loss', 0.0)
self.add_metric('kl_loss_cnt', 0)
self.add_metric('bow_loss', 0.0)
self.add_metric('bow_loss_cnt', 0)
self.add_metric('to_minimize', 0.0)
if (
# only build an optimizer if we're training
'train' in opt.get('datatype', '') and
# and this is the main model, or on every fork if doing hogwild
(shared is None or self.opt.get('numthreads', 1) > 1)
):
self.optimizer_G = optim.RMSprop(list(self.model.post_net.parameters())
+ list(self.model.post_generator.parameters())
+ list(self.model.prior_net.parameters())
+ list(self.model.prior_generator.parameters()), lr=opt['lr_gan_g'])
self.optimizer_D = optim.RMSprop(self.model.discriminator.parameters(), lr=opt['lr_gan_d'])
def build_model(self, states=None):
special_tokens = [self.START_IDX,
self.END_IDX,
self.NULL_IDX,
self.dict[self.dict.unk_token]]
if self.opt.get('gmp', False) and not self.opt['hred']:
model = DialogWAE_GMP(self.opt, len(self.dict),
PAD_token=self.NULL_IDX,
unknown_idx=self.dict[self.dict.unk_token],
use_cuda=self.use_cuda,
special_tokens=special_tokens)
else:
model = DialogWAE(self.opt, len(self.dict),
PAD_token=self.NULL_IDX,
unknown_idx=self.dict[self.dict.unk_token],
use_cuda=self.use_cuda,
special_tokens=special_tokens)
if self.opt['embedding_type'] != 'random':
self._copy_embeddings(
model.embedder.weight, self.opt['embedding_type'])
return model
def load_state_dict(self, state_dict):
"""
Load the state dict into model.
This is easily overridable to facilitate transfer of state dicts.
"""
self.model.load_state_dict(state_dict, strict=False)
def reset_metrics(self):
super().reset_metrics()
self.metrics['loss_G_cnt'] = 0
self.metrics['loss_G'] = 0.0
self.metrics['loss_D_cnt'] = 0
self.metrics['loss_D'] = 0.0
self.metrics['kl_loss_cnt'] = 0
self.metrics['kl_loss'] = 0.0
self.metrics['bow_loss_cnt'] = 0
self.metrics['bow_loss'] = 0.0
self.metrics['to_minimize'] = 0.0
def report(self):
base = super().report()
m = dict()
if self.metrics['loss_G_cnt'] > 0:
m['loss_G'] = self.metrics['loss_G'] / self.metrics['loss_G_cnt']
if self.metrics['loss_D_cnt'] > 0:
m['loss_D'] = self.metrics['loss_D'] / self.metrics['loss_D_cnt']
if self.metrics['kl_loss_cnt'] > 0:
m['kl_loss'] = self.metrics['kl_loss'] / self.metrics['kl_loss_cnt']
if self.metrics['bow_loss_cnt'] > 0:
m['bow_loss'] = self.metrics['bow_loss'] / self.metrics['bow_loss_cnt']
if 'loss_G' in m and 'loss_D' in m:
m['to_minimize'] = m['loss_G'] + m['loss_D']
for k, v in m.items():
# clean up: rounds to sigfigs and converts tensors to floats
base[k] = round_sigfigs(v, 4)
return base
def _set_text_vec(self, obs, history, truncate):
"""
Sets the 'text_vec' field in the observation.
Useful to override to change vectorization behavior
"""
if 'text' not in obs:
return obs
if 'text_vec' not in obs:
# text vec is not precomputed, so we set it using the history
history_string = history.get_history_str()
            # when 'text_vec' is not precomputed, we build it from the history string
            # history string could be None if this is an image task and the 'text'
            # field is empty. We don't want that.
if history_string is None:
return obs
obs['full_text'] = history_string
if history_string:
obs['text_vec'] = history.get_history_vec()
# check truncation
if 'text_vec' in obs:
for idx, vec in enumerate(obs['text_vec']):
truncated_vec = self._check_truncate(vec, truncate, True)
obs['text_vec'][idx] = torch.LongTensor(truncated_vec)
return obs
def batchify(self, obs_batch, sort=False):
"""
Create a batch of valid observations from an unchecked batch.
A valid observation is one that passes the lambda provided to the
function, which defaults to checking if the preprocessed 'text_vec'
field is present which would have been set by this agent's 'vectorize'
function.
Returns a namedtuple Batch. See original definition above for in-depth
explanation of each field.
        If you want to include additional fields in the batch, you can subclass
this function and return your own "Batch" namedtuple: copy the Batch
namedtuple at the top of this class, and then add whatever additional
fields that you want to be able to access. You can then call
super().batchify(...) to set up the original fields and then set up the
additional fields in your subclass and return that batch instead.
:param obs_batch:
List of vectorized observations
:param sort:
Default False, orders the observations by length of vectors. Set to
true when using torch.nn.utils.rnn.pack_padded_sequence. Uses the text
vectors if available, otherwise uses the label vectors if available.
"""
if len(obs_batch) == 0:
return Batch()
valid_obs = [(i, ex) for i, ex in enumerate(obs_batch) if
self.is_valid(ex)]
if len(valid_obs) == 0:
return Batch()
valid_inds, exs = zip(*valid_obs)
# TEXT
xs, x_lens, context_lens, floors = None, None, None, None
if any('text_vec' in ex for ex in exs):
_xs = [ex.get('text_vec', [self.EMPTY]) for ex in exs]
xs = padded_3d(
_xs, self.NULL_IDX, self.use_cuda, fp16friendly=self.opt.get('fp16'),
)
x_lens = (xs != self.NULL_IDX).sum(dim=-1) # bsz, context_len
context_lens = (x_lens != 0).sum(dim=-1) # bsz
floors, _ = padded_tensor([make_floor(c_len.item()) for c_len in context_lens],
use_cuda=self.use_cuda)
        # We do not sort on xs, which has shape [bsz, context_len, utt_len], in this agent
# if sort:
# sort = False # now we won't sort on labels
# xs, x_lens, valid_inds, exs = argsort(
# x_lens, xs, x_lens, valid_inds, exs, descending=True
# )
# LABELS
labels_avail = any('labels_vec' in ex for ex in exs)
some_labels_avail = (labels_avail or
any('eval_labels_vec' in ex for ex in exs))
ys, y_lens, labels = None, None, None
if some_labels_avail:
field = 'labels' if labels_avail else 'eval_labels'
label_vecs = [ex.get(field + '_vec', self.EMPTY) for ex in exs]
labels = [ex.get(field + '_choice') for ex in exs]
y_lens = [y.shape[0] for y in label_vecs]
ys, y_lens = padded_tensor(
label_vecs, self.NULL_IDX, self.use_cuda,
fp16friendly=self.opt.get('fp16')
)
y_lens = torch.LongTensor(y_lens)
if self.use_cuda:
y_lens = y_lens.cuda()
# We do not sort examples in batch for this agent
# if sort and xs is None:
# ys, valid_inds, label_vecs, labels, y_lens = argsort(
# y_lens, ys, valid_inds, label_vecs, labels, y_lens,
# descending=True
# )
# LABEL_CANDIDATES
cands, cand_vecs = None, None
if any('label_candidates_vecs' in ex for ex in exs):
cands = [ex.get('label_candidates', None) for ex in exs]
cand_vecs = [ex.get('label_candidates_vecs', None) for ex in exs]
# IMAGE
imgs = None
if any('image' in ex for ex in exs):
imgs = [ex.get('image', None) for ex in exs]
return Batch(text_vec=xs, text_lengths=x_lens, context_lens=context_lens,
floors=floors, label_vec=ys, label_lengths=y_lens,
labels=labels, valid_indices=valid_inds, candidates=cands,
candidate_vecs=cand_vecs, image=imgs, observations=exs)
def vectorize(self, obs, history, add_start=True, add_end=True,
text_truncate=None, label_truncate=None):
"""
Make vectors out of observation fields and store in the observation.
In particular, the 'text' and 'labels'/'eval_labels' fields are
processed and a new field is added to the observation with the suffix
'_vec'.
If you want to use additional fields on your subclass, you can override
this function, call super().vectorize(...) to process the text and
labels, and then process the other fields in your subclass.
Additionally, if you want to override some of these default parameters,
then we recommend using a pattern like:
.. code-block:: python
def vectorize(self, *args, **kwargs):
kwargs['add_start'] = False
return super().vectorize(*args, **kwargs)
:param obs:
Single observation from observe function.
:param add_start:
default True, adds the start token to each label.
:param add_end:
default True, adds the end token to each label.
:param text_truncate:
default None, if set truncates text vectors to the specified
length.
:param label_truncate:
default None, if set truncates label vectors to the specified
length.
:return:
the input observation, with 'text_vec', 'label_vec', and
'cands_vec' fields added.
"""
self._set_text_vec(obs, history, text_truncate)
self._set_label_vec(obs, True, True, label_truncate)
self._set_label_cands_vec(obs, add_start, add_end, label_truncate)
return obs
def _model_input(self, batch):
return (batch.text_vec,
batch.context_lens,
batch.text_lengths,
batch.floors,)
def compute_loss(self, batch, return_output=False):
if batch.label_vec is None:
raise ValueError('Cannot compute loss without a label.')
model_output = self.model(*self._model_input(batch),
ys=batch.label_vec,
res_lens=batch.label_lengths)
scores, preds, vhred_kl_loss, bow_loss, *_ = model_output
score_view = scores.view(-1, scores.size(-1))
loss = self.criterion(
score_view / self.opt['temp'],
batch.label_vec[:, 1:].contiguous().view(-1))
loss = loss.view(scores.shape[:-1]).sum(dim=1)
# save loss to metrics
notnull = batch.label_vec[:, :-1].ne(self.NULL_IDX)
target_tokens = notnull.long().sum(dim=-1)
correct = ((batch.label_vec[:, :-1] == preds) * notnull).sum(dim=-1)
self.record_local_metric('loss', AverageMetric.many(loss, target_tokens))
self.record_local_metric('ppl', PPLMetric.many(loss, target_tokens))
self.record_local_metric(
'token_acc', AverageMetric.many(correct, target_tokens)
)
# actually do backwards loss
loss = loss.sum()
loss /= target_tokens.sum() # average loss per token
# for vhred
if vhred_kl_loss != -1 and bow_loss != -1:
loss += (vhred_kl_loss * self.model.anneal_weight(self._number_training_updates)
+ self.opt['bow_w'] * bow_loss)
self.metrics['kl_loss_cnt'] += 1
self.metrics['kl_loss'] += vhred_kl_loss.item()
self.metrics['bow_loss_cnt'] += 1
self.metrics['bow_loss'] += bow_loss.item()
if return_output:
return (loss, model_output)
else:
return loss
def _dummy_batch(self, batchsize, maxlen):
context_lens = torch.LongTensor([3] * batchsize)
return Batch(
text_vec=torch.ones(batchsize, 3, maxlen).long().cuda(),
text_lengths=(torch.ones(batchsize, 3) * maxlen).long().cuda(),
context_lens=context_lens.cuda(),
floors=padded_tensor([make_floor(c_len.item()) for c_len in context_lens],
use_cuda=self.use_cuda)[0],
label_vec=torch.ones(batchsize, 2).long().cuda(),
label_lengths=torch.LongTensor([2] * batchsize).cuda()
)
def wae_gan_train_step(self, batch):
loss_G = self.model.train_G(batch.text_vec, batch.context_lens, batch.text_lengths,
batch.floors, batch.label_vec, batch.label_lengths,
self.optimizer_G)
self.metrics['loss_G_cnt'] += 1
self.metrics['loss_G'] += loss_G['train_loss_G']
for i in range(self.opt['n_iters_d']):
loss_D = self.model.train_D(batch.text_vec, batch.context_lens, batch.text_lengths,
batch.floors, batch.label_vec, batch.label_lengths,
self.optimizer_D)
if i == 0:
self.metrics['loss_D_cnt'] += 1
self.metrics['loss_D'] += loss_D['train_loss_D']
def train_step(self, batch):
super(DialogWaeAgent, self).train_step(batch)
if not (self.opt.get('hred', False) or self.opt.get('vhred', False)):
self.wae_gan_train_step(batch)
def eval_step(self, batch):
"""Evaluate a single batch of examples."""
if batch.text_vec is None:
return
self.model.eval()
if batch.label_vec is not None:
# calculate loss on targets with teacher forcing
_, model_output = self.compute_loss(batch, return_output=True)
if not (self.opt.get('hred', False) or self.opt.get('vhred', False)):
*_, x, c = model_output
costG, costD = self.model.wae_gan_valid(x, c)
self.metrics['loss_G_cnt'] += 1
self.metrics['loss_G'] += costG
self.metrics['loss_D_cnt'] += 1
self.metrics['loss_D'] += costD
preds = None
if self.skip_generation:
# noinspection PyTypeChecker
warn_once(
"--skip-generation does not produce accurate metrics beyond ppl",
RuntimeWarning
)
else:
sample_words, sample_lens = self.model.sample(
batch.text_vec, batch.context_lens, batch.text_lengths,
batch.floors, self.START_IDX, self.END_IDX
)
preds = torch.from_numpy(sample_words)
text = [self._v2t(p) for p in preds] if preds is not None else None
output = Output(text)
label_text = batch.labels
context = [obs['text'] for obs in batch.observations]
if label_text is not None:
self._eval_embedding_metrics(output, label_text, context)
self._eval_distinct_metrics(output, label_text)
self._eval_entropy_metrics(output, label_text)
# sampling predictions for printing
if output.text is not None:
for i in range(len(output.text)):
if random.random() > (1 - self.opt['report_freq']):
context_text = batch.observations[i]['text']
target_text = self._v2t(batch.label_vec[i])
print('TEXT: ', context_text.replace(self.dict[self.NULL_IDX], ''))
print('TARGET: ', target_text)
print('PREDICTION: ', output.text[i], '\n~')
if text and self.compute_tokenized_bleu:
# compute additional bleu scores
self._compute_fairseq_bleu(batch, preds)
self._compute_nltk_bleu(batch, text)
return output
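# A minimal, hypothetical sketch of the subclassing pattern described in the
# batchify() docstring above: copy the Batch namedtuple, extend it with an
# extra field, and fill that field in after calling super().batchify(). The
# 'persona_vec' field and the ExtendedDialogWaeAgent name are illustrative only.
from collections import namedtuple

ExtendedBatch = namedtuple('ExtendedBatch', Batch._fields + ('persona_vec',))

class ExtendedDialogWaeAgent(DialogWaeAgent):
    def batchify(self, obs_batch, sort=False):
        base = super().batchify(obs_batch, sort)
        persona_vecs = ([ex.get('persona_vec') for ex in base.observations]
                        if base.observations else None)
        return ExtendedBatch(**base._asdict(), persona_vec=persona_vecs)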
|
129088
|
import sys, asyncio, os
from catalog import searchDomains, findOpenPorts, kafkaProducer, festin, filterRepeated, S3Store, S3Write
from aux import consumer, producer
async def dispatcher(p, kafkaQ, S3Q):
    # Route scan results by port: HTTP endpoints (port 80) go to the Kafka
    # queue, everything else goes to the S3 queue.
    if p["port"] == "80":
        await kafkaQ.put(p)
    else:
        await S3Q.put(p)
async def main():
tasks = []
foundDomains = asyncio.Queue()
filteredDomains = asyncio.Queue()
portsQueue = asyncio.Queue()
kafkaQueue = asyncio.Queue()
S3Queue = asyncio.Queue()
tasks.append(asyncio.create_task(producer(searchDomains, sys.argv[1], foundDomains)))
tasks.append(asyncio.create_task(producer(festin, sys.argv[1], os.environ["DNS_SERVER"], True, foundDomains)))
tasks.append(asyncio.create_task(consumer(foundDomains, findOpenPorts, 2, "80,443", filteredDomains)))
tasks.append(asyncio.create_task(consumer(filteredDomains, filterRepeated, 3, portsQueue)))
tasks.append(asyncio.create_task(consumer(portsQueue, dispatcher, 4, kafkaQueue, S3Queue)))
tasks.append(asyncio.create_task(consumer(kafkaQueue, kafkaProducer, 5, os.environ["KAFKA_SERVER"], "domainsTopic")))
tasks.append(asyncio.create_task(consumer(S3Queue, S3Store, 6)))
await asyncio.gather(*tasks)
await S3Write(os.environ["BUCKET_URI"])
asyncio.run(main())
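# Example invocation (hypothetical values; the script file name is
# illustrative): the pipeline expects the target domain as its single
# positional argument and reads DNS_SERVER, KAFKA_SERVER and BUCKET_URI from
# the environment, e.g.:
#
#   DNS_SERVER=8.8.8.8 KAFKA_SERVER=localhost:9092 BUCKET_URI=s3://results \
#       python pipeline.py example.com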
|
129131
|
import unittest
import numpy as np
import time
import uuid
from arch.api import session
from federatedml.param.intersect_param import IntersectParam
class TestRsaIntersectGuest(unittest.TestCase):
def setUp(self):
self.jobid = str(uuid.uuid1())
session.init(self.jobid)
from federatedml.statistic.intersect.intersect_guest import RsaIntersectionGuest
from federatedml.statistic.intersect.intersect import RsaIntersect
intersect_param = IntersectParam()
self.rsa_operator = RsaIntersectionGuest(intersect_param)
self.rsa_op2 = RsaIntersect(intersect_param)
def data_to_eggroll_table(self, data):
return session.parallelize(data, include_key=True, partition=2)
def test_func_map_raw_id_to_encrypt_id(self):
d1 = [("a", 1), ("b", 2), ("c", 3)]
d2 = [(4, "a"), (5, "b"), (6, "c")]
D1 = self.data_to_eggroll_table(d1)
D2 = self.data_to_eggroll_table(d2)
res = self.rsa_operator.map_raw_id_to_encrypt_id(D1, D2)
gt = [(4,"id"),(5,"id"),(6,"id")]
self.assertListEqual(list(res.collect()), gt)
def test_hash(self):
res = str(self.rsa_op2.hash("1"))
self.assertEqual(res, "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b")
def tearDown(self):
session.stop()
try:
session.cleanup("*", self.jobid, True)
except EnvironmentError:
pass
try:
session.cleanup("*", self.jobid, False)
except EnvironmentError:
pass
if __name__ == "__main__":
unittest.main()
|
129178
|
from typing import List, Callable, Optional, Union, Dict, Awaitable, Any
from aiogram.types import InlineKeyboardButton, CallbackQuery
from aiogram_dialog.dialog import Dialog
from aiogram_dialog.manager.manager import DialogManager
from aiogram_dialog.widgets.text import Text
from aiogram_dialog.widgets.widget_event import WidgetEventProcessor, ensure_event_processor
from .base import Keyboard
from ..when import WhenCondition
OnClick = Callable[[CallbackQuery, "Button", DialogManager], Awaitable]
class Button(Keyboard):
def __init__(self, text: Text, id: str,
on_click: Union[OnClick, WidgetEventProcessor, None] = None,
when: WhenCondition = None):
super().__init__(id, when)
self.text = text
self.on_click = ensure_event_processor(on_click)
async def process_callback(self, c: CallbackQuery, dialog: Dialog, manager: DialogManager) -> bool:
if c.data != self.widget_id:
return False
await self.on_click.process_event(c, self, manager)
return True
async def _render_keyboard(self, data: Dict, manager: DialogManager) -> List[List[InlineKeyboardButton]]:
return [[
InlineKeyboardButton(
text=await self.text.render_text(data, manager),
callback_data=self.widget_id
)
]]
class Url(Keyboard):
def __init__(self, text: Text, url: Text, id: Optional[str] = None, when: Union[str, Callable, None] = None):
super().__init__(id, when)
self.text = text
self.url = url
async def _render_keyboard(self, data: Dict, manager: DialogManager) -> List[List[InlineKeyboardButton]]:
return [[
InlineKeyboardButton(
text=await self.text.render_text(data, manager),
url=await self.url.render_text(data, manager)
)
]]
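# Hypothetical usage sketch (not part of the widget classes above): wiring a
# Button to an async on_click handler and adding a static Url button. Const is
# assumed to be importable from aiogram_dialog.widgets.text.
from aiogram_dialog.widgets.text import Const

async def on_hello(c: CallbackQuery, button: Button, manager: DialogManager):
    # answer the callback so the client stops showing a loading indicator
    await c.answer("Hello!")

hello_button = Button(Const("Say hello"), id="hello", on_click=on_hello)
docs_link = Url(Const("Docs"), Const("https://example.com/docs"))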
|
129195
|
import socket
from p2p.connection_manager import ConnectionManager
STATE_INIT = 0
STATE_STANDBY = 1
STATE_CONNECTED_TO_NETWORK = 2
STATE_SHUTTING_DOWN = 3
class ServerCore:
def __init__(self, my_port=50082, core_node_host=None, core_node_port=None):
self.server_state = STATE_INIT
print('Initializing server...')
self.my_ip = self.__get_myip()
print('Server IP address is set to ... ', self.my_ip)
self.my_port = my_port
self.cm = ConnectionManager(self.my_ip, self.my_port)
self.core_node_host = core_node_host
self.core_node_port = core_node_port
def start(self):
self.server_state = STATE_STANDBY
self.cm.start()
def join_network(self):
if self.core_node_host is not None:
self.server_state = STATE_CONNECTED_TO_NETWORK
self.cm.join_network(self.core_node_host, self.core_node_port)
else:
            print('This server is running as Genesis Core Node...')
def shutdown(self):
self.server_state = STATE_SHUTTING_DOWN
print('Shutdown server...')
self.cm.connection_close()
def get_my_current_state(self):
return self.server_state
    def __get_myip(self):
        # Determine the host's outgoing IP address by opening a UDP socket
        # towards a public address; connect() on UDP does not send any packets.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect(('8.8.8.8', 80))
            return s.getsockname()[0]
        finally:
            s.close()
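# Minimal usage sketch (hypothetical): without core_node_host the server runs
# as the genesis core node; pass core_node_host/core_node_port to join an
# existing network instead.
if __name__ == '__main__':
    server = ServerCore(my_port=50082)
    server.start()
    server.join_network()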
|
129199
|
from google.cloud import storage
import tempfile
import h5py
import os
class GCSH5Writer(object):
def __init__(self, fn):
self.fn = fn
if fn.startswith('gs://'):
self.gclient = storage.Client()
self.storage_dir = tempfile.TemporaryDirectory()
self.writer = h5py.File(os.path.join(self.storage_dir.name, 'temp.h5'), 'w')
self.bucket_name, self.file_name = self.fn.split('gs://', 1)[1].split('/', 1)
else:
self.gclient = None
self.bucket_name = None
self.file_name = None
self.storage_dir = None
assert not os.path.exists(self.fn)
self.writer = h5py.File(self.fn, 'w')
def create_group(self, name, track_order=None):
return self.writer.create_group(name, track_order=track_order)
def create_dataset(self, name, data, **kwargs):
return self.writer.create_dataset(name, data=data, **kwargs)
def close(self):
self.writer.close()
if self.gclient is not None:
bucket = self.gclient.get_bucket(self.bucket_name)
blob = bucket.blob(self.file_name)
blob.upload_from_filename(os.path.join(self.storage_dir.name, 'temp.h5'))
self.storage_dir.cleanup()
def __enter__(self):
# Called when entering "with" context.
return self
def __exit__(self, *_):
        # Called when exiting "with" context: close the file and, for gs://
        # targets, upload the temporary HDF5 file to the bucket.
        self.close()
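# Hypothetical usage sketch: GCSH5Writer works as a context manager and
# accepts either a local path or a gs:// URI; the file name below is
# illustrative only.
if __name__ == '__main__':
    import numpy as np

    with GCSH5Writer('example_output.h5') as writer:
        writer.create_group('measurements')
        writer.create_dataset('measurements/values', data=np.arange(10))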
|
129207
|
from .core.warnings import high
high("DEPRECATED! Please use my.hackernews.materialistic instead.")
from .hackernews.materialistic import *
|
129248
|
from typing import Dict, Generator, Tuple, Optional, Union
import pandas as pd
import torch
import torchtext
from .torch_data import toTensor, TorchDataSet, TorchDataSetProvider
class TorchtextDataSetFromDataFrame(torchtext.data.Dataset):
"""
A specialisation of torchtext.data.Dataset, where the data is taken from a pandas.DataFrame
"""
def __init__(self, df: pd.DataFrame, fields: Dict[str, torchtext.data.Field]):
"""
:param df: the data frame from which to obtain the data
:param fields: a mapping from column names in the given data frame to torchtext fields, i.e.
the keys are the columns to read and the values are the fields to use for generated Example instances
"""
examples = df.apply(self._exampleFromSeries, args=(fields,), axis=1).tolist()
fields = dict(fields)
super().__init__(examples, fields)
@classmethod
def _exampleFromSeries(cls, series: pd.Series, fields: Dict[str, torchtext.data.Field]):
return cls._exampleFromDict(series.to_dict(), fields)
@classmethod
def _exampleFromDict(cls, d: dict, fields: Dict[str, torchtext.data.Field]):
ex = torchtext.data.Example()
for key, field in fields.items():
if key not in d:
raise ValueError("Specified key {} was not found in "
"the input data".format(key))
if field is not None:
setattr(ex, key, field.preprocess(d[key]))
else:
setattr(ex, key, d[key])
return ex
class TorchDataSetFromTorchtextDataSet(TorchDataSet):
def __init__(self, dataSet: torchtext.data.Dataset, inputField: str, outputField: Optional[str], cuda: bool):
self.outputField = outputField
self.inputField = inputField
self.dataSet = dataSet
self.cuda = cuda
def iterBatches(self, batchSize: int, shuffle: bool = False, inputOnly=False) -> Generator[Union[Tuple[torch.Tensor, torch.Tensor], torch.Tensor], None, None]:
iterator = torchtext.data.BucketIterator(self.dataSet,
batch_size=batchSize,
sort_key=lambda x: len(x.text),
sort_within_batch=False)
for batch in iterator:
x = toTensor(getattr(batch, self.inputField), self.cuda)
if not inputOnly and self.outputField is not None:
y = toTensor(getattr(batch, self.outputField), self.cuda)
yield x, y
else:
yield x
def size(self) -> Optional[int]:
return len(self.dataSet)
class TorchDataSetProviderFromTorchtextDataSet(TorchDataSetProvider):
def __init__(self, dataSet: torchtext.data.Dataset, inputField: str, outputField: str, cuda: bool, modelOutputDim, inputDim=None):
super().__init__(modelOutputDim=modelOutputDim, inputDim=inputDim)
self.dataSet = dataSet
self.outputField = outputField
self.inputField = inputField
self.cuda = cuda
def provideSplit(self, fractionalSizeOfFirstSet: float) -> Tuple[TorchDataSet, TorchDataSet]:
d1, d2 = self.dataSet.split(fractionalSizeOfFirstSet)
return self._createDataSet(d1), self._createDataSet(d2)
def _createDataSet(self, d: torchtext.data.Dataset):
return TorchDataSetFromTorchtextDataSet(d, self.inputField, self.outputField, self.cuda)
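# Hypothetical usage sketch: build a torchtext dataset from a DataFrame with a
# text column and a numeric label column, then wrap it for batch iteration.
# The example data and field parameters below are illustrative only.
if __name__ == '__main__':
    exampleDf = pd.DataFrame({"text": ["a good example", "a bad example"], "label": [1, 0]})
    exampleFields = {
        "text": torchtext.data.Field(sequential=True, lower=True),
        "label": torchtext.data.Field(sequential=False, use_vocab=False),
    }
    torchtextDataSet = TorchtextDataSetFromDataFrame(exampleDf, exampleFields)
    exampleFields["text"].build_vocab(torchtextDataSet)
    torchDataSet = TorchDataSetFromTorchtextDataSet(torchtextDataSet, "text", "label", cuda=False)
    x, y = next(torchDataSet.iterBatches(batchSize=2))
    print(x.shape, y.shape)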
|
129261
|
from django import template
from django.utils.http import urlquote
from endpoint_monitor.models import EndpointTest
from linda_app.lists import CATEGORIES
from linda_app.models import Vocabulary, VocabularyClass, VocabularyProperty, get_configuration, \
datasource_from_endpoint
register = template.Library()
# Load user configurable settings
config = get_configuration()
@register.filter(name="nice_name")
def nice_name(user):
return user.get_full_name() or user.username
@register.filter(name="vocabularies")
def vocabularies(objects):
return [elem for elem in objects if isinstance(elem.object, Vocabulary) or isinstance(elem, Vocabulary)]
@register.filter(name="classes")
def vocabulary_classes(objects):
return [elem for elem in objects if isinstance(elem.object, VocabularyClass) or isinstance(elem, VocabularyClass)]
@register.filter(name="properties")
def vocabulary_properties(objects):
return [elem for elem in objects if isinstance(elem.object, VocabularyProperty) or isinstance(elem, VocabularyProperty)]
@register.filter(name="get_endpoint")
def get_endpoint(datasource):
return datasource.get_endpoint()
@register.simple_tag
def url_replace(request, field, value):
dict_ = request.GET.copy()
dict_[field] = value
return dict_.urlencode()
@register.filter(name="datasource_visualize")
def datasource_visualize(datasource):
endpoint = config.private_sparql_endpoint
graph_uri = datasource.uri
return '/visualizations/#/datasource/' + datasource.name + '/' + urlquote(endpoint, safe='') + '/' + urlquote(graph_uri, safe='') + '/rdf'
@register.filter
def sparql_version(datasource):
if datasource.is_public:
tests = EndpointTest.objects.filter(datasource=datasource, up=True).order_by('-id')
if tests:
if tests[0].supports_minus:
return "1.1"
else:
return "1.0"
else:
return ""
else:
return "1.1"
@register.filter
def domain_of(cls, limit):
return cls.domain_of()[:limit]
@register.filter
def range_of(cls, limit):
return cls.range_of()[:limit]
@register.filter
def category_display_name(category):
for c in CATEGORIES:
if c[0] == category:
return c[1]
return category
@register.filter
def label_from_uri(uri):
label = uri.split('/')[-1]
if label.find('#') >= 0:
label = uri.split('#')[-1]
return label
@register.filter
def get_datasources(query):
# get initial endpoint
dt_source = query.get_datasource()
if dt_source:
datasources = [dt_source.title]
else:
datasources = [query.endpoint]
# search for additional endpoints
lines = query.sparql.split('\n')
for line in lines:
pos = line.find('SERVICE <')
if pos < 0:
continue
start = pos + len('SERVICE <')
end = start + line[start:].find('>')
endpoint = line[start:end]
dt_source = datasource_from_endpoint(endpoint)
if dt_source:
datasources.append(dt_source.title)
else:
datasources.append(label_from_uri(endpoint))
# create string
result = datasources[0]
for dt in datasources[1:-1]:
result += ", " + dt
if len(datasources) > 1:
result += " and " + datasources[-1]
return result
|
129265
|
import numpy as np
import sys
from lstm import LstmParam, LstmNetwork
class ToyLossLayer:
"""
Computes square loss with first element of hidden layer array.
"""
@classmethod
    def loss(cls, pred, label):
return (pred[0] - label) ** 2
@classmethod
    def bottom_diff(cls, pred, label):
diff = np.zeros_like(pred)
diff[0] = 2 * (pred[0] - label)
return diff
class Primes:
def __init__(self):
self.primes = list()
for i in range(2, 100):
is_prime = True
for j in range(2, i-1):
if i % j == 0:
is_prime = False
if is_prime:
self.primes.append(i)
self.primes_count = len(self.primes)
def get_sample(self, x_dim, y_dim, index):
result = np.zeros((x_dim+y_dim))
for i in range(index, index + x_dim + y_dim):
result[i-index] = self.primes[i%self.primes_count]/100.0
return result
def example_0():
mem_cell_ct = 100
x_dim = 50
concat_len = x_dim + mem_cell_ct
lstm_param = LstmParam(mem_cell_ct, x_dim)
lstm_net = LstmNetwork(lstm_param)
primes = Primes()
x_list = []
y_list = []
for i in range(0, 10):
sample = primes.get_sample(x_dim, 1, i)
x = sample[0:x_dim]
y = sample[x_dim:x_dim+1].tolist()[0]
x_list.append(x)
y_list.append(y)
for cur_iter in range(10000):
if cur_iter % 1000 == 0:
print "y_list=", y_list
for ind in range(len(y_list)):
lstm_net.x_list_add(x_list[ind])
if cur_iter % 1000 == 0:
print "y_pred[%d] : %f" % (ind, lstm_net.lstm_node_list[ind].state.h[0])
loss = lstm_net.y_list_is(y_list, ToyLossLayer)
if cur_iter % 1000 == 0:
print "loss: ", loss
lstm_param.apply_diff(lr=0.01)
lstm_net.x_list_clear()
if __name__ == "__main__":
example_0()
|
129284
|
from ckan.logic.schema import validator_args
@validator_args
def create_user_to_organization_schema(not_empty, unicode_safe,
email_validator, business_id_validator):
return {
"fullname": [not_empty, unicode_safe],
"email": [not_empty, unicode_safe, email_validator],
"business_id": [not_empty, unicode_safe, business_id_validator],
"organization_name": [not_empty, unicode_safe]
}
|
129314
|
from __future__ import print_function
import argparse
from elasticsearch import Elasticsearch
import elasticsearch.helpers
from solr_to_es.solrSource import SlowSolrDocs
import pysolr
DEFAULT_ES_MAX_RETRIES = 15
DEFAULT_ES_INITIAL_BACKOFF = 3
class SolrEsWrapperIter:
def __init__(self, solr_itr, es_index, es_type, id_field=None):
self.index = es_index
self.type = es_type
self.id_field = id_field
self.solr_itr = iter(solr_itr)
def __iter__(self):
return self
def __next__(self):
doc = next(self.solr_itr)
new_doc = dict()
new_doc['_index'] = self.index
new_doc['_type'] = self.type
new_doc['_source'] = doc
if self.id_field:
new_doc['_id'] = doc[self.id_field]
return new_doc
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('solr_url',
type=str)
parser.add_argument('--solr-query',
type=str,
default='*:*')
parser.add_argument('--solr-filter',
type=str,
default='')
parser.add_argument('--solr-fields',
type=str,
default='')
parser.add_argument('--id-field',
type=str,
default=None)
parser.add_argument('elasticsearch_url',
type=str)
parser.add_argument('elasticsearch_index',
type=str)
parser.add_argument('doc_type',
type=str)
parser.add_argument('--rows-per-page',
type=int,
default=500)
parser.add_argument('--es-timeout',
type=int,
default=60)
parser.add_argument('--es-user',
type=str,
default='')
parser.add_argument('--es-password',
type=str,
default='')
parser.add_argument('--es-max-retries',
type=int,
default=DEFAULT_ES_MAX_RETRIES,
help='maximum number of times a document will be retried when 429 is received, set to 0 for no retries on 429. default {}'.format(DEFAULT_ES_MAX_RETRIES))
parser.add_argument('--es-initial-backoff',
type=int,
default=DEFAULT_ES_INITIAL_BACKOFF,
help='number of seconds we should wait before the first retry. Any subsequent retries will be powers of initial_backoff * 2**retry_number. default {}'.format(DEFAULT_ES_INITIAL_BACKOFF))
return vars(parser.parse_args())
def main():
try:
args = parse_args()
if args['es_user']:
es_conn = Elasticsearch(hosts=args['elasticsearch_url'], timeout=args['es_timeout'], http_auth=(args['es_user'], args['es_password']))
else:
es_conn = Elasticsearch(hosts=args['elasticsearch_url'], timeout=args['es_timeout'])
# Split the solr_url into the root and the request handler
solr_conn = pysolr.Solr(args['solr_url'].rsplit('/', 1)[0], search_handler=args['solr_url'].rsplit('/', 1)[-1])
solr_fields = args['solr_fields'].split() if args['solr_fields'] else ''
solr_filter = args['solr_filter'] if args['solr_filter'] else ''
solr_itr = SlowSolrDocs(args['solr_url'], args['solr_query'], rows=args['rows_per_page'], fl=solr_fields,
fq=solr_filter)
es_actions = SolrEsWrapperIter(solr_itr, args['elasticsearch_index'], args['doc_type'], args['id_field'])
        errors = []
        for ok, item in elasticsearch.helpers.streaming_bulk(es_conn, es_actions, max_retries=args['es_max_retries'], initial_backoff=args['es_initial_backoff']):
            if not ok:
                errors.append(item)
        if errors:
            print('{} document(s) failed to index'.format(len(errors)))
except KeyboardInterrupt:
print('Interrupted')
if __name__ == "__main__":
main()
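# Example invocation (hypothetical hosts and index names; the script file name
# is illustrative), matching the positional and optional arguments defined in
# parse_args():
#
#   python solr_to_es.py http://localhost:8983/solr/collection1/select \
#       http://localhost:9200 my_index my_doc_type \
#       --rows-per-page 500 --id-field id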
|
129337
|
from boa3.exception import CompilerError, CompilerWarning
from boa3.neo.vm.opcode.Opcode import Opcode
from boa3.neo.vm.type.Integer import Integer
from boa3.neo.vm.type.String import String
from boa3_test.tests.boa_test import BoaTest
from boa3_test.tests.test_classes.testengine import TestEngine
class TestTyping(BoaTest):
default_folder: str = 'test_sc/typing_test'
def test_cast_to_int(self):
expected_output = (
Opcode.INITSLOT # function signature
+ b'\x01'
+ b'\x01'
+ Opcode.LDARG0 # x = cast(int, value)
+ Opcode.STLOC0
+ Opcode.LDLOC0 # return x
+ Opcode.RET
)
path = self.get_contract_path('CastToInt.py')
output = self.assertCompilerLogs(CompilerWarning.TypeCasting, path)
self.assertEqual(expected_output, output)
def test_cast_to_str(self):
expected_output = (
Opcode.INITSLOT # function signature
+ b'\x01'
+ b'\x01'
+ Opcode.LDARG0 # x = cast(str, value)
+ Opcode.STLOC0
+ Opcode.LDLOC0 # return x
+ Opcode.RET
)
path = self.get_contract_path('CastToStr.py')
output = self.assertCompilerLogs(CompilerWarning.TypeCasting, path)
self.assertEqual(expected_output, output)
def test_cast_to_list(self):
expected_output = (
Opcode.INITSLOT # function signature
+ b'\x01'
+ b'\x01'
+ Opcode.LDARG0 # x = cast(list, value)
+ Opcode.STLOC0
+ Opcode.LDLOC0 # return x
+ Opcode.RET
)
path = self.get_contract_path('CastToList.py')
output = self.assertCompilerLogs(CompilerWarning.TypeCasting, path)
self.assertEqual(expected_output, output)
def test_cast_to_typed_list(self):
expected_output = (
Opcode.INITSLOT # function signature
+ b'\x01'
+ b'\x01'
+ Opcode.LDARG0 # x = cast(List[int], value)
+ Opcode.STLOC0
+ Opcode.LDLOC0 # return x[0]
+ Opcode.PUSH0
+ Opcode.DUP
+ Opcode.SIGN
+ Opcode.PUSHM1
+ Opcode.JMPNE
+ Integer(5).to_byte_array(min_length=1, signed=True)
+ Opcode.OVER
+ Opcode.SIZE
+ Opcode.ADD
+ Opcode.PICKITEM
+ Opcode.RET
)
path = self.get_contract_path('CastToTypedList.py')
output = self.assertCompilerLogs(CompilerWarning.TypeCasting, path)
self.assertEqual(expected_output, output)
def test_cast_to_dict(self):
expected_output = (
Opcode.INITSLOT
+ b'\x01'
+ b'\x01'
+ Opcode.LDARG0 # x = cast(dict, value)
+ Opcode.STLOC0
+ Opcode.LDLOC0 # return x
+ Opcode.RET
)
path = self.get_contract_path('CastToDict.py')
output = self.assertCompilerLogs(CompilerWarning.TypeCasting, path)
self.assertEqual(expected_output, output)
def test_cast_to_typed_dict(self):
string = String('example').to_bytes()
expected_output = (
Opcode.INITSLOT
+ b'\x01'
+ b'\x01'
+ Opcode.LDARG0 # x = cast(Dict[str, int], value)
+ Opcode.STLOC0
+ Opcode.LDLOC0 # return x['example']
+ Opcode.PUSHDATA1
+ Integer(len(string)).to_byte_array(min_length=1)
+ string
+ Opcode.PICKITEM
+ Opcode.RET
)
path = self.get_contract_path('CastToTypedDict.py')
output = self.assertCompilerLogs(CompilerWarning.TypeCasting, path)
self.assertEqual(expected_output, output)
def test_cast_mismatched_type(self):
path = self.get_contract_path('CastMismatchedType.py')
self.assertCompilerLogs(CompilerError.MismatchedTypes, path)
def test_cast_to_uint160(self):
path = self.get_contract_path('CastToUInt160.py')
self.assertCompilerLogs(CompilerWarning.TypeCasting, path)
engine = TestEngine()
value = bytes(range(20))
result = self.run_smart_contract(engine, path, 'Main', value,
expected_result_type=bytes)
self.assertEqual(value, result)
def test_cast_to_transaction(self):
expected_output = (
Opcode.INITSLOT # function signature
+ b'\x01'
+ b'\x01'
+ Opcode.LDARG0 # x = cast(Transaction, value)
+ Opcode.STLOC0
+ Opcode.LDLOC0 # return x
+ Opcode.RET
)
path = self.get_contract_path('CastToTransaction.py')
output = self.assertCompilerLogs(CompilerWarning.TypeCasting, path)
self.assertEqual(expected_output, output)
|
129355
|
class MTransformationMatrix(object):
"""
Manipulate the individual components of a transformation.
"""
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def asMatrix(*args, **kwargs):
"""
Interpolates between the identity transformation and that currently in the object, returning the result as an MMatrix.
"""
pass
def asMatrixInverse(*args, **kwargs):
"""
Returns the inverse of the matrix representing the transformation.
"""
pass
def asRotateMatrix(*args, **kwargs):
"""
Returns the matrix which takes points from object space to the space immediately following the scale/shear/rotation transformations.
"""
pass
def asScaleMatrix(*args, **kwargs):
"""
Returns the matrix which takes points from object space to the space immediately following scale and shear transformations.
"""
pass
def isEquivalent(*args, **kwargs):
"""
Returns true if this transformation's matrix is within tolerance of another's matrix.
"""
pass
def reorderRotation(*args, **kwargs):
"""
        Reorders the transformation's rotate component to give the same overall rotation but using a new order of rotations.
"""
pass
def rotateBy(*args, **kwargs):
"""
Adds to the transformation's rotation component.
"""
pass
def rotateByComponents(*args, **kwargs):
"""
Adds to the transformation's rotation component.
"""
pass
def rotatePivot(*args, **kwargs):
"""
Returns the transformation's rotate pivot component.
"""
pass
def rotatePivotTranslation(*args, **kwargs):
"""
Returns the transformation's rotate pivot translation component.
"""
pass
def rotation(*args, **kwargs):
"""
Returns the transformation's rotation component as either an Euler rotation or a quaternion.
"""
pass
def rotationComponents(*args, **kwargs):
"""
Returns a list containing the four components of the transformation's rotate component.
"""
pass
def rotationOrder(*args, **kwargs):
"""
Returns the order of rotations when the transformation's rotate component is expressed as an euler rotation.
"""
pass
def rotationOrientation(*args, **kwargs):
"""
Returns a quaternion which orients the local rotation space.
"""
pass
def scale(*args, **kwargs):
"""
Returns a list containing the transformation's scale components.
"""
pass
def scaleBy(*args, **kwargs):
"""
Multiplies the transformation's scale components by the three floats in the provided sequence.
"""
pass
def scalePivot(*args, **kwargs):
"""
Returns the transformation's scale pivot component.
"""
pass
def scalePivotTranslation(*args, **kwargs):
"""
Returns the transformation's scale pivot translation component.
"""
pass
def setRotatePivot(*args, **kwargs):
"""
Sets the transformation's rotate pivot component.
"""
pass
def setRotatePivotTranslation(*args, **kwargs):
"""
Sets the transformation's rotate pivot translation component.
"""
pass
def setRotation(*args, **kwargs):
"""
Sets the transformation's rotation component.
"""
pass
def setRotationComponents(*args, **kwargs):
"""
Sets the transformation's rotate component from the four values in the provided sequence.
"""
pass
def setRotationOrientation(*args, **kwargs):
"""
Sets a quaternion which orients the local rotation space.
"""
pass
def setScale(*args, **kwargs):
"""
Sets the transformation's scale components to the three floats in the provided sequence.
"""
pass
def setScalePivot(*args, **kwargs):
"""
Sets the transformation's scale pivot component.
"""
pass
def setScalePivotTranslation(*args, **kwargs):
"""
Sets the transformation's scale pivot translation component.
"""
pass
def setShear(*args, **kwargs):
"""
Sets the transformation's shear component.
"""
pass
def setToRotationAxis(*args, **kwargs):
"""
Sets the transformation's rotate component to be a given axis vector and angle in radians.
"""
pass
def setTranslation(*args, **kwargs):
"""
Sets the transformation's translation component.
"""
pass
def shear(*args, **kwargs):
"""
Returns a list containing the transformation's shear components.
"""
pass
def shearBy(*args, **kwargs):
"""
Multiplies the transformation's shear components by the three floats in the provided sequence.
"""
pass
def translateBy(*args, **kwargs):
"""
Adds a vector to the transformation's translation component.
"""
pass
def translation(*args, **kwargs):
"""
Returns the transformation's translation component as a vector.
"""
pass
__new__ = None
kIdentity = None
kInvalid = 0
kLast = 7
kTolerance = 1e-10
kXYZ = 1
kXZY = 4
kYXZ = 5
kYZX = 2
kZXY = 3
kZYX = 6
class MSyntax(object):
"""
Syntax for commands.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def addArg(*args, **kwargs):
"""
Add a command argument.
"""
pass
def addFlag(*args, **kwargs):
"""
Add a flag and its arguments.
"""
pass
def makeFlagMultiUse(*args, **kwargs):
"""
Set whether a flag may be used multiple times on the command line.
"""
pass
def makeFlagQueryWithFullArgs(*args, **kwargs):
"""
Set whether a flag requires its args when queried.
"""
pass
def maxObjects(*args, **kwargs):
"""
Returns the maximum number of objects which can be passed to the command.
"""
pass
def minObjects(*args, **kwargs):
"""
Returns the minimum number of objects which can be passed to the command.
"""
pass
def setMaxObjects(*args, **kwargs):
"""
Sets the maximum number of objects which can be passed to the command.
"""
pass
def setMinObjects(*args, **kwargs):
"""
Sets the minimum number of objects which can be passed to the command.
"""
pass
def setObjectType(*args, **kwargs):
"""
Set the type and number of objects to be passed to the command.
"""
pass
def useSelectionAsDefault(*args, **kwargs):
"""
If set to True then when no objects are provided on the command-line Maya will pass the current selection instead.
"""
pass
enableEdit = None
enableQuery = None
__new__ = None
kAngle = 8
kBoolean = 2
kDistance = 7
kDouble = 4
kInvalidArgType = 0
kInvalidObjectFormat = 0
kLastArgType = 11
kLastObjectFormat = 4
kLong = 3
kNoArg = 1
kNone = 1
kSelectionItem = 10
kSelectionList = 3
kString = 5
kStringObjects = 2
kTime = 9
kUnsigned = 6
class MDoubleArray(object):
"""
Array of double values.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __contains__(*args, **kwargs):
"""
x.__contains__(y) <==> y in x
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __delslice__(*args, **kwargs):
"""
x.__delslice__(i, j) <==> del x[i:j]
Use of negative indices is not supported.
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __getslice__(*args, **kwargs):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(n) <==> x*n
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(n) <==> n*x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __setslice__(*args, **kwargs):
"""
x.__setslice__(i, j, y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def append(*args, **kwargs):
"""
Add a value to the end of the array.
"""
pass
def clear(*args, **kwargs):
"""
Remove all elements from the array.
"""
pass
def copy(*args, **kwargs):
"""
Replace the array contents with that of another or of a compatible Python sequence.
"""
pass
def insert(*args, **kwargs):
"""
Insert a new value into the array at the given index.
"""
pass
def remove(*args, **kwargs):
"""
Remove an element from the array.
"""
pass
def setLength(*args, **kwargs):
"""
Grow or shrink the array to contain a specific number of elements.
"""
pass
sizeIncrement = None
__new__ = None
class MFnBase(object):
"""
Base class for function sets.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def hasObj(*args, **kwargs):
"""
Returns True if the function set is compatible with the specified Maya object.
"""
pass
def object(*args, **kwargs):
"""
Returns a reference to the object to which the function set is currently attached, or MObject.kNullObj if none.
"""
pass
def setObject(*args, **kwargs):
"""
Attaches the function set to the specified Maya object.
"""
pass
def type(*args, **kwargs):
"""
Returns the type of the function set.
"""
pass
__new__ = None
class MAttributePattern(object):
"""
Manipulate attribute structure patterns.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def addRootAttr(*args, **kwargs):
"""
Add the given root attribute to this pattern.
"""
pass
def name(*args, **kwargs):
"""
Return the name of the attribute pattern.
"""
pass
def removeRootAttr(*args, **kwargs):
"""
Return the nth or passed-in root attribute from this pattern.
"""
pass
def rootAttr(*args, **kwargs):
"""
Return the nth root attribute in this pattern.
"""
pass
def rootAttrCount(*args, **kwargs):
"""
Return the number of root attributes in this pattern.
"""
pass
def attrPattern(*args, **kwargs):
"""
Return the specified pattern indexed from the global list.
"""
pass
def attrPatternCount(*args, **kwargs):
"""
Return the global number of patterns created.
"""
pass
def findPattern(*args, **kwargs):
"""
Return a pattern with the given name, None if not found.
"""
pass
__new__ = None
class MFloatVectorArray(object):
"""
Array of MFloatVector values.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __contains__(*args, **kwargs):
"""
x.__contains__(y) <==> y in x
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __delslice__(*args, **kwargs):
"""
x.__delslice__(i, j) <==> del x[i:j]
Use of negative indices is not supported.
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __getslice__(*args, **kwargs):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(n) <==> x*n
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(n) <==> n*x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __setslice__(*args, **kwargs):
"""
x.__setslice__(i, j, y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def append(*args, **kwargs):
"""
Add a value to the end of the array.
"""
pass
def clear(*args, **kwargs):
"""
Remove all elements from the array.
"""
pass
def copy(*args, **kwargs):
"""
Replace the array contents with that of another or of a compatible Python sequence.
"""
pass
def insert(*args, **kwargs):
"""
Insert a new value into the array at the given index.
"""
pass
def remove(*args, **kwargs):
"""
Remove an element from the array.
"""
pass
def setLength(*args, **kwargs):
"""
Grow or shrink the array to contain a specific number of elements.
"""
pass
sizeIncrement = None
__new__ = None
class MAngle(object):
"""
Manipulate angular data.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def asAngMinutes(*args, **kwargs):
"""
Returns the angular value, converted to minutes of arc.
"""
pass
def asAngSeconds(*args, **kwargs):
"""
Returns the angular value, converted to seconds of arc.
"""
pass
def asDegrees(*args, **kwargs):
"""
Returns the angular value, converted to degrees.
"""
pass
def asRadians(*args, **kwargs):
"""
Returns the angular value, converted to radians.
"""
pass
def asUnits(*args, **kwargs):
"""
Returns the angular value, converted to the specified units.
"""
pass
def internalToUI(*args, **kwargs):
"""
Converts a value from Maya's internal units to the units used in the UI.
"""
pass
def internalUnit(*args, **kwargs):
"""
Returns the angular unit used internally by Maya.
"""
pass
def setUIUnit(*args, **kwargs):
"""
Sets the angular unit used in Maya's UI.
"""
pass
def uiToInternal(*args, **kwargs):
"""
Converts a value from the units used in the UI to Maya's internal units.
"""
pass
def uiUnit(*args, **kwargs):
"""
Returns the units used to display angles in Maya's UI.
"""
pass
unit = None
value = None
__new__ = None
kAngMinutes = 3
kAngSeconds = 4
kDegrees = 2
kInvalid = 0
kLast = 5
kRadians = 1
class MEulerRotation(object):
"""
X, Y and Z rotations, applied in a specified order.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __isub__(*args, **kwargs):
"""
x.__isub__(y) <==> x-=y
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(y) <==> x*y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def __neg__(*args, **kwargs):
"""
x.__neg__() <==> -x
"""
pass
def __radd__(*args, **kwargs):
"""
x.__radd__(y) <==> y+x
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(y) <==> y*x
"""
pass
def __rsub__(*args, **kwargs):
"""
x.__rsub__(y) <==> y-x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def __sub__(*args, **kwargs):
"""
x.__sub__(y) <==> x-y
"""
pass
def alternateSolution(*args, **kwargs):
"""
Returns an equivalent rotation which is not simply a multiple.
"""
pass
def asMatrix(*args, **kwargs):
"""
Returns the rotation as an equivalent matrix.
"""
pass
def asQuaternion(*args, **kwargs):
"""
Returns the rotation as an equivalent quaternion.
"""
pass
def asVector(*args, **kwargs):
"""
Returns the X, Y and Z rotations as a vector.
"""
pass
def bound(*args, **kwargs):
"""
Returns a new MEulerRotation having this rotation, but with each rotation component bound within +/- PI.
"""
pass
def boundIt(*args, **kwargs):
"""
        In-place bounding of each rotation component to lie within +/- PI.
"""
pass
def closestCut(*args, **kwargs):
"""
Returns the rotation which is full spin multiples of this one and comes closest to target.
"""
pass
def closestSolution(*args, **kwargs):
"""
Returns the equivalent rotation which comes closest to a target.
"""
pass
def incrementalRotateBy(*args, **kwargs):
"""
        Increase this rotation by a given angle around the specified axis. The update is done in a series of small increments to avoid flipping.
"""
pass
def inverse(*args, **kwargs):
"""
Returns a new MEulerRotation containing the inverse rotation of this one and reversed rotation order.
"""
pass
def invertIt(*args, **kwargs):
"""
In-place inversion of the rotation. Rotation order is also reversed.
"""
pass
def isEquivalent(*args, **kwargs):
"""
Returns true if this rotation has the same order as another and their X, Y and Z components are within a tolerance of each other.
"""
pass
def isZero(*args, **kwargs):
"""
Returns true if the X, Y and Z components are each within a tolerance of 0.0.
"""
pass
def reorder(*args, **kwargs):
"""
Returns a new MEulerRotation having this rotation, reordered to use the given rotation order.
"""
pass
def reorderIt(*args, **kwargs):
"""
In-place reordering to use the given rotation order.
"""
pass
def setToAlternateSolution(*args, **kwargs):
"""
Replace this rotation with an alternate solution.
"""
pass
def setToClosestCut(*args, **kwargs):
"""
Replace this rotation with the closest cut to a target.
"""
pass
def setToClosestSolution(*args, **kwargs):
"""
Replace this rotation with the closest solution to a target.
"""
pass
def setValue(*args, **kwargs):
"""
Set the rotation.
"""
pass
def computeAlternateSolution(*args, **kwargs):
"""
Returns an equivalent rotation which is not simply a multiple.
"""
pass
def computeBound(*args, **kwargs):
"""
Returns an equivalent rotation with each rotation component bound within +/- PI.
"""
pass
def computeClosestCut(*args, **kwargs):
"""
Returns the rotation which is full spin multiples of the src and comes closest to target.
"""
pass
def computeClosestSolution(*args, **kwargs):
"""
Returns the equivalent rotation which comes closest to a target.
"""
pass
def decompose(*args, **kwargs):
"""
Extracts a rotation from a matrix.
"""
pass
order = None
x = None
y = None
z = None
__new__ = None
kIdentity = None
kTolerance = 1e-10
kXYZ = 0
kXZY = 3
kYXZ = 4
kYZX = 1
kZXY = 2
kZYX = 5
class MBoundingBox(object):
"""
3D axis-aligned bounding box.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def clear(*args, **kwargs):
"""
Empties the bounding box, setting its corners to (0, 0, 0).
"""
pass
def contains(*args, **kwargs):
"""
Returns True if a point lies within the bounding box.
"""
pass
def expand(*args, **kwargs):
"""
Expands the bounding box to include a point or other bounding box.
"""
pass
def intersects(*args, **kwargs):
"""
Returns True if any part of a given bounding box lies within this one.
"""
pass
def transformUsing(*args, **kwargs):
"""
Multiplies the bounding box's corners by a matrix.
"""
pass
center = None
depth = None
height = None
max = None
min = None
width = None
__new__ = None
class MUint64Array(object):
"""
Array of MUint64 values.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __contains__(*args, **kwargs):
"""
x.__contains__(y) <==> y in x
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __delslice__(*args, **kwargs):
"""
x.__delslice__(i, j) <==> del x[i:j]
Use of negative indices is not supported.
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __getslice__(*args, **kwargs):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(n) <==> x*n
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(n) <==> n*x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __setslice__(*args, **kwargs):
"""
x.__setslice__(i, j, y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def append(*args, **kwargs):
"""
Add a value to the end of the array.
"""
pass
def clear(*args, **kwargs):
"""
Remove all elements from the array.
"""
pass
def copy(*args, **kwargs):
"""
Replace the array contents with that of another or of a compatible Python sequence.
"""
pass
def insert(*args, **kwargs):
"""
Insert a new value into the array at the given index.
"""
pass
def remove(*args, **kwargs):
"""
Remove an element from the array.
"""
pass
def setLength(*args, **kwargs):
"""
Grow or shrink the array to contain a specific number of elements.
"""
pass
sizeIncrement = None
__new__ = None
class MIntArray(object):
"""
Array of int values.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __contains__(*args, **kwargs):
"""
x.__contains__(y) <==> y in x
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __delslice__(*args, **kwargs):
"""
x.__delslice__(i, j) <==> del x[i:j]
Use of negative indices is not supported.
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __getslice__(*args, **kwargs):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(n) <==> x*n
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(n) <==> n*x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __setslice__(*args, **kwargs):
"""
x.__setslice__(i, j, y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def append(*args, **kwargs):
"""
Add a value to the end of the array.
"""
pass
def clear(*args, **kwargs):
"""
Remove all elements from the array.
"""
pass
def copy(*args, **kwargs):
"""
Replace the array contents with that of another or of a compatible Python sequence.
"""
pass
def insert(*args, **kwargs):
"""
Insert a new value into the array at the given index.
"""
pass
def remove(*args, **kwargs):
"""
Remove an element from the array.
"""
pass
def setLength(*args, **kwargs):
"""
Grow or shrink the array to contain a specific number of elements.
"""
pass
sizeIncrement = None
__new__ = None
class MDistance(object):
"""
Manipulate distance data.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def asCentimeters(*args, **kwargs):
"""
Return the distance value, converted to centimeters.
"""
pass
def asFeet(*args, **kwargs):
"""
Return the distance value, converted to feet.
"""
pass
def asInches(*args, **kwargs):
"""
Return the distance value, converted to inches.
"""
pass
def asKilometers(*args, **kwargs):
"""
Return the distance value, converted to kilometers.
"""
pass
def asMeters(*args, **kwargs):
"""
Return the distance value, converted to meters.
"""
pass
def asMiles(*args, **kwargs):
"""
Return the distance value, converted to miles.
"""
pass
def asMillimeters(*args, **kwargs):
"""
Return the distance value, converted to millimeters.
"""
pass
def asUnits(*args, **kwargs):
"""
Return the distance value, converted to the specified units.
"""
pass
def asYards(*args, **kwargs):
"""
Return the distance value, converted to yards.
"""
pass
def internalToUI(*args, **kwargs):
"""
Convert a value from Maya's internal units to the units used in the UI.
"""
pass
def internalUnit(*args, **kwargs):
"""
Return the distance unit used internally by Maya.
"""
pass
def setUIUnit(*args, **kwargs):
"""
Change the units used to display distances in Maya's UI.
"""
pass
def uiToInternal(*args, **kwargs):
"""
Convert a value from the units used in the UI to Maya's internal units.
"""
pass
def uiUnit(*args, **kwargs):
"""
Return the units used to display distances in Maya's UI.
"""
pass
unit = None
value = None
__new__ = None
kCentimeters = 6
kFeet = 2
kInches = 1
kInvalid = 0
kKilometers = 7
kLast = 9
kMeters = 8
kMiles = 4
kMillimeters = 5
kYards = 3
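# Hypothetical usage sketch for the unit classes above (MAngle and MDistance).
# These are documentation stubs, so the calls below only work against the real
# maya.api.OpenMaya module inside Maya:
#
#   d = MDistance(2.5, MDistance.kMeters)
#   d.asCentimeters()                 # 250.0
#   a = MAngle(90.0, MAngle.kDegrees)
#   a.asRadians()                     # ~1.5708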
class MUintArray(object):
"""
Array of unsigned int values.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __contains__(*args, **kwargs):
"""
x.__contains__(y) <==> y in x
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __delslice__(*args, **kwargs):
"""
x.__delslice__(i, j) <==> del x[i:j]
Use of negative indices is not supported.
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __getslice__(*args, **kwargs):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(n) <==> x*n
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(n) <==> n*x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __setslice__(*args, **kwargs):
"""
x.__setslice__(i, j, y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def append(*args, **kwargs):
"""
Add a value to the end of the array.
"""
pass
def clear(*args, **kwargs):
"""
Remove all elements from the array.
"""
pass
def copy(*args, **kwargs):
"""
Replace the array contents with that of another or of a compatible Python sequence.
"""
pass
def insert(*args, **kwargs):
"""
Insert a new value into the array at the given index.
"""
pass
def remove(*args, **kwargs):
"""
Remove an element from the array.
"""
pass
def setLength(*args, **kwargs):
"""
Grow or shrink the array to contain a specific number of elements.
"""
pass
sizeIncrement = None
__new__ = None
class MMatrix(object):
"""
4x4 matrix with double-precision elements.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __isub__(*args, **kwargs):
"""
x.__isub__(y) <==> x-=y
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(y) <==> x*y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def __radd__(*args, **kwargs):
"""
x.__radd__(y) <==> y+x
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(y) <==> y*x
"""
pass
def __rsub__(*args, **kwargs):
"""
x.__rsub__(y) <==> y-x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def __sub__(*args, **kwargs):
"""
x.__sub__(y) <==> x-y
"""
pass
def adjoint(*args, **kwargs):
"""
Returns a new matrix containing this matrix's adjoint.
"""
pass
def det3x3(*args, **kwargs):
"""
Returns the determinant of the 3x3 matrix formed by the first 3 elements of the first 3 rows of this matrix.
"""
pass
def det4x4(*args, **kwargs):
"""
Returns this matrix's determinant.
"""
pass
def getElement(*args, **kwargs):
"""
Returns the matrix element for the specified row and column.
"""
pass
def homogenize(*args, **kwargs):
"""
Returns a new matrix containing the homogenized version of this matrix.
"""
pass
def inverse(*args, **kwargs):
"""
Returns a new matrix containing this matrix's inverse.
"""
pass
def isEquivalent(*args, **kwargs):
"""
Test for equivalence of two matrices, within a tolerance.
"""
pass
def isSingular(*args, **kwargs):
"""
Returns True if this matrix is singular.
"""
pass
def setElement(*args, **kwargs):
"""
Sets the matrix element for the specified row and column.
"""
pass
def setToIdentity(*args, **kwargs):
"""
Sets this matrix to the identity.
"""
pass
def setToProduct(*args, **kwargs):
"""
Sets this matrix to the product of the two matrices passed in.
"""
pass
def transpose(*args, **kwargs):
"""
Returns a new matrix containing this matrix's transpose.
"""
pass
__new__ = None
kIdentity = None
kTolerance = 1e-10
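# Minimal usage sketch for MMatrix, assuming a Maya session where
# maya.api.OpenMaya is importable. It exercises only methods documented above.
def _example_mmatrix():
    import maya.api.OpenMaya as om
    m = om.MMatrix()                 # default construction gives the identity
    m.setElement(3, 0, 5.0)          # put a translation of 5 units in X
    inv = m.inverse()                # new matrix holding the inverse
    product = m * inv                # should be equivalent to the identity
    return product.isEquivalent(om.MMatrix.kIdentity)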
class MDagPath(object):
"""
Path to a DAG node from the top of the DAG.
"""
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def apiType(*args, **kwargs):
"""
Returns the type of the object at the end of the path.
"""
pass
def child(*args, **kwargs):
"""
Returns the specified child of the object at the end of the path.
"""
pass
def childCount(*args, **kwargs):
"""
Returns the number of objects parented directly beneath the object at the end of the path.
"""
pass
def exclusiveMatrix(*args, **kwargs):
"""
Returns the matrix for all transforms in the path, excluding the end object.
"""
pass
def exclusiveMatrixInverse(*args, **kwargs):
"""
Returns the inverse of exclusiveMatrix().
"""
pass
def extendToShape(*args, **kwargs):
"""
Extends the path to the specified shape node parented directly beneath the transform at the current end of the path.
"""
pass
def fullPathName(*args, **kwargs):
"""
Returns a string representation of the path from the DAG root to the path's last node.
"""
pass
def getPath(*args, **kwargs):
"""
Returns the specified sub-path of this path.
"""
pass
def hasFn(*args, **kwargs):
"""
Returns True if the object at the end of the path supports the given function set.
"""
pass
def inclusiveMatrix(*args, **kwargs):
"""
Returns the matrix for all transforms in the path, including the end object, if it is a transform.
"""
pass
def inclusiveMatrixInverse(*args, **kwargs):
"""
Returns the inverse of inclusiveMatrix().
"""
pass
def instanceNumber(*args, **kwargs):
"""
Returns the instance number of this path to the object at the end.
"""
pass
def isInstanced(*args, **kwargs):
"""
Returns True if the object at the end of the path can be reached by more than one path.
"""
pass
def isTemplated(*args, **kwargs):
"""
Returns True if the DAG node at the end of the path is templated.
"""
pass
def isValid(*args, **kwargs):
"""
Returns True if this is a valid path.
"""
pass
def isVisible(*args, **kwargs):
"""
Returns True if the DAG node at the end of the path is visible.
"""
pass
def length(*args, **kwargs):
"""
Returns the number of nodes on the path, not including the DAG's root node.
"""
pass
def node(*args, **kwargs):
"""
Returns the DAG node at the end of the path.
"""
pass
def numberOfShapesDirectlyBelow(*args, **kwargs):
"""
Returns the number of shape nodes parented directly beneath the transform at the end of the path.
"""
pass
def partialPathName(*args, **kwargs):
"""
Returns the minimum string representation which will uniquely identify the path.
"""
pass
def pathCount(*args, **kwargs):
"""
Returns the number of sub-paths which make up this path.
"""
pass
def pop(*args, **kwargs):
"""
Removes objects from the end of the path.
"""
pass
def push(*args, **kwargs):
"""
Extends the path to the specified child object, which must be parented directly beneath the object currently at the end of the path.
"""
pass
def set(*args, **kwargs):
"""
Replaces the current path held by this object with another.
"""
pass
def transform(*args, **kwargs):
"""
Returns the last transform node on the path.
"""
pass
def getAPathTo(*args, **kwargs):
"""
Returns the first path found to the given node.
"""
pass
def getAllPathsTo(*args, **kwargs):
"""
Returns all paths to the given node.
"""
pass
__new__ = None
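# Minimal usage sketch for MDagPath, assuming a Maya session where
# maya.api.OpenMaya is importable. The node name 'persp' is an assumption
# (it exists in a default scene).
def _example_mdagpath():
    import maya.api.OpenMaya as om
    sel = om.MSelectionList()
    sel.add('persp')                     # resolve the transform by name
    path = sel.getDagPath(0)             # MDagPath to the first matching item
    path.extendToShape()                 # walk down to the camera shape
    world = path.inclusiveMatrix()       # accumulated world-space matrix for the path
    return path.fullPathName(), world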
class MTime(object):
"""
Manipulate time data.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __div__(*args, **kwargs):
"""
x.__div__(y) <==> x/y
"""
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __idiv__(*args, **kwargs):
"""
x.__idiv__(y) <==> x/=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __isub__(*args, **kwargs):
"""
x.__isub__(y) <==> x-=y
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(y) <==> x*y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def __radd__(*args, **kwargs):
"""
x.__radd__(y) <==> y+x
"""
pass
def __rdiv__(*args, **kwargs):
"""
x.__rdiv__(y) <==> y/x
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(y) <==> y*x
"""
pass
def __rsub__(*args, **kwargs):
"""
x.__rsub__(y) <==> y-x
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def __sub__(*args, **kwargs):
"""
x.__sub__(y) <==> x-y
"""
pass
def asUnits(*args, **kwargs):
"""
Return the time value, converted to the specified units.
"""
pass
def setUIUnit(*args, **kwargs):
"""
Change the units used to display time in Maya's UI.
"""
pass
def uiUnit(*args, **kwargs):
"""
Return the units used to display time in Maya's UI.
"""
pass
unit = None
value = None
__new__ = None
k100FPS = 25
k10FPS = 18
k1200FPS = 38
k120FPS = 26
k125FPS = 27
k12FPS = 19
k1500FPS = 39
k150FPS = 28
k16FPS = 20
k2000FPS = 40
k200FPS = 29
k20FPS = 21
k240FPS = 30
k250FPS = 31
k2FPS = 12
k3000FPS = 41
k300FPS = 32
k375FPS = 33
k3FPS = 13
k400FPS = 34
k40FPS = 22
k4FPS = 14
k500FPS = 35
k5FPS = 15
k6000FPS = 42
k600FPS = 36
k6FPS = 16
k750FPS = 37
k75FPS = 23
k80FPS = 24
k8FPS = 17
kFilm = 6
kGames = 5
kHours = 1
kInvalid = 0
kLast = 44
kMilliseconds = 4
kMinutes = 2
kNTSCField = 11
kNTSCFrame = 8
kPALField = 10
kPALFrame = 7
kSeconds = 3
kShowScan = 9
kUserDef = 43
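# Minimal usage sketch for MTime, assuming a Maya session where
# maya.api.OpenMaya is importable.
def _example_mtime():
    import maya.api.OpenMaya as om
    t = om.MTime(1.0, om.MTime.kSeconds)     # one second
    frames = t.asUnits(om.MTime.uiUnit())    # convert to the current UI unit
    t += om.MTime(12, om.MTime.kFilm)        # add 12 frames at 24 fps (half a second)
    return frames, t.asUnits(om.MTime.kSeconds)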
class MMeshIsectAccelParams(object):
"""
Opaque class used to store parameters used by MFnMesh's
intersection calculations for later re-use.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
__new__ = None
class MFloatArray(object):
"""
Array of float values.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __contains__(*args, **kwargs):
"""
x.__contains__(y) <==> y in x
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __delslice__(*args, **kwargs):
"""
x.__delslice__(i, j) <==> del x[i:j]
Use of negative indices is not supported.
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __getslice__(*args, **kwargs):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(n) <==> x*n
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(n) <==> n*x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __setslice__(*args, **kwargs):
"""
x.__setslice__(i, j, y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def append(*args, **kwargs):
"""
Add a value to the end of the array.
"""
pass
def clear(*args, **kwargs):
"""
Remove all elements from the array.
"""
pass
def copy(*args, **kwargs):
"""
Replace the array contents with that of another or of a compatible Python sequence.
"""
pass
def insert(*args, **kwargs):
"""
Insert a new value into the array at the given index.
"""
pass
def remove(*args, **kwargs):
"""
Remove an element from the array.
"""
pass
def setLength(*args, **kwargs):
"""
Grow or shrink the array to contain a specific number of elements.
"""
pass
sizeIncrement = None
__new__ = None
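# Minimal usage sketch for MFloatArray, assuming a Maya session where
# maya.api.OpenMaya is importable. Construction from a Python list is an
# assumption based on the array classes' common behaviour.
def _example_mfloatarray():
    import maya.api.OpenMaya as om
    weights = om.MFloatArray([0.25, 0.5, 0.75])
    weights.insert(0, 0.0)            # insert at the front
    head = weights[0:2]               # slicing returns the first two values
    other = om.MFloatArray()
    other.copy(weights)               # replace contents with another array's
    return list(head), len(other)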
class MVector(object):
"""
3D vector with double-precision coordinates.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __div__(*args, **kwargs):
"""
x.__div__(y) <==> x/y
"""
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __idiv__(*args, **kwargs):
"""
x.__idiv__(y) <==> x/=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __isub__(*args, **kwargs):
"""
x.__isub__(y) <==> x-=y
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(y) <==> x*y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def __neg__(*args, **kwargs):
"""
x.__neg__() <==> -x
"""
pass
def __radd__(*args, **kwargs):
"""
x.__radd__(y) <==> y+x
"""
pass
def __rdiv__(*args, **kwargs):
"""
x.__rdiv__(y) <==> y/x
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(y) <==> y*x
"""
pass
def __rsub__(*args, **kwargs):
"""
x.__rsub__(y) <==> y-x
"""
pass
def __rxor__(*args, **kwargs):
"""
x.__rxor__(y) <==> y^x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def __sub__(*args, **kwargs):
"""
x.__sub__(y) <==> x-y
"""
pass
def __xor__(*args, **kwargs):
"""
x.__xor__(y) <==> x^y
"""
pass
def angle(*args, **kwargs):
"""
Returns the angle, in radians, between this vector and another.
"""
pass
def isEquivalent(*args, **kwargs):
"""
Returns True if this vector and another are within a given tolerance of being equal.
"""
pass
def isParallel(*args, **kwargs):
"""
Returns True if this vector and another are within the given tolerance of being parallel.
"""
pass
def length(*args, **kwargs):
"""
Returns the magnitude of this vector.
"""
pass
def normal(*args, **kwargs):
"""
Returns a new vector containing the normalized version of this one.
"""
pass
def normalize(*args, **kwargs):
"""
Normalizes this vector in-place and returns a new reference to it.
"""
pass
def rotateBy(*args, **kwargs):
"""
Returns the vector resulting from rotating this one by the given amount.
"""
pass
def rotateTo(*args, **kwargs):
"""
Returns the quaternion which will rotate this vector into another.
"""
pass
def transformAsNormal(*args, **kwargs):
"""
Returns a new vector which is calculated by postmultiplying this vector by the transpose of the given matrix's inverse and then normalizing the result.
"""
pass
x = None
y = None
z = None
__new__ = None
kOneVector = None
kTolerance = 1e-10
kWaxis = 3
kXaxis = 0
kXaxisVector = None
kXnegAxisVector = None
kYaxis = 1
kYaxisVector = None
kYnegAxisVector = None
kZaxis = 2
kZaxisVector = None
kZeroVector = None
kZnegAxisVector = None
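# Minimal usage sketch for MVector, assuming a Maya session where
# maya.api.OpenMaya is importable. In this API the '*' operator between two
# vectors is the dot product and '^' is the cross product.
def _example_mvector():
    import maya.api.OpenMaya as om
    a = om.MVector(1.0, 0.0, 0.0)
    b = om.MVector(0.0, 1.0, 0.0)
    dot = a * b                       # 0.0 for perpendicular vectors
    cross = a ^ b                     # (0, 0, 1)
    angle = a.angle(b)                # pi/2 radians
    return dot, cross.normal(), angle, b.length()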
class MDGModifier(object):
"""
Used to change the structure of the dependency graph.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def addAttribute(*args, **kwargs):
"""
addAttribute(MObject node, MObject attribute) -> self
Adds an operation to the modifier to add a new dynamic attribute to the
given dependency node. If the attribute is a compound its children will
be added as well, so only the parent needs to be added using this method.
"""
pass
def addExtensionAttribute(*args, **kwargs):
"""
addExtensionAttribute(MNodeClass nodeClass, MObject attribute) -> self
Adds an operation to the modifier to add a new extension attribute to
the given node class. If the attribute is a compound its children will be
added as well, so only the parent needs to be added using this method.
"""
pass
def commandToExecute(*args, **kwargs):
"""
commandToExecute(command) -> self
Adds an operation to the modifier to execute a MEL command. The specified
command must be undoable, otherwise unexpected results may occur. It is
best to use multiple commandToExecute() calls rather than batching
multiple commands into one call to commandToExecute(). They will still
be undone together, as a single undo action by the user, but Maya will be
better able to recover if one of the commands fails.
"""
pass
def connect(*args, **kwargs):
"""
connect(MPlug source, MPlug dest) -> self
connect(MObject sourceNode, MObject sourceAttr,
MObject destNode, MObject destAttr) -> self
Adds an operation to the modifier that connects two plugs in the
dependency graph. It is the user's responsibility to ensure that the
source and destination attributes are of compatible types. For instance,
if the source attribute is a nurbs surface then the destination must
also be a nurbs surface.
Plugs can either be specified with node and attribute MObjects or with
MPlugs.
"""
pass
def createNode(*args, **kwargs):
"""
createNode(typeName) -> MObject
createNode(MTypeId typeId) -> MObject
Adds an operation to the modifier to create a node of the given type.
The new node is created and returned but will not be added to the
Dependency Graph until the modifier's doIt() method is called. Raises
TypeError if the named node type does not exist or if it is a DAG node
type.
"""
pass
def deleteNode(*args, **kwargs):
"""
deleteNode(MObject node) -> self
Adds an operation to the modifier which deletes the specified node from
the Dependency Graph. If the modifier already contains other operations
on the same node (e.g. a disconnect) then they should be committed by
calling the modifier's doIt() before the deleteNode operation is added.
"""
pass
def disconnect(*args, **kwargs):
"""
disconnect(MPlug source, MPlug dest) -> self
disconnect(MObject sourceNode, MObject sourceAttr,
MObject destNode, MObject destAttr) -> self
Adds an operation to the modifier that breaks a connection between two
plugs in the dependency graph.
Plugs can either be specified with node and attribute MObjects or with
MPlugs.
"""
pass
def doIt(*args, **kwargs):
"""
doIt() -> self
Executes the modifier's operations. If doIt() is called multiple times
in a row, without any intervening calls to undoIt(), then only the
operations which were added since the previous doIt() call will be
executed. If undoIt() has been called then the next call to doIt() will
do all operations.
"""
pass
def linkExtensionAttributeToPlugin(*args, **kwargs):
"""
linkExtensionAttributeToPlugin(MObject plugin, MObject attribute) -> self
The plugin can call this method to indicate that the extension attribute
defines part of the plugin, regardless of the node type to which it
attaches itself. This requirement is used when the plugin is checked to
see if it is in use, if it is able to be unloaded, or if it is required as
part of a stored file. For compound attributes only the topmost parent
attribute may be passed in and all of its children will be included,
recursively. Thus it's not possible to link a child attribute to a
plugin by itself. Note that the link is established immediately and is
not affected by the modifier's doIt() or undoIt() methods.
"""
pass
def newPlugValue(*args, **kwargs):
"""
newPlugValue(MPlug plug, MObject value) -> self
Adds an operation to the modifier to set the value of a plug, where
value is an MObject data wrapper, such as created by the various
MFn*Data classes.
"""
pass
def newPlugValueBool(*args, **kwargs):
"""
newPlugValueBool(MPlug plug, bool value) -> self
Adds an operation to the modifier to set a value onto a bool plug.
"""
pass
def newPlugValueChar(*args, **kwargs):
"""
newPlugValueChar(MPlug plug, int value) -> self
Adds an operation to the modifier to set a value onto a char (single
byte signed integer) plug.
"""
pass
def newPlugValueDouble(*args, **kwargs):
"""
newPlugValueDouble(MPlug plug, float value) -> self
Adds an operation to the modifier to set a value onto a double-precision
float plug.
"""
pass
def newPlugValueFloat(*args, **kwargs):
"""
newPlugValueFloat(MPlug plug, float value) -> self
Adds an operation to the modifier to set a value onto a single-precision
float plug.
"""
pass
def newPlugValueInt(*args, **kwargs):
"""
newPlugValueInt(MPlug plug, int value) -> self
Adds an operation to the modifier to set a value onto an int plug.
"""
pass
def newPlugValueMAngle(*args, **kwargs):
"""
newPlugValueMAngle(MPlug plug, MAngle value) -> self
Adds an operation to the modifier to set a value onto an angle plug.
"""
pass
def newPlugValueMDistance(*args, **kwargs):
"""
newPlugValueMDistance(MPlug plug, MDistance value) -> self
Adds an operation to the modifier to set a value onto a distance plug.
"""
pass
def newPlugValueMTime(*args, **kwargs):
"""
newPlugValueMTime(MPlug plug, MTime value) -> self
Adds an operation to the modifier to set a value onto a time plug.
"""
pass
def newPlugValueShort(*args, **kwargs):
"""
newPlugValueShort(MPlug plug, int value) -> self
Adds an operation to the modifier to set a value onto a short
integer plug.
"""
pass
def newPlugValueString(*args, **kwargs):
"""
newPlugValueString(MPlug plug, string value) -> self
Adds an operation to the modifier to set a value onto a string plug.
"""
pass
def removeAttribute(*args, **kwargs):
"""
removeAttribute(MObject node, MObject attribute) -> self
Adds an operation to the modifier to remove a dynamic attribute from the
given dependency node. If the attribute is a compound its children will
be removed as well, so only the parent needs to be removed using this
method. The attribute MObject passed in will be set to kNullObj. There
should be no function sets attached to the attribute at the time of the
call as their behaviour may become unpredictable.
"""
pass
def removeExtensionAttribute(*args, **kwargs):
"""
removeExtensionAttribute(MNodeClass nodeClass, MObject attribute) -> self
Adds an operation to the modifier to remove an extension attribute from
the given node class. If the attribute is a compound its children will
be removed as well, so only the parent needs to be removed using this
method. The attribute MObject passed in will be set to kNullObj. There
should be no function sets attached to the attribute at the time of the
call as their behaviour may become unpredictable.
"""
pass
def removeExtensionAttributeIfUnset(*args, **kwargs):
"""
removeExtensionAttributeIfUnset(MNodeClass nodeClass,
MObject attribute) -> self
Adds an operation to the modifier to remove an extension attribute from
the given node class, but only if there are no nodes in the graph with
non-default values for this attribute. If the attribute is a compound
its children will be removed as well, so only the parent needs to be
removed using this method. The attribute MObject passed in will be set
to kNullObj. There should be no function sets attached to the attribute
at the time of the call as their behaviour may become unpredictable.
"""
pass
def renameNode(*args, **kwargs):
"""
renameNode(MObject node, string newName) -> self
Adds an operation to the modifier to rename a node.
"""
pass
def setNodeLockState(*args, **kwargs):
"""
setNodeLockState(MObject node, bool newState) -> self
Adds an operation to the modifier to set the lockState of a node.
"""
pass
def undoIt(*args, **kwargs):
"""
undoIt() -> self
Undoes all of the operations that have been given to this modifier. It
is only valid to call this method after the doIt() method has been
called.
"""
pass
def unlinkExtensionAttributeFromPlugin(*args, **kwargs):
"""
unlinkExtensionAttributeFromPlugin(MObject plugin,
MObject attribute) -> self
The plugin can call this method to indicate that it no longer requires
an extension attribute for its operation. This requirement is used when
the plugin is checked to see if it is in use, if it is able to be unloaded,
or if it is required as part of a stored file. For compound attributes
only the topmost parent attribute may be passed in and all of its
children will be unlinked, recursively. Thus it's not possible to unlink
a child attribute from a plugin by itself. Note that the link is broken
immediately and is not affected by the modifier's doIt() or undoIt()
methods.
"""
pass
__new__ = None
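# Minimal usage sketch for MDGModifier, assuming a Maya session where
# maya.api.OpenMaya is importable. 'multiplyDivide' is an assumption for a
# dependency (non-DAG) node type; createNode() rejects DAG node types.
def _example_mdgmodifier():
    import maya.api.OpenMaya as om
    mod = om.MDGModifier()
    node = mod.createNode('multiplyDivide')   # queued until doIt() runs
    mod.renameNode(node, 'example_mult')
    mod.doIt()                                # execute the queued operations
    mod.undoIt()                              # roll them back again
    return node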
class MSpace(object):
"""
Static class providing coordinate space constants.
"""
kInvalid = 0
kLast = 5
kObject = 2
kPostTransform = 3
kPreTransform = 2
kTransform = 1
kWorld = 4
class MColorArray(object):
"""
Array of MColor values.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __contains__(*args, **kwargs):
"""
x.__contains__(y) <==> y in x
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __delslice__(*args, **kwargs):
"""
x.__delslice__(i, j) <==> del x[i:j]
Use of negative indices is not supported.
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __getslice__(*args, **kwargs):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(n) <==> x*n
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(n) <==> n*x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __setslice__(*args, **kwargs):
"""
x.__setslice__(i, j, y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def append(*args, **kwargs):
"""
Add a value to the end of the array.
"""
pass
def clear(*args, **kwargs):
"""
Remove all elements from the array.
"""
pass
def copy(*args, **kwargs):
"""
Replace the array contents with that of another or of a compatible Python sequence.
"""
pass
def insert(*args, **kwargs):
"""
Insert a new value into the array at the given index.
"""
pass
def remove(*args, **kwargs):
"""
Remove an element from the array.
"""
pass
def setLength(*args, **kwargs):
"""
Grow or shrink the array to contain a specific number of elements.
"""
pass
sizeIncrement = None
__new__ = None
class MPoint(object):
"""
3D point with double-precision coordinates.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __div__(*args, **kwargs):
"""
x.__div__(y) <==> x/y
"""
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __isub__(*args, **kwargs):
"""
x.__isub__(y) <==> x-=y
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(y) <==> x*y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def __radd__(*args, **kwargs):
"""
x.__radd__(y) <==> y+x
"""
pass
def __rdiv__(*args, **kwargs):
"""
x.__rdiv__(y) <==> y/x
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(y) <==> y*x
"""
pass
def __rsub__(*args, **kwargs):
"""
x.__rsub__(y) <==> y-x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def __sub__(*args, **kwargs):
"""
x.__sub__(y) <==> x-y
"""
pass
def cartesianize(*args, **kwargs):
"""
Convert point to cartesian form.
"""
pass
def distanceTo(*args, **kwargs):
"""
Return distance between this point and another.
"""
pass
def homogenize(*args, **kwargs):
"""
Convert point to homogeneous form.
"""
pass
def isEquivalent(*args, **kwargs):
"""
Test for equivalence of two points, within a tolerance.
"""
pass
def rationalize(*args, **kwargs):
"""
Convert point to rational form.
"""
pass
w = None
x = None
y = None
z = None
__new__ = None
kOrigin = None
kTolerance = 1e-10
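# Minimal usage sketch for MPoint, assuming a Maya session where
# maya.api.OpenMaya is importable.
def _example_mpoint():
    import maya.api.OpenMaya as om
    p = om.MPoint(1.0, 2.0, 2.0)          # w defaults to 1.0
    q = om.MPoint(2.0, 4.0, 4.0, 2.0)     # same point in homogeneous form
    q.cartesianize()                      # divide x, y, z by w; w becomes 1
    offset = p - om.MPoint()              # subtracting two points yields an MVector
    return p.distanceTo(om.MPoint()), offset.length(), q.isEquivalent(p)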
class MFloatMatrix(object):
"""
4x4 matrix with single-precision elements.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __isub__(*args, **kwargs):
"""
x.__isub__(y) <==> x-=y
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(y) <==> x*y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def __radd__(*args, **kwargs):
"""
x.__radd__(y) <==> y+x
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(y) <==> y*x
"""
pass
def __rsub__(*args, **kwargs):
"""
x.__rsub__(y) <==> y-x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def __sub__(*args, **kwargs):
"""
x.__sub__(y) <==> x-y
"""
pass
def adjoint(*args, **kwargs):
"""
Returns a new matrix containing this matrix's adjoint.
"""
pass
def det3x3(*args, **kwargs):
"""
Returns the determinant of the 3x3 matrix formed by the first 3 elements of the first 3 rows of this matrix.
"""
pass
def det4x4(*args, **kwargs):
"""
Returns this matrix's determinant.
"""
pass
def getElement(*args, **kwargs):
"""
Returns the matrix element for the specified row and column.
"""
pass
def homogenize(*args, **kwargs):
"""
Returns a new matrix containing the homogenized version of this matrix.
"""
pass
def inverse(*args, **kwargs):
"""
Returns a new matrix containing this matrix's inverse.
"""
pass
def isEquivalent(*args, **kwargs):
"""
Test for equivalence of two matrices, within a tolerance.
"""
pass
def setElement(*args, **kwargs):
"""
Sets the matrix element for the specified row and column.
"""
pass
def setToIdentity(*args, **kwargs):
"""
Sets this matrix to the identity.
"""
pass
def setToProduct(*args, **kwargs):
"""
Sets this matrix to the product of the two matrices passed in.
"""
pass
def transpose(*args, **kwargs):
"""
Returns a new matrix containing this matrix's transpose.
"""
pass
__new__ = None
kTolerance = 9.999999747378752e-06
class MDagPathArray(object):
"""
Array of MDagPath values.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __contains__(*args, **kwargs):
"""
x.__contains__(y) <==> y in x
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __delslice__(*args, **kwargs):
"""
x.__delslice__(i, j) <==> del x[i:j]
Use of negative indices is not supported.
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __getslice__(*args, **kwargs):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(n) <==> x*n
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(n) <==> n*x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __setslice__(*args, **kwargs):
"""
x.__setslice__(i, j, y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def append(*args, **kwargs):
"""
Add a value to the end of the array.
"""
pass
def clear(*args, **kwargs):
"""
Remove all elements from the array.
"""
pass
def copy(*args, **kwargs):
"""
Replace the array contents with that of another or of a compatible Python sequence.
"""
pass
def insert(*args, **kwargs):
"""
Insert a new value into the array at the given index.
"""
pass
def remove(*args, **kwargs):
"""
Remove an element from the array.
"""
pass
def setLength(*args, **kwargs):
"""
Grow or shrink the array to contain a specific number of elements.
"""
pass
sizeIncrement = None
__new__ = None
class MObjectArray(object):
"""
Array of MObject values.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __contains__(*args, **kwargs):
"""
x.__contains__(y) <==> y in x
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __delslice__(*args, **kwargs):
"""
x.__delslice__(i, j) <==> del x[i:j]
Use of negative indices is not supported.
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __getslice__(*args, **kwargs):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(n) <==> x*n
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(n) <==> n*x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __setslice__(*args, **kwargs):
"""
x.__setslice__(i, j, y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def append(*args, **kwargs):
"""
Add a value to the end of the array.
"""
pass
def clear(*args, **kwargs):
"""
Remove all elements from the array.
"""
pass
def copy(*args, **kwargs):
"""
Replace the array contents with that of another or of a compatible Python sequence.
"""
pass
def insert(*args, **kwargs):
"""
Insert a new value into the array at the given index.
"""
pass
def remove(*args, **kwargs):
"""
Remove an element from the array.
"""
pass
def setLength(*args, **kwargs):
"""
Grow or shrink the array to contain a specific number of elements.
"""
pass
sizeIncrement = None
__new__ = None
class MDGContext(object):
"""
Dependency graph context.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def getTime(*args, **kwargs):
"""
Returns the time at which this context is set to evaluate.
"""
pass
def isNormal(*args, **kwargs):
"""
Returns True if the context is set to evaluate normally. Returns False if the context is set to evaluate at a specific time.
"""
pass
__new__ = None
kNormal = None
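# Minimal usage sketch for MDGContext, assuming a Maya session where
# maya.api.OpenMaya is importable. Constructing a context from an MTime is an
# assumption; only getTime() and isNormal() are documented above.
def _example_mdgcontext():
    import maya.api.OpenMaya as om
    ctx = om.MDGContext(om.MTime(10, om.MTime.uiUnit()))  # evaluate at frame 10
    return ctx.isNormal(), ctx.getTime().asUnits(om.MTime.uiUnit())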
class MVectorArray(object):
"""
Array of MVector values.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __contains__(*args, **kwargs):
"""
x.__contains__(y) <==> y in x
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __delslice__(*args, **kwargs):
"""
x.__delslice__(i, j) <==> del x[i:j]
Use of negative indices is not supported.
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __getslice__(*args, **kwargs):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(n) <==> x*n
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(n) <==> n*x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __setslice__(*args, **kwargs):
"""
x.__setslice__(i, j, y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def append(*args, **kwargs):
"""
Add a value to the end of the array.
"""
pass
def clear(*args, **kwargs):
"""
Remove all elements from the array.
"""
pass
def copy(*args, **kwargs):
"""
Replace the array contents with that of another or of a compatible Python sequence.
"""
pass
def insert(*args, **kwargs):
"""
Insert a new value into the array at the given index.
"""
pass
def remove(*args, **kwargs):
"""
Remove an element from the array.
"""
pass
def setLength(*args, **kwargs):
"""
Grow or shrink the array to contain a specific number of elements.
"""
pass
sizeIncrement = None
__new__ = None
class MMeshSmoothOptions(object):
"""
Options for control of smooth mesh generation.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
boundaryRule = None
divisions = None
keepBorderEdge = None
keepHardEdge = None
propEdgeHardness = None
smoothUVs = None
smoothness = None
__new__ = None
kCreaseAll = 1
kCreaseEdge = 2
kInvalid = -1
kLast = 3
kLegacy = 0
class MPointArray(object):
"""
Array of MPoint values.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __contains__(*args, **kwargs):
"""
x.__contains__(y) <==> y in x
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __delslice__(*args, **kwargs):
"""
x.__delslice__(i, j) <==> del x[i:j]
Use of negative indices is not supported.
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __getslice__(*args, **kwargs):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(n) <==> x*n
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(n) <==> n*x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __setslice__(*args, **kwargs):
"""
x.__setslice__(i, j, y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def append(*args, **kwargs):
"""
Add a value to the end of the array.
"""
pass
def clear(*args, **kwargs):
"""
Remove all elements from the array.
"""
pass
def copy(*args, **kwargs):
"""
Replace the array contents with that of another or of a compatible Python sequence.
"""
pass
def insert(*args, **kwargs):
"""
Insert a new value into the array at the given index.
"""
pass
def remove(*args, **kwargs):
"""
Remove an element from the array.
"""
pass
def setLength(*args, **kwargs):
"""
Grow or shrink the array to contain a specific number of elements.
"""
pass
sizeIncrement = None
__new__ = None
class MTypeId(object):
"""
Stores a Maya object type identifier.
"""
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def id(*args, **kwargs):
"""
Returns the type id as a long.
"""
pass
__new__ = None
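# Minimal usage sketch for MTypeId, assuming a Maya session where
# maya.api.OpenMaya is importable. The hexadecimal id value is an arbitrary
# placeholder, not a registered node id.
def _example_mtypeid():
    import maya.api.OpenMaya as om
    type_id = om.MTypeId(0x80000)        # wrap a 32-bit type identifier
    return type_id.id(), str(type_id)    # id() returns the value back as an integer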
class MFloatPoint(object):
"""
3D point with single-precision coordinates.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __div__(*args, **kwargs):
"""
x.__div__(y) <==> x/y
"""
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __isub__(*args, **kwargs):
"""
x.__isub__(y) <==> x-=y
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(y) <==> x*y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def __radd__(*args, **kwargs):
"""
x.__radd__(y) <==> y+x
"""
pass
def __rdiv__(*args, **kwargs):
"""
x.__rdiv__(y) <==> y/x
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(y) <==> y*x
"""
pass
def __rsub__(*args, **kwargs):
"""
x.__rsub__(y) <==> y-x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def __sub__(*args, **kwargs):
"""
x.__sub__(y) <==> x-y
"""
pass
def cartesianize(*args, **kwargs):
"""
Convert point to cartesian form.
"""
pass
def distanceTo(*args, **kwargs):
"""
Return distance between this point and another.
"""
pass
def homogenize(*args, **kwargs):
"""
Convert point to homogeneous form.
"""
pass
def isEquivalent(*args, **kwargs):
"""
Test for equivalence of two points, within a tolerance.
"""
pass
def rationalize(*args, **kwargs):
"""
Convert point to rational form.
"""
pass
w = None
x = None
y = None
z = None
__new__ = None
kOrigin = None
kTolerance = 1e-10
class MPlug(object):
"""
Create and access dependency node plugs.
"""
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def array(*args, **kwargs):
"""
Returns a plug for the array of plugs of which this plug is an element.
"""
pass
def asBool(*args, **kwargs):
"""
Retrieves the plug's value, as a boolean.
"""
pass
def asChar(*args, **kwargs):
"""
Retrieves the plug's value, as a single-byte integer.
"""
pass
def asDouble(*args, **kwargs):
"""
Retrieves the plug's value, as a double-precision float.
"""
pass
def asFloat(*args, **kwargs):
"""
Retrieves the plug's value, as a single-precision float.
"""
pass
def asInt(*args, **kwargs):
"""
Retrieves the plug's value, as a regular integer.
"""
pass
def asMAngle(*args, **kwargs):
"""
Retrieves the plug's value, as an MAngle.
"""
pass
def asMDistance(*args, **kwargs):
"""
Retrieves the plug's value, as an MDistance.
"""
pass
def asMObject(*args, **kwargs):
"""
Retrieves the plug's value, as an MObject containing a direct reference to the plug's data.
"""
pass
def asMTime(*args, **kwargs):
"""
Retrieves the plug's value, as an MTime.
"""
pass
def asShort(*args, **kwargs):
"""
Retrieves the plug's value, as a short integer.
"""
pass
def asString(*args, **kwargs):
"""
Retrieves the plug's value, as a string.
"""
pass
def attribute(*args, **kwargs):
"""
Returns the attribute currently referenced by this plug.
"""
pass
def child(*args, **kwargs):
"""
Returns a plug for the specified child attribute of this plug.
"""
pass
def connectedTo(*args, **kwargs):
"""
Returns an array of plugs which are connected to this one.
"""
pass
def connectionByPhysicalIndex(*args, **kwargs):
"""
Returns a plug for the index'th connected element of this plug.
"""
pass
def constructHandle(*args, **kwargs):
"""
Constructs a data handle for the plug.
"""
pass
def destructHandle(*args, **kwargs):
"""
Destroys a data handle previously constructed using constructHandle().
"""
pass
def elementByLogicalIndex(*args, **kwargs):
"""
Returns a plug for the element of this plug array having the specified logical index.
"""
pass
def elementByPhysicalIndex(*args, **kwargs):
"""
Returns a plug for the element of this plug array having the specified physical index.
"""
pass
def evaluateNumElements(*args, **kwargs):
"""
Like numElements() but evaluates all connected elements first to ensure that they are included in the count.
"""
pass
def getExistingArrayAttributeIndices(*args, **kwargs):
"""
Returns an array of all the plug's logical indices which are currently in use.
"""
pass
def getSetAttrCmds(*args, **kwargs):
"""
Returns a list of strings containing the setAttr commands (in MEL syntax) for this plug and all of its descendants.
"""
pass
def isFreeToChange(*args, **kwargs):
"""
Returns a value indicating if the plug's value can be changed, after taking into account the effects of locking and connections.
"""
pass
def logicalIndex(*args, **kwargs):
"""
Returns this plug's logical index within its parent array.
"""
pass
def name(*args, **kwargs):
"""
Returns the name of the plug.
"""
pass
def node(*args, **kwargs):
"""
Returns the node that this plug belongs to.
"""
pass
def numChildren(*args, **kwargs):
"""
Returns the number of children this plug has.
"""
pass
def numConnectedChildren(*args, **kwargs):
"""
Returns the number of this plug's children which have connections.
"""
pass
def numConnectedElements(*args, **kwargs):
"""
Returns the number of this plug's elements which have connections.
"""
pass
def numElements(*args, **kwargs):
"""
Returns the number of the plug's logical indices which are currently in use. Connected elements which have not yet been evaluated may not yet fully exist and may be excluded from the count.
"""
pass
def parent(*args, **kwargs):
"""
Returns a plug for the parent of this plug.
"""
pass
def partialName(*args, **kwargs):
"""
Returns the name of the plug, formatted according to various criteria.
"""
pass
def selectAncestorLogicalIndex(*args, **kwargs):
"""
Changes the logical index of the specified attribute in the plug's path.
"""
pass
def setAttribute(*args, **kwargs):
"""
Switches the plug to reference the given attribute of the same node as the previously referenced attribute.
"""
pass
def setBool(*args, **kwargs):
"""
Sets the plug's value as a boolean.
"""
pass
def setChar(*args, **kwargs):
"""
Sets the plug's value as a single-byte integer.
"""
pass
def setDouble(*args, **kwargs):
"""
Sets the plug's value as a double-precision float.
"""
pass
def setFloat(*args, **kwargs):
"""
Sets the plug's value as a single-precision float.
"""
pass
def setInt(*args, **kwargs):
"""
Sets the plug's value as a regular integer.
"""
pass
def setMAngle(*args, **kwargs):
"""
Sets the plug's value as an MAngle.
"""
pass
def setMDataHandle(*args, **kwargs):
"""
Sets the plug's value as a data handle.
"""
pass
def setMDistance(*args, **kwargs):
"""
Sets the plug's value as an MDistance.
"""
pass
def setMObject(*args, **kwargs):
"""
Sets the plug's value as an MObject.
"""
pass
def setMPxData(*args, **kwargs):
"""
Sets the plug's value using custom plug-in data.
"""
pass
def setMTime(*args, **kwargs):
"""
Sets the plug's value as an MTime.
"""
pass
def setNumElements(*args, **kwargs):
"""
Pre-allocates space for count elements in an array of plugs.
"""
pass
def setShort(*args, **kwargs):
"""
Sets the plug's value as a short integer.
"""
pass
def setString(*args, **kwargs):
"""
Sets the plug's value as a string.
"""
pass
info = None
isArray = None
isCaching = None
isChannelBox = None
isChild = None
isCompound = None
isConnected = None
isDestination = None
isDynamic = None
isElement = None
isFromReferencedFile = None
isIgnoredWhenRendering = None
isKeyable = None
isLocked = None
isNetworked = None
isNull = None
isProcedural = None
isSource = None
__new__ = None
kAll = 0
kChanged = 2
kChildrenNotFreeToChange = 2
kFreeToChange = 0
kLastAttrSelector = 3
kNonDefault = 1
kNotFreeToChange = 1
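# Minimal usage sketch for MPlug, assuming a Maya session where
# maya.api.OpenMaya is importable. The plug name 'persp.translateX' is an
# assumption (present in a default scene).
def _example_mplug():
    import maya.api.OpenMaya as om
    sel = om.MSelectionList()
    sel.add('persp.translateX')          # resolve a plug by name
    plug = sel.getPlug(0)
    old_value = plug.asDouble()          # read the current value
    plug.setDouble(old_value + 1.0)      # write a new value
    return plug.name(), plug.isLocked, plug.asDouble()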
class MArgParser(object):
"""
Command argument list parser.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def commandArgumentBool(*args, **kwargs):
"""
commandArgumentBool(argIndex) -> bool
Returns the specified command argument as a bool.
"""
pass
def commandArgumentDouble(*args, **kwargs):
"""
Alias for commandArgumentFloat().
"""
pass
def commandArgumentFloat(*args, **kwargs):
"""
commandArgumentFloat(argIndex) -> float
Returns the specified command argument as a float.
"""
pass
def commandArgumentInt(*args, **kwargs):
"""
commandArgumentInt(argIndex) -> int
Returns the specified command argument as an int.
"""
pass
def commandArgumentMAngle(*args, **kwargs):
"""
commandArgumentMAngle(argIndex) -> MAngle
Returns the specified command argument as an MAngle.
"""
pass
def commandArgumentMDistance(*args, **kwargs):
"""
commandArgumentMDistance(argIndex) -> MDistance
Returns the specified command argument as an MDistance.
"""
pass
def commandArgumentMTime(*args, **kwargs):
"""
commandArgumentMTime(argIndex) -> MTime
Returns the specified command argument as an MTime.
"""
pass
def commandArgumentString(*args, **kwargs):
"""
commandArgumentString(argIndex) -> unicode string
Returns the specified command argument as a string.
"""
pass
def flagArgumentBool(*args, **kwargs):
"""
flagArgumentBool(flagName, argIndex) -> bool
Returns the specified argument of the specified single-use flag as
a bool.
"""
pass
def flagArgumentDouble(*args, **kwargs):
"""
flagArgumentDouble(flagName, argIndex) -> float
Alias for flagArgumentFloat().
"""
pass
def flagArgumentFloat(*args, **kwargs):
"""
flagArgumentFloat(flagName, argIndex) -> float
Returns the specified argument of the specified single-use flag as
a float.
"""
pass
def flagArgumentInt(*args, **kwargs):
"""
flagArgumentInt(flagName, argIndex) -> int
Returns the specified argument of the specified single-use flag as
an int.
"""
pass
def flagArgumentMAngle(*args, **kwargs):
"""
flagArgumentMAngle(flagName, argIndex) -> MAngle
Returns the specified argument of the specified single-use flag as
an MAngle.
"""
pass
def flagArgumentMDistance(*args, **kwargs):
"""
flagArgumentMDistance(flagName, argIndex) -> MDistance
Returns the specified argument of the specified single-use flag as
an MDistance.
"""
pass
def flagArgumentMTime(*args, **kwargs):
"""
flagArgumentMTime(flagName, argIndex) -> MTime
Returns the specified argument of the specified single-use flag as
an MTime.
"""
pass
def flagArgumentString(*args, **kwargs):
"""
flagArgumentString(flagName, argIndex) -> string
Returns the specified argument of the specified single-use flag as
a string.
"""
pass
def getFlagArgumentList(*args, **kwargs):
"""
getFlagArgumentList(flagName, occurrence) -> MArgList
Returns the arguments for the specified occurrence of the given
multi-use flag as an MArgList. Raises RuntimeError if the flag has
not been enabled for multi-use. Raises IndexError if occurrence is
out of range.
"""
pass
def getFlagArgumentPosition(*args, **kwargs):
"""
getFlagArgumentPosition(flagName, occurrence) -> int
Returns the position in the argument list of the specified occurrence
of the given flag. Raises IndexError if occurrence is out of range.
"""
pass
def getObjectStrings(*args, **kwargs):
"""
getObjectStrings() -> tuple of unicode strings
If the command's MSyntax has set the object format to kStringObjects
then this method will return the objects passed to the command as a
tuple of strings. If any other object format is set then an empty
tuple will be returned.
"""
pass
def isFlagSet(*args, **kwargs):
"""
isFlagSet(flagName) -> bool
Returns True if the given flag appears on the command line.
"""
pass
def numberOfFlagUses(*args, **kwargs):
"""
numberOfFlagUses(flagName) -> int
Returns the number of times that the flag appears on the command
line.
"""
pass
isEdit = None
isQuery = None
numberOfFlagsUsed = None
__new__ = None
class MQuaternion(object):
"""
Quaternion math.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(y) <==> x*y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def __neg__(*args, **kwargs):
"""
x.__neg__() <==> -x
"""
pass
def __radd__(*args, **kwargs):
"""
x.__radd__(y) <==> y+x
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(y) <==> y*x
"""
pass
def __rsub__(*args, **kwargs):
"""
x.__rsub__(y) <==> y-x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def __sub__(*args, **kwargs):
"""
x.__sub__(y) <==> x-y
"""
pass
def asAxisAngle(*args, **kwargs):
"""
Returns the rotation as a tuple containing an axis vector and an angle in radians about that axis.
"""
pass
def asEulerRotation(*args, **kwargs):
"""
Returns the rotation as an equivalent MEulerRotation.
"""
pass
def asMatrix(*args, **kwargs):
"""
Returns the rotation as an equivalent rotation matrix.
"""
pass
def conjugate(*args, **kwargs):
"""
Returns the conjugate of this quaternion (i.e. x, y and z components negated).
"""
pass
def conjugateIt(*args, **kwargs):
"""
In-place conjugation (i.e. negates the x, y and z components).
"""
pass
def exp(*args, **kwargs):
"""
Returns a new quaternion containing the exponent of this one.
"""
pass
def inverse(*args, **kwargs):
"""
Returns a new quaternion containing the inverse of this one.
"""
pass
def invertIt(*args, **kwargs):
"""
In-place inversion.
"""
pass
def isEquivalent(*args, **kwargs):
"""
Returns True if the distance between the two quaternions (in quaternion space) is less than or equal to the given tolerance.
"""
pass
def log(*args, **kwargs):
"""
Returns a new quaternion containing the natural log of this one.
"""
pass
def negateIt(*args, **kwargs):
"""
In-place negation of the x, y, z and w components.
"""
pass
def normal(*args, **kwargs):
"""
Returns a new quaternion containing the normalized version of this one (i.e. scaled to unit length).
"""
pass
def normalizeIt(*args, **kwargs):
"""
In-place normalization (i.e. scales the quaternion to unit length).
"""
pass
def setToXAxis(*args, **kwargs):
"""
Set this quaternion to be equivalent to a rotation of a given angle, in radians, about the X-axis.
"""
pass
def setToYAxis(*args, **kwargs):
"""
Set this quaternion to be equivalent to a rotation of a given angle, in radians, about the Y-axis.
"""
pass
def setToZAxis(*args, **kwargs):
"""
Set this quaternion to be equivalent to a rotation of a given angle, in radians, about the Z-axis.
"""
pass
def setValue(*args, **kwargs):
"""
Set the value of this quaternion to that of the specified MQuaternion, MEulerRotation, MMatrix or MVector and angle.
"""
pass
def slerp(*args, **kwargs):
"""
Returns the quaternion at a given interpolation value along the shortest path between two quaternions.
"""
pass
def squad(*args, **kwargs):
"""
Returns the quaternion at a given interpolation value along a cubic curve segment in quaternion space.
"""
pass
def squadPt(*args, **kwargs):
"""
Returns a new quaternion representing an intermediate point which, when used with squad(), will produce a C1 continuous spline.
"""
pass
w = None
x = None
y = None
z = None
__new__ = None
kIdentity = None
kTolerance = 1e-10
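# Minimal usage sketch for MQuaternion, assuming a Maya session where
# maya.api.OpenMaya is importable.
def _example_mquaternion():
    import maya.api.OpenMaya as om
    import math
    qx = om.MQuaternion()
    qx.setToXAxis(math.radians(90.0))    # 90-degree rotation about X
    qy = om.MQuaternion()
    qy.setToYAxis(math.radians(45.0))    # 45-degree rotation about Y
    combined = qx * qy                   # compose the two rotations
    axis, angle = combined.asAxisAngle() # back to axis/angle form
    return axis, math.degrees(angle), combined.asMatrix()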
class MPxAttributePatternFactory(object):
"""
Base class for custom attribute pattern factories.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
__new__ = None
class MColor(object):
"""
Manipulate color data.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __div__(*args, **kwargs):
"""
x.__div__(y) <==> x/y
"""
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __idiv__(*args, **kwargs):
"""
x.__idiv__(y) <==> x/=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(y) <==> x*y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def __radd__(*args, **kwargs):
"""
x.__radd__(y) <==> y+x
"""
pass
def __rdiv__(*args, **kwargs):
"""
x.__rdiv__(y) <==> y/x
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(y) <==> y*x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def getColor(*args, **kwargs):
"""
Returns a list containing the color's components, in the specified color model.
"""
pass
def setColor(*args, **kwargs):
"""
Sets the color's components and color model.
"""
pass
a = None
b = None
g = None
r = None
__new__ = None
kByte = 1
kCMY = 2
kCMYK = 3
kFloat = 0
kHSV = 1
kOpaqueBlack = None
kRGB = 0
kShort = 2
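# Usage sketch for MColor (illustrative, not part of the stub). It assumes these
# classes live in Maya's Python API module, imported below as `om` (e.g.
# `import maya.api.OpenMaya as om`; the exact module path and the constructor's
# argument form are assumptions, not stated in this file):
#
#   c = om.MColor((1.0, 0.5, 0.25))              # assumed: init from an RGB sequence
#   print(c.r, c.g, c.b, c.a)                    # per-channel access via r/g/b/a
#   hsv = c.getColor(om.MColor.kHSV)             # components in the HSV color model
#   c.setColor([0.0, 1.0, 1.0], om.MColor.kHSV)  # assumed argument order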
class MSelectionList(object):
"""
A heterogeneous list of MObjects, MPlugs and MDagPaths.
__init__()
Initializes a new, empty MSelectionList object.
__init__(MSelectionList other)
Initializes a new MSelectionList object containing the same
items as another list.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def add(*args, **kwargs):
"""
add(pattern, searchChildNamespaces=False) -> self
add(item, mergeWithExisting=True) -> self
The first version adds to the list any nodes, DAG paths, components
or plugs which match the given pattern string.
The second version adds the specific item to the list, where the
item can be a plug (MPlug), a node (MObject), a DAG path (MDagPath)
or a component (tuple of (MDagPath, MObject) ).
"""
pass
def clear(*args, **kwargs):
"""
clear() -> self
Empties the selection list.
"""
pass
def copy(*args, **kwargs):
"""
copy(src) -> self
Replaces the contents of the selection list with a copy of those from src (MSelectionList).
"""
pass
def getComponent(*args, **kwargs):
"""
getComponent(index) -> (MDagPath, MObject)
Returns the index'th item of the list as a component, represented by
a tuple containing an MDagPath and an MObject. If the item is just a
DAG path without a component then MObject.kNullObj will be returned
in the second element of the tuple. Raises TypeError if the item is
neither a DAG path nor a component. Raises IndexError if index is
out of range.
"""
pass
def getDagPath(*args, **kwargs):
"""
getDagPath(index) -> MDagPath
Returns the DAG path associated with the index'th item of the list.
Raises TypeError if the item is neither a DAG path nor a component.
Raises IndexError if index is out of range.
"""
pass
def getDependNode(*args, **kwargs):
"""
getDependNode(index) -> MObject
Returns the node associated with the index'th item, whether it be a
dependency node, DAG path, component or plug. Raises IndexError if
index is out of range.
"""
pass
def getPlug(*args, **kwargs):
"""
getPlug(index) -> MPlug
Returns the index'th item of the list as a plug. Raises TypeError if
the item is not a plug. Raises IndexError if index is out of range.
"""
pass
def getSelectionStrings(*args, **kwargs):
"""
getSelectionStrings(index=None) -> (string, string, ...)
Returns a tuple containing the string representation of the
specified item. For nodes, DAG paths, plugs and contiguous
components the tuple will only contain a single string, but for non-
contiguous components there will be a separate string for each
distinct block of contiguous elements. If index is not specified
then the string representations of all the items in the selection
list are returned. Raises IndexError if index is out of bounds.
"""
pass
def hasItem(*args, **kwargs):
"""
hasItem(item) -> bool
Returns True if the given item is on the selection list. For a
component this means that all of the elements of the component must
be on the list. A component is passed as a tuple containing the
MDagPath of the DAG node and an MObject containing the component.
"""
pass
def hasItemPartly(*args, **kwargs):
"""
hasItemPartly(dagPath, component) -> bool
Returns True if at least one of the component's elements is on the
selection list. Raises TypeError if dagPath is invalid or component
does not contain a component.
"""
pass
def isEmpty(*args, **kwargs):
"""
isEmpty() -> bool
Returns True if the selection list is empty.
"""
pass
def length(*args, **kwargs):
"""
length() -> int
Returns the number of items on the selection list.
"""
pass
def merge(*args, **kwargs):
"""
merge(other, strategy=kMergeNormal) -> self
merge(dagPath, component, strategy=kMergeNormal) -> self
The first version merges the items from another selection list in
with those already on the list, using the given strategy.
The second version merges the specified component with those already
on the list.
"""
pass
def remove(*args, **kwargs):
"""
remove(index) -> self
Removes the index'th item from the list. Raises IndexError if the
index is out of range.
"""
pass
def replace(*args, **kwargs):
"""
replace(index, newItem) -> self
Replaces the index'th item on the list with a new item. A component
is passed as a tuple containing the MDagPath of the DAG node and an
MObject containing the component. Raises IndexError if the index is
out of range.
"""
pass
def toggle(*args, **kwargs):
"""
toggle(dagPath, component) -> self
Removes from the list those elements of the given component which
are already on it and adds those which are not.
"""
pass
__new__ = None
kMergeNormal = 0
kRemoveFromList = 2
kXORWithList = 1
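# Usage sketch for MSelectionList (illustrative only; assumes the module is
# imported as `om` in a running Maya session and that a node named 'persp' exists):
#
#   sel = om.MSelectionList()
#   sel.add('persp')                          # add by name pattern
#   dag = sel.getDagPath(0)                   # first item as an MDagPath
#   node = sel.getDependNode(0)               # same item as an MObject
#   print(sel.length(), sel.getSelectionStrings(0))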
class MFn(object):
"""
Static class providing constants for all API types.
"""
kAISEnvFacade = 961
kAddDoubleLinear = 5
kAdskMaterial = 1049
kAffect = 6
kAimConstraint = 111
kAir = 257
kAlignCurve = 41
kAlignManip = 897
kAlignSurface = 42
kAmbientLight = 303
kAngle = 270
kAngleBetween = 21
kAnimBlend = 781
kAnimBlendInOut = 782
kAnimCurve = 7
kAnimCurveTimeToAngular = 8
kAnimCurveTimeToDistance = 9
kAnimCurveTimeToTime = 10
kAnimCurveTimeToUnitless = 11
kAnimCurveUnitlessToAngular = 12
kAnimCurveUnitlessToDistance = 13
kAnimCurveUnitlessToTime = 14
kAnimCurveUnitlessToUnitless = 15
kAnimLayer = 1002
kAnisotropy = 609
kAnnotation = 271
kAnyGeometryVarGroup = 115
kArcLength = 273
kAreaLight = 305
kArrayMapper = 517
kArrowManip = 123
kAssembly = 1063
kAsset = 1000
kAttachCurve = 43
kAttachSurface = 44
kAttribute = 554
kAttribute2Double = 734
kAttribute2Float = 735
kAttribute2Int = 737
kAttribute2Long = 737
kAttribute2Short = 736
kAttribute3Double = 738
kAttribute3Float = 739
kAttribute3Int = 741
kAttribute3Long = 741
kAttribute3Short = 740
kAttribute4Double = 866
kAudio = 22
kAverageCurveManip = 149
kAvgCurves = 45
kAvgNurbsSurfacePoints = 47
kAvgSurfacePoints = 46
kAxesActionManip = 124
kBackground = 23
kBallProjectionManip = 125
kBarnDoorManip = 150
kBase = 1
kBaseLattice = 249
kBendLattice = 335
kBevel = 48
kBevelManip = 151
kBevelPlus = 885
kBezierCurve = 1036
kBezierCurveData = 1037
kBezierCurveToNurbs = 1039
kBinaryData = 733
kBirailSrf = 49
kBlend = 27
kBlendColorSet = 726
kBlendColors = 31
kBlendDevice = 30
kBlendManip = 152
kBlendNodeAdditiveRotation = 1015
kBlendNodeAdditiveScale = 1014
kBlendNodeBase = 1003
kBlendNodeBoolean = 1004
kBlendNodeDouble = 1005
kBlendNodeDoubleAngle = 1006
kBlendNodeDoubleLinear = 1007
kBlendNodeEnum = 1008
kBlendNodeFloat = 1009
kBlendNodeFloatAngle = 1010
kBlendNodeFloatLinear = 1011
kBlendNodeInt16 = 1012
kBlendNodeInt32 = 1013
kBlendNodeTime = 1034
kBlendShape = 336
kBlendTwoAttr = 28
kBlendWeighted = 29
kBlindData = 743
kBlindDataTemplate = 744
kBlinn = 365
kBlinnMaterial = 380
kBoundary = 53
kBox = 853
kBoxData = 852
kBrownian = 497
kBrush = 752
kBulge = 486
kBulgeLattice = 337
kBump = 32
kBump3d = 33
kButtonManip = 153
kCacheBase = 981
kCacheBlend = 982
kCacheFile = 971
kCacheTrack = 983
kCacheableNode = 978
kCamera = 250
kCameraManip = 154
kCameraPlaneManip = 143
kCameraSet = 993
kCameraView = 34
kCenterManip = 134
kChainToSpline = 35
kCharacter = 675
kCharacterMap = 790
kCharacterMappingData = 729
kCharacterOffset = 676
kChecker = 487
kChoice = 36
kChooser = 759
kCircle = 54
kCircleManip = 126
kCirclePointManip = 231
kCircleSweepManip = 128
kClampColor = 39
kClientDevice = 1059
kClip = 796
kClipGhostShape = 1064
kClipLibrary = 767
kClipScheduler = 766
kClipToGhostData = 1065
kCloseCurve = 55
kCloseSurface = 57
kClosestPointOnMesh = 973
kClosestPointOnSurface = 56
kCloth = 488
kCloud = 498
kCluster = 251
kClusterFilter = 344
kClusterFlexor = 300
kCoiManip = 155
kCollision = 253
kColorBackground = 24
kColorProfile = 1048
kCommCornerManip = 600
kCommCornerOperManip = 601
kCommEdgeOperManip = 598
kCommEdgePtManip = 597
kCommEdgeSegmentManip = 599
kComponent = 524
kComponentListData = 572
kComponentManip = 661
kCompoundAttribute = 564
kConcentricProjectionManip = 129
kCondition = 37
kCone = 96
kConstraint = 917
kContainer = 995
kContainerBase = 1050
kContrast = 38
kControl = 475
kCopyColorSet = 725
kCopyUVSet = 794
kCpManip = 156
kCrater = 499
kCreaseSet = 1072
kCreate = 40
kCreateBPManip = 823
kCreateBezierManip = 1035
kCreateCVManip = 157
kCreateColorSet = 723
kCreateEPManip = 158
kCreateSectionManip = 810
kCreateUVSet = 795
kCrossSectionEditManip = 811
kCrossSectionManager = 809
kCubicProjectionManip = 130
kCurve = 266
kCurveCVComponent = 525
kCurveCurveIntersect = 628
kCurveEPComponent = 526
kCurveEdManip = 159
kCurveFromMeshCoM = 919
kCurveFromMeshEdge = 627
kCurveFromSubdivEdge = 822
kCurveFromSubdivFace = 828
kCurveFromSurface = 58
kCurveFromSurfaceBnd = 59
kCurveFromSurfaceCoS = 60
kCurveFromSurfaceIso = 61
kCurveInfo = 62
kCurveKnotComponent = 527
kCurveNormalizerAngle = 985
kCurveNormalizerLinear = 986
kCurveParamComponent = 528
kCurveSegmentManip = 160
kCurveVarGroup = 116
kCylinder = 98
kCylindricalProjectionManip = 131
kDOF = 323
kDPbirailSrf = 50
kDagContainer = 1051
kDagNode = 107
kDagPose = 677
kDagSelectionItem = 551
kData = 571
kData2Double = 581
kData2Float = 582
kData2Int = 583
kData2Long = 583
kData2Short = 584
kData3Double = 585
kData3Float = 586
kData3Int = 587
kData3Long = 587
kData3Short = 588
kData4Double = 867
kDblTrsManip = 190
kDecayRegionCapComponent = 537
kDecayRegionComponent = 538
kDefaultLightList = 317
kDeformBend = 612
kDeformBendManip = 618
kDeformFlare = 615
kDeformFlareManip = 621
kDeformFunc = 611
kDeformSine = 616
kDeformSineManip = 622
kDeformSquash = 614
kDeformSquashManip = 620
kDeformTwist = 613
kDeformTwistManip = 619
kDeformWave = 617
kDeformWaveManip = 623
kDeleteColorSet = 724
kDeleteComponent = 318
kDeleteUVSet = 787
kDependencyNode = 4
kDetachCurve = 63
kDetachSurface = 64
kDiffuseMaterial = 378
kDimension = 269
kDimensionManip = 232
kDirectedDisc = 276
kDirectionManip = 161
kDirectionalLight = 308
kDiscManip = 132
kDiskCache = 849
kDispatchCompute = 319
kDisplacementShader = 321
kDisplayLayer = 720
kDisplayLayerManager = 721
kDistance = 272
kDistanceBetween = 322
kDistanceManip = 625
kDofManip = 162
kDoubleAngleAttribute = 556
kDoubleArrayData = 573
kDoubleIndexedComponent = 701
kDoubleLinearAttribute = 558
kDoubleShadingSwitch = 606
kDrag = 258
kDropOffFunction = 812
kDropoffLocator = 282
kDropoffManip = 163
kDummy = 254
kDummyConnectable = 324
kDynAirManip = 711
kDynArrayAttrsData = 716
kDynAttenuationManip = 715
kDynBase = 707
kDynBaseFieldManip = 710
kDynEmitterManip = 708
kDynFieldsManip = 709
kDynGlobals = 756
kDynNewtonManip = 712
kDynParticleSetComponent = 549
kDynSpreadManip = 714
kDynSweptGeometryData = 730
kDynTurbulenceManip = 713
kDynamicConstraint = 975
kDynamicsController = 325
kEdgeComponent = 534
kEditCurve = 807
kEditCurveManip = 808
kEditMetadata = 1071
kEmitter = 255
kEnableManip = 136
kEnumAttribute = 561
kEnvBall = 480
kEnvChrome = 482
kEnvCube = 481
kEnvFacade = 960
kEnvFogMaterial = 372
kEnvFogShape = 278
kEnvSky = 483
kEnvSphere = 484
kExplodeNurbsShell = 679
kExpression = 327
kExtendCurve = 65
kExtendCurveDistanceManip = 164
kExtendSurface = 66
kExtendSurfaceDistanceManip = 703
kExtract = 328
kExtrude = 67
kExtrudeManip = 165
kFFD = 338
kFFblendSrf = 68
kFFfilletSrf = 69
kFacade = 958
kFfdDualBase = 339
kField = 256
kFileBackground = 25
kFileTexture = 489
kFilletCurve = 70
kFilter = 329
kFilterClosestSample = 330
kFilterEuler = 331
kFilterSimplify = 332
kFitBspline = 71
kFixedLineManip = 233
kFlexor = 299
kFloatAngleAttribute = 557
kFloatArrayData = 1019
kFloatLinearAttribute = 559
kFloatMatrixAttribute = 568
kFloatVectorArrayData = 996
kFlow = 72
kFluid = 899
kFluidData = 901
kFluidEmitter = 905
kFluidGeom = 900
kFluidTexture2D = 894
kFluidTexture3D = 893
kFollicle = 920
kForceUpdateManip = 682
kFosterParent = 1074
kFourByFourMatrix = 762
kFractal = 490
kFreePointManip = 133
kFreePointTriadManip = 137
kGammaCorrect = 333
kGenericAttribute = 565
kGeoConnectable = 326
kGeoConnector = 907
kGeometric = 265
kGeometryConstraint = 113
kGeometryData = 699
kGeometryFilt = 334
kGeometryOnLineManip = 142
kGeometryVarGroup = 114
kGlobalCacheControls = 848
kGlobalStitch = 688
kGranite = 500
kGravity = 259
kGreasePencilSequence = 1070
kGreasePlane = 1068
kGreasePlaneRenderShape = 1069
kGrid = 491
kGroundPlane = 290
kGroupId = 348
kGroupParts = 349
kGuide = 350
kGuideLine = 301
kHairConstraint = 925
kHairSystem = 921
kHairTubeShader = 932
kHandleRotateManip = 216
kHardenPointCurve = 73
kHardwareReflectionMap = 872
kHardwareRenderGlobals = 516
kHardwareRenderingGlobals = 1053
kHeightField = 906
kHikEffector = 945
kHikFKJoint = 947
kHikFloorContactMarker = 967
kHikGroundPlane = 968
kHikHandle = 949
kHikIKEffector = 946
kHikSolver = 948
kHistorySwitch = 972
kHsvToRgb = 351
kHwShaderNode = 875
kHyperGraphInfo = 352
kHyperLayout = 353
kHyperLayoutDG = 987
kHyperView = 354
kIkEffector = 119
kIkHandle = 120
kIkRPManip = 167
kIkSolver = 355
kIkSplineManip = 166
kIkSystem = 361
kIllustratorCurve = 74
kImageAdd = 646
kImageBlur = 652
kImageColorCorrect = 651
kImageData = 640
kImageDepth = 654
kImageDiff = 647
kImageDisplay = 655
kImageFilter = 653
kImageLoad = 641
kImageMotionBlur = 657
kImageMultiply = 648
kImageNetDest = 644
kImageNetSrc = 643
kImageOver = 649
kImagePlane = 362
kImageRender = 645
kImageSave = 642
kImageSource = 778
kImageUnder = 650
kImageView = 656
kImplicitCone = 880
kImplicitSphere = 881
kInsertKnotCrv = 75
kInsertKnotSrf = 76
kInstancer = 749
kIntArrayData = 574
kIntersectSurface = 77
kInvalid = 0
kIsoparmComponent = 529
kIsoparmManip = 146
kItemList = 553
kJiggleDeformer = 847
kJoint = 121
kJointCluster = 346
kJointClusterManip = 168
kJointTranslateManip = 229
kKeyframeDelta = 934
kKeyframeDeltaAddRemove = 937
kKeyframeDeltaBlockAddRemove = 938
kKeyframeDeltaBreakdown = 942
kKeyframeDeltaInfType = 939
kKeyframeDeltaMove = 935
kKeyframeDeltaScale = 936
kKeyframeDeltaTangent = 940
kKeyframeDeltaWeighted = 941
kKeyframeRegionManip = 984
kKeyingGroup = 674
kLambert = 363
kLambertMaterial = 379
kLast = 1075
kLattice = 279
kLatticeComponent = 535
kLatticeData = 575
kLatticeGeom = 280
kLayeredShader = 368
kLayeredTexture = 791
kLeastSquares = 370
kLeather = 501
kLight = 302
kLightDataAttribute = 566
kLightFogMaterial = 371
kLightInfo = 369
kLightLink = 755
kLightList = 373
kLightManip = 169
kLightProjectionGeometry = 234
kLightSource = 374
kLightSourceMaterial = 382
kLimitManip = 135
kLineArrowManip = 235
kLineManip = 147
kLineModifier = 962
kLinearLight = 306
kLocator = 281
kLodGroup = 760
kLodThresholds = 758
kLookAt = 112
kLuminance = 375
kMCsolver = 356
kMPbirailSrf = 51
kMakeGroup = 376
kMandelbrot = 1066
kMandelbrot3D = 1067
kManip2DContainer = 192
kManipContainer = 148
kManipulator = 230
kManipulator2D = 205
kManipulator3D = 122
kMarble = 502
kMarker = 283
kMarkerManip = 210
kMaterial = 377
kMaterialFacade = 959
kMaterialInfo = 383
kMatrixAdd = 384
kMatrixAttribute = 567
kMatrixData = 576
kMatrixFloatData = 659
kMatrixHold = 385
kMatrixMult = 386
kMatrixPass = 387
kMatrixWtAdd = 388
kMembrane = 1020
kMentalRayTexture = 927
kMergeVertsToolManip = 1021
kMesh = 296
kMeshComponent = 539
kMeshData = 577
kMeshEdgeComponent = 540
kMeshFaceVertComponent = 544
kMeshFrEdgeComponent = 542
kMeshGeom = 297
kMeshMapComponent = 803
kMeshPolygonComponent = 541
kMeshVarGroup = 117
kMeshVertComponent = 543
kMeshVtxFaceComponent = 732
kMessageAttribute = 569
kMidModifier = 389
kMidModifierWithMatrix = 390
kModel = 3
kModifyEdgeBaseManip = 824
kModifyEdgeCrvManip = 815
kModifyEdgeManip = 816
kMotionPath = 435
kMotionPathManip = 170
kMountain = 492
kMoveUVShellManip2D = 697
kMoveVertexManip = 750
kMultDoubleLinear = 761
kMultiSubVertexComponent = 547
kMultilisterLight = 436
kMultiplyDivide = 437
kMute = 916
kNBase = 980
kNCloth = 989
kNComponent = 976
kNId = 1018
kNIdData = 1017
kNObject = 998
kNObjectData = 997
kNParticle = 990
kNRigid = 991
kNamedObject = 2
kNearestPointOnCurve = 1047
kNewton = 260
kNoise = 865
kNonAmbientLight = 304
kNonDagSelectionItem = 552
kNonExtendedLight = 307
kNonLinear = 610
kNormalConstraint = 238
kNucleus = 979
kNumericAttribute = 555
kNumericData = 580
kNurbsBoolean = 680
kNurbsCircular2PtArc = 630
kNurbsCircular3PtArc = 629
kNurbsCube = 80
kNurbsCurve = 267
kNurbsCurveData = 579
kNurbsCurveGeom = 268
kNurbsCurveToBezier = 1038
kNurbsPlane = 79
kNurbsSquare = 608
kNurbsSurface = 294
kNurbsSurfaceData = 578
kNurbsSurfaceGeom = 295
kNurbsTesselate = 78
kNurbsToSubdiv = 747
kObjectAttrFilter = 667
kObjectBinFilter = 928
kObjectFilter = 663
kObjectMultiFilter = 664
kObjectNameFilter = 665
kObjectRenderFilter = 668
kObjectScriptFilter = 669
kObjectTypeFilter = 666
kOcean = 861
kOceanShader = 884
kOffsetCos = 81
kOffsetCosManip = 171
kOffsetCurve = 82
kOffsetCurveManip = 172
kOffsetSurface = 631
kOffsetSurfaceManip = 639
kOldGeometryConstraint = 438
kOpticalFX = 439
kOrientConstraint = 239
kOrientationComponent = 545
kOrientationLocator = 286
kOrientationMarker = 284
kOrthoGrid = 291
kPASolver = 357
kPairBlend = 912
kParamDimension = 275
kParentConstraint = 242
kParticle = 311
kParticleAgeMapper = 440
kParticleCloud = 441
kParticleColorMapper = 442
kParticleIncandecenceMapper = 443
kParticleSamplerInfo = 793
kParticleTransparencyMapper = 444
kPartition = 445
kPassContributionMap = 774
kPfxGeometry = 930
kPfxHair = 931
kPfxToon = 955
kPhong = 366
kPhongExplorer = 367
kPhongMaterial = 381
kPivotComponent = 530
kPivotManip2D = 191
kPlace2dTexture = 446
kPlace3dTexture = 447
kPlanarProjectionManip = 207
kPlanarTrimSrf = 83
kPlane = 288
kPlugin = 570
kPluginCameraSet = 994
kPluginClientDevice = 1060
kPluginConstraintNode = 999
kPluginData = 589
kPluginDeformerNode = 602
kPluginDependNode = 448
kPluginEmitterNode = 718
kPluginFieldNode = 717
kPluginGeometryData = 754
kPluginHardwareShader = 876
kPluginHwShaderNode = 877
kPluginIkSolver = 748
kPluginImagePlaneNode = 988
kPluginLocatorNode = 449
kPluginManipContainer = 683
kPluginManipulatorNode = 1016
kPluginObjectSet = 909
kPluginParticleAttributeMapperNode = 992
kPluginShape = 698
kPluginSpringNode = 719
kPluginThreadedDevice = 1061
kPluginTransformNode = 898
kPlusMinusAverage = 450
kPointArrayData = 590
kPointConstraint = 240
kPointLight = 309
kPointManip = 236
kPointMatrixMult = 451
kPointOnCurveInfo = 84
kPointOnCurveManip = 208
kPointOnLineManip = 211
kPointOnPolyConstraint = 1042
kPointOnSurfaceInfo = 85
kPointOnSurfaceManip = 212
kPoleVectorConstraint = 243
kPolyAppend = 393
kPolyAppendVertex = 783
kPolyArrow = 963
kPolyAutoProj = 837
kPolyAutoProjManip = 951
kPolyAverageVertex = 836
kPolyBevel = 391
kPolyBlindData = 745
kPolyBoolOp = 604
kPolyBridgeEdge = 977
kPolyChipOff = 394
kPolyCloseBorder = 395
kPolyCollapseEdge = 396
kPolyCollapseF = 397
kPolyColorDel = 728
kPolyColorMod = 727
kPolyColorPerVertex = 722
kPolyComponentData = 969
kPolyCone = 427
kPolyConnectComponents = 1043
kPolyCreaseEdge = 944
kPolyCreateFacet = 433
kPolyCreateToolManip = 140
kPolyCreator = 425
kPolyCube = 428
kPolyCut = 887
kPolyCutManip = 891
kPolyCutManipContainer = 890
kPolyCylProj = 398
kPolyCylinder = 429
kPolyDelEdge = 399
kPolyDelFacet = 400
kPolyDelVertex = 401
kPolyDuplicateEdge = 957
kPolyEdgeToCurve = 1001
kPolyEditEdgeFlow = 1073
kPolyExtrudeEdge = 780
kPolyExtrudeFacet = 402
kPolyExtrudeManip = 1056
kPolyExtrudeManipContainer = 1057
kPolyExtrudeVertex = 911
kPolyFlipEdge = 779
kPolyFlipUV = 874
kPolyHelix = 970
kPolyHoleFace = 1041
kPolyLayoutUV = 838
kPolyMapCut = 403
kPolyMapDel = 404
kPolyMapSew = 405
kPolyMapSewMove = 839
kPolyMappingManip = 194
kPolyMergeEdge = 406
kPolyMergeFacet = 407
kPolyMergeUV = 895
kPolyMergeVert = 685
kPolyMesh = 430
kPolyMirror = 943
kPolyModifierManip = 195
kPolyMoveEdge = 408
kPolyMoveFacet = 409
kPolyMoveFacetUV = 410
kPolyMoveUV = 411
kPolyMoveUVManip = 193
kPolyMoveVertex = 412
kPolyMoveVertexManip = 196
kPolyMoveVertexUV = 413
kPolyNormal = 414
kPolyNormalPerVertex = 746
kPolyNormalizeUV = 873
kPolyPipe = 966
kPolyPlanProj = 415
kPolyPlatonicSolid = 965
kPolyPoke = 888
kPolyPokeManip = 892
kPolyPrimitive = 426
kPolyPrimitiveMisc = 964
kPolyPrism = 952
kPolyProj = 416
kPolyProjectCurve = 1054
kPolyProjectionManip = 174
kPolyPyramid = 953
kPolyQuad = 417
kPolyReduce = 757
kPolySelectEditFeedbackManip = 1024
kPolySeparate = 452
kPolySewEdge = 684
kPolySmooth = 418
kPolySmoothFacet = 686
kPolySmoothProxy = 929
kPolySoftEdge = 419
kPolySphProj = 420
kPolySphere = 431
kPolySpinEdge = 1040
kPolySplit = 421
kPolySplitEdge = 801
kPolySplitRing = 954
kPolySplitToolManip = 141
kPolySplitVert = 797
kPolyStraightenUVBorder = 896
kPolySubdEdge = 422
kPolySubdFacet = 423
kPolyToSubdiv = 672
kPolyToolFeedbackManip = 1023
kPolyToolFeedbackShape = 312
kPolyTorus = 432
kPolyTransfer = 835
kPolyTriangulate = 424
kPolyTweak = 392
kPolyTweakUV = 696
kPolyUVRectangle = 1052
kPolyUnite = 434
kPolyVertexNormalManip = 197
kPolyWedgeFace = 889
kPositionMarker = 285
kPostProcessList = 453
kPrecompExport = 775
kPrimitive = 86
kProjectCurve = 87
kProjectTangent = 88
kProjectTangentManip = 177
kProjection = 454
kProjectionManip = 173
kProjectionMultiManip = 176
kProjectionUVManip = 175
kPropModManip = 178
kPropMoveTriadManip = 138
kProxy = 108
kProxyManager = 950
kPsdFileTexture = 933
kQuadPtOnLineManip = 179
kQuadShadingSwitch = 910
kRBFsurface = 89
kRPsolver = 359
kRadial = 261
kRadius = 274
kRamp = 493
kRampBackground = 26
kRampShader = 882
kRbfSrfManip = 180
kRebuildCurve = 90
kRebuildSurface = 91
kRecord = 455
kReference = 742
kReflect = 364
kRemapColor = 923
kRemapHsv = 924
kRemapValue = 922
kRenderBox = 854
kRenderCone = 97
kRenderGlobals = 512
kRenderGlobalsList = 513
kRenderLayer = 772
kRenderLayerManager = 773
kRenderPass = 770
kRenderPassSet = 771
kRenderQuality = 514
kRenderRect = 277
kRenderSetup = 511
kRenderSphere = 298
kRenderTarget = 776
kRenderUtilityList = 456
kRenderedImageSource = 777
kRenderingList = 1055
kResolution = 515
kResultCurve = 16
kResultCurveTimeToAngular = 17
kResultCurveTimeToDistance = 18
kResultCurveTimeToTime = 19
kResultCurveTimeToUnitless = 20
kReverse = 457
kReverseCrvManip = 182
kReverseCurve = 92
kReverseCurveManip = 181
kReverseSurface = 93
kReverseSurfaceManip = 183
kRevolve = 94
kRevolveManip = 184
kRevolvedPrimitive = 95
kRevolvedPrimitiveManip = 185
kRgbToHsv = 458
kRigid = 314
kRigidConstraint = 313
kRigidDeform = 340
kRigidSolver = 459
kRock = 503
kRotateBoxManip = 214
kRotateLimitsManip = 217
kRotateManip = 215
kRotateUVManip2D = 694
kRoundConstantRadius = 632
kRoundConstantRadiusManip = 635
kRoundRadiusCrvManip = 634
kRoundRadiusManip = 633
kSCsolver = 358
kSPbirailSrf = 52
kSamplerInfo = 467
kScaleConstraint = 244
kScaleLimitsManip = 218
kScaleManip = 219
kScalePointManip = 817
kScaleUVManip2D = 695
kScalingBoxManip = 220
kScreenAlignedCircleManip = 127
kScript = 626
kScriptManip = 221
kSculpt = 341
kSectionManip = 804
kSelectionItem = 550
kSelectionList = 595
kSelectionListData = 662
kSelectionListOperator = 670
kSequenceManager = 1031
kSequencer = 1032
kSet = 460
kSetGroupComponent = 548
kSetRange = 463
kSfRevolveManip = 827
kShaderGlow = 464
kShaderList = 465
kShadingEngine = 320
kShadingMap = 466
kShape = 248
kShapeFragment = 468
kShot = 1033
kSimpleVolumeShader = 469
kSingleIndexedComponent = 700
kSingleShadingSwitch = 605
kSketchPlane = 289
kSkin = 100
kSkinBinding = 1044
kSkinClusterFilter = 673
kSkinShader = 660
kSl60 = 470
kSmear = 902
kSmoothCurve = 687
kSmoothTangentSrf = 769
kSnapshot = 471
kSnapshotPath = 908
kSnapshotShape = 845
kSnow = 504
kSoftMod = 252
kSoftModFilter = 345
kSoftModManip = 624
kSolidFractal = 505
kSphere = 99
kSphereData = 591
kSphericalProjectionManip = 222
kSplineSolver = 360
kSpotCylinderManip = 187
kSpotLight = 310
kSpotManip = 186
kSpring = 315
kSprite = 292
kSquareSrf = 704
kSquareSrfManip = 705
kStateManip = 145
kStencil = 494
kStereoCameraMaster = 1030
kStitchAsNurbsShell = 678
kStitchSrf = 101
kStitchSrfManip = 681
kStoryBoard = 472
kStringArrayData = 593
kStringData = 592
kStringShadingSwitch = 903
kStroke = 751
kStrokeGlobals = 753
kStucco = 506
kStudioClearCoat = 904
kStyleCurve = 886
kSubCurve = 102
kSubSurface = 768
kSubVertexComponent = 546
kSubdAddTopology = 878
kSubdAutoProj = 863
kSubdBlindData = 789
kSubdBoolean = 813
kSubdCleanTopology = 879
kSubdCloseBorder = 850
kSubdDelFace = 844
kSubdExtrudeFace = 825
kSubdHierBlind = 788
kSubdLayoutUV = 859
kSubdMapCut = 858
kSubdMapSewMove = 860
kSubdMappingManip = 871
kSubdMergeVert = 851
kSubdModifier = 840
kSubdModifyEdge = 814
kSubdMoveEdge = 842
kSubdMoveFace = 843
kSubdMoveVertex = 841
kSubdPlanProj = 868
kSubdProjectionManip = 870
kSubdSplitFace = 855
kSubdSubdivideFace = 864
kSubdTweak = 869
kSubdTweakUV = 857
kSubdiv = 671
kSubdivCVComponent = 689
kSubdivCollapse = 792
kSubdivCompId = 785
kSubdivData = 798
kSubdivEdgeComponent = 690
kSubdivFaceComponent = 691
kSubdivGeom = 799
kSubdivMapComponent = 846
kSubdivReverseFaces = 802
kSubdivSurfaceVarGroup = 826
kSubdivToNurbs = 806
kSubdivToPoly = 706
kSummaryObject = 473
kSuper = 474
kSurface = 293
kSurfaceCVComponent = 531
kSurfaceEPComponent = 532
kSurfaceEdManip = 764
kSurfaceFaceComponent = 765
kSurfaceInfo = 103
kSurfaceKnotComponent = 533
kSurfaceLuminance = 476
kSurfaceRangeComponent = 536
kSurfaceShader = 477
kSurfaceVarGroup = 118
kSymmetryConstraint = 241
kSymmetryLocator = 819
kSymmetryMapCurve = 821
kSymmetryMapVector = 820
kTangentConstraint = 245
kTexLattice = 200
kTexLatticeDeformManip = 199
kTexSmoothManip = 201
kTexSmudgeUVManip = 198
kTextButtonManip = 638
kTextCurves = 104
kTextManip = 913
kTexture2d = 485
kTexture3d = 496
kTextureBakeSet = 461
kTextureEnv = 479
kTextureList = 478
kTextureManip3D = 223
kThreadedDevice = 1058
kThreePointArcManip = 636
kTime = 509
kTimeAttribute = 560
kTimeFunction = 926
kTimeToUnitConversion = 510
kTimeWarp = 1062
kToggleManip = 224
kToggleOnLineManip = 144
kToonLineAttributes = 956
kTorus = 603
kTowPointManip = 139
kTowPointOnCurveManip = 209
kTowPointOnSurfaceManip = 763
kTransferAttributes = 974
kTransform = 110
kTransformBoxManip = 818
kTransformGeometry = 596
kTranslateBoxManip = 225
kTranslateLimitsManip = 226
kTranslateManip = 227
kTranslateManip2D = 206
kTranslateUVManip = 213
kTranslateUVManip2D = 693
kTriadManip = 237
kTrim = 105
kTrimLocator = 287
kTrimManip = 228
kTrimWithBoundaries = 918
kTriplanarProjectionManip = 188
kTripleIndexedComponent = 702
kTripleShadingSwitch = 607
kTrsInsertManip = 203
kTrsManip = 189
kTrsTransManip = 202
kTrsXformManip = 204
kTurbulence = 262
kTweak = 342
kTwoPointArcManip = 637
kTxSl = 507
kTypedAttribute = 563
kUInt64ArrayData = 800
kUVManip2D = 692
kUint64SingleIndexedComponent = 1022
kUnderWorld = 109
kUniform = 263
kUnitAttribute = 562
kUnitConversion = 518
kUnitToTimeConversion = 519
kUnknown = 521
kUnknownDag = 316
kUnknownTransform = 246
kUntrim = 106
kUnused1 = 829
kUnused2 = 830
kUnused3 = 831
kUnused4 = 832
kUnused5 = 833
kUnused6 = 834
kUseBackground = 520
kUvChooser = 784
kVectorArrayData = 594
kVectorProduct = 522
kVertexBakeSet = 462
kVertexWeightSet = 1046
kViewColorManager = 658
kViewManip = 914
kVolumeAxis = 786
kVolumeBindManip = 1045
kVolumeFog = 856
kVolumeLight = 883
kVolumeNoise = 862
kVolumeShader = 523
kVortex = 264
kWater = 495
kWeightGeometryFilt = 343
kWire = 347
kWood = 508
kWorld = 247
kWrapFilter = 731
kWriteToColorBuffer = 1026
kWriteToDepthBuffer = 1028
kWriteToFrameBuffer = 1025
kWriteToLabelBuffer = 1029
kWriteToVectorBuffer = 1027
kXformManip = 915
kXsectionSubdivEdit = 805
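# Usage sketch for the MFn type constants (illustrative only). They are typically
# compared against MObject.apiType() or passed to MObject.hasFn(); `obj` below is a
# hypothetical MObject obtained elsewhere (e.g. from an MSelectionList):
#
#   if obj.hasFn(om.MFn.kMesh):
#       print('object is compatible with mesh function sets')
#   if obj.apiType() == om.MFn.kTransform:
#       print('object is exactly a transform node')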
class MGlobal(object):
"""
Static class providing common API global functions.
"""
def displayError(*args, **kwargs):
"""
displayError(msg) -> None
Display an error in the script editor.
"""
pass
def displayInfo(*args, **kwargs):
"""
displayInfo(msg) -> None
Display an informational message in the script editor.
"""
pass
def displayWarning(*args, **kwargs):
"""
displayWarning(msg) -> None
Display a warning in the script editor.
"""
pass
def getActiveSelectionList(*args, **kwargs):
"""
getActiveSelectionList() -> MSelectionList
Return an MSelectionList containing the nodes, components and
plugs currently selected in Maya.
"""
pass
def getFunctionSetList(*args, **kwargs):
"""
getFunctionSetList(MObject) -> (string, string, ...)
Returns a tuple of strings that represent the type of each function
set that will accept this object.
"""
pass
def getSelectionListByName(*args, **kwargs):
"""
getSelectionListByName(name) -> MSelectionList
Returns an MSelectionList with all of the objects that match the
specified name. The name may use the same type of regular expressions
as can be used in MEL commands. For example, the pattern 'pCube*' will
match all occurrences of objects whose names begin with 'pCube'.
"""
pass
kAddToHeadOfList = 4
kAddToList = 2
kBaseUIMode = 3
kBatch = 1
kInteractive = 0
kLibraryApp = 2
kRemoveFromList = 3
kReplaceList = 0
kSelectComponentMode = 1
kSelectLeafMode = 3
kSelectObjectMode = 0
kSelectRootMode = 2
kSelectTemplateMode = 4
kSurfaceSelectMethod = 0
kWireframeSelectMethod = 1
kXORWithList = 1
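# Usage sketch for MGlobal (illustrative only; assumes the module is imported as
# `om` inside an interactive Maya session):
#
#   sel = om.MGlobal.getActiveSelectionList()            # current selection
#   om.MGlobal.displayInfo('%d item(s) selected' % sel.length())
#   cubes = om.MGlobal.getSelectionListByName('pCube*')  # wildcard lookup by name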
class MArgList(object):
"""
Argument list for passing to commands.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def addArg(*args, **kwargs):
"""
addArg(arg) -> self , 'arg' is a numeric value, MAngle, MDistance,
MTime, MPoint or MVector.
Add an argument to the end of the arg list.
"""
pass
def asAngle(*args, **kwargs):
"""
asAngle(index) -> MAngle
Return an argument as an MAngle.
"""
pass
def asBool(*args, **kwargs):
"""
asBool(index) -> bool
Return an argument as a boolean.
"""
pass
def asDistance(*args, **kwargs):
"""
asDistance(index) -> MDistance
Return an argument as an MDistance.
"""
pass
def asDouble(*args, **kwargs):
"""
asDouble(index) -> float
Alias for asFloat().
"""
pass
def asDoubleArray(*args, **kwargs):
"""
asDoubleArray(index) -> MDoubleArray
Return a sequence of arguments as an MDoubleArray.
"""
pass
def asFloat(*args, **kwargs):
"""
asFloat(index) -> float
Return an argument as a float.
"""
pass
def asInt(*args, **kwargs):
"""
asInt(index) -> int
Return an argument as an integer.
"""
pass
def asIntArray(*args, **kwargs):
"""
asIntArray(index) -> MIntArray
Return a sequence of arguments as an MIntArray.
"""
pass
def asMatrix(*args, **kwargs):
"""
asMatrix(index) -> MMatrix
Return a sequence of arguments as an MMatrix.
"""
pass
def asPoint(*args, **kwargs):
"""
asPoint(index) -> MPoint
Return a sequence of arguments as an MPoint.
"""
pass
def asString(*args, **kwargs):
"""
asString(index) -> string
Return an argument as a string.
"""
pass
def asStringArray(*args, **kwargs):
"""
asStringArray(index) -> list of strings
Return a sequence of arguments as a list of strings.
"""
pass
def asTime(*args, **kwargs):
"""
asTime(index) -> MTime
Return an argument as an MTime.
"""
pass
def asVector(*args, **kwargs):
"""
asVector(index) -> MVector
Return a sequence of arguments as an MVector.
"""
pass
def flagIndex(*args, **kwargs):
"""
flagIndex(shortFlag, longFlag=None) -> int
Return index of first occurrence of specified flag.
"""
pass
def lastArgUsed(*args, **kwargs):
"""
lastArgUsed() -> int
Return index of last argument used by the most recent as*() method.
"""
pass
__new__ = None
kInvalidArgIndex = 4294967295
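# Usage sketch for MArgList (illustrative only). Commands normally receive an
# MArgList from Maya, but one can also be built by hand:
#
#   args = om.MArgList()
#   args.addArg(2.5)
#   args.addArg(3)
#   print(len(args))          # 2
#   print(args.asDouble(0))   # 2.5
#   print(args.asInt(1))      # 3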
class MFloatPointArray(object):
"""
Array of MFloatPoint values.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __contains__(*args, **kwargs):
"""
x.__contains__(y) <==> y in x
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __delslice__(*args, **kwargs):
"""
x.__delslice__(i, j) <==> del x[i:j]
Use of negative indices is not supported.
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __getslice__(*args, **kwargs):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(n) <==> x*n
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(n) <==> n*x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __setslice__(*args, **kwargs):
"""
x.__setslice__(i, j, y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def append(*args, **kwargs):
"""
Add a value to the end of the array.
"""
pass
def clear(*args, **kwargs):
"""
Remove all elements from the array.
"""
pass
def copy(*args, **kwargs):
"""
Replace the array's contents with those of another array or of a compatible Python sequence.
"""
pass
def insert(*args, **kwargs):
"""
Insert a new value into the array at the given index.
"""
pass
def remove(*args, **kwargs):
"""
Remove an element from the array.
"""
pass
def setLength(*args, **kwargs):
"""
Grow or shrink the array to contain a specific number of elements.
"""
pass
sizeIncrement = None
__new__ = None
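# Usage sketch for MFloatPointArray (illustrative only; the same sequence-style
# interface is shared by the module's other array classes, e.g. MPlugArray and
# MCallbackIdArray). MFloatPoint itself is not shown in this excerpt and is
# assumed to exist alongside these classes:
#
#   pts = om.MFloatPointArray()
#   pts.append(om.MFloatPoint(0.0, 1.0, 2.0))   # assumed MFloatPoint constructor
#   pts.setLength(4)                            # grow (or shrink) the array
#   print(len(pts), pts[0])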
class MRampAttribute(object):
"""
Functionset for creating and working with ramp attributes.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def addEntries(*args, **kwargs):
"""
Adds entries to the ramp.
"""
pass
def deleteEntries(*args, **kwargs):
"""
Removes from the ramp those entries with the specified indices.
"""
pass
def getEntries(*args, **kwargs):
"""
Returns a tuple containing all of the entries in the ramp.
"""
pass
def getValueAtPosition(*args, **kwargs):
"""
Returns the value of the entry at the given position.
"""
pass
def hasIndex(*args, **kwargs):
"""
Return true if an entry is defined at this index.
"""
pass
def numEntries(*args, **kwargs):
"""
Returns the number of entries in the ramp.
"""
pass
def pack(*args, **kwargs):
"""
Renumbers the entries' indices consecutively, starting from 0.
"""
pass
def setInterpolationAtIndex(*args, **kwargs):
"""
Sets the interpolation of the entry at the given index.
"""
pass
def setPositionAtIndex(*args, **kwargs):
"""
Sets the position of the entry at the given index.
"""
pass
def setRamp(*args, **kwargs):
"""
Set this ramp with one or multiple entries. Current entries are removed before adding the new one(s).
"""
pass
def setValueAtIndex(*args, **kwargs):
"""
Sets the value of the entry at the given index.
"""
pass
def sort(*args, **kwargs):
"""
Sort the ramp by position. Indices are also re-ordered during sort.
"""
pass
def createColorRamp(*args, **kwargs):
"""
Creates and returns a new color ramp attribute.
"""
pass
def createCurveRamp(*args, **kwargs):
"""
Creates and returns a new curve ramp attribute.
"""
pass
def createRamp(*args, **kwargs):
"""
Creates and returns a new color or curve ramp attribute initialized with values.
"""
pass
isColorRamp = None
isCurveRamp = None
__new__ = None
kLinear = 1
kNone = 0
kSmooth = 2
kSpline = 3
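# Usage sketch for MRampAttribute (illustrative only). `rampPlug` below is a
# hypothetical MPlug for an existing ramp attribute (e.g. obtained via
# MFnDependencyNode.findPlug); constructing the function set from a plug is an
# assumption, since the __init__ signature is not spelled out in this stub:
#
#   ramp = om.MRampAttribute(rampPlug)
#   print(ramp.numEntries())
#   print(ramp.getValueAtPosition(0.5))   # value of the ramp at position 0.5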
class MObject(object):
"""
Opaque wrapper for internal Maya objects.
"""
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def apiType(*args, **kwargs):
"""
Returns the function set type for the object.
"""
pass
def hasFn(*args, **kwargs):
"""
Tests whether object is compatible with the specified function set.
"""
pass
def isNull(*args, **kwargs):
"""
Tests whether there is an internal Maya object.
"""
pass
apiTypeStr = None
__new__ = None
kNullObj = None
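# Usage sketch for MObject (illustrative only). MObjects are opaque handles, so the
# usual pattern is to validate them before handing them to a function set; `obj`
# below is a hypothetical MObject obtained elsewhere:
#
#   if not obj.isNull() and obj.hasFn(om.MFn.kDependencyNode):
#       print(obj.apiTypeStr)                 # type name string, e.g. 'kTransform'
#   empty = om.MObject.kNullObj               # the canonical null object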
class MWeight(object):
"""
Methods for accessing component weight data. This class is currently
only used to access soft select and symmetry selection weights.
Other weight data (e.g. deformer weights) does not use this class
and can be accessed through the corresponding MFn class or directly
from the node's attributes.
__init__()
Initializes a new MWeight object with influence weight of 1 and seam
weight of 0.
__init__(MWeight src)
Initializes a new MWeight object with the same value as src.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
influence = None
seam = None
__new__ = None
class MFloatVector(object):
"""
3D vector with single-precision coordinates.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __div__(*args, **kwargs):
"""
x.__div__(y) <==> x/y
"""
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __idiv__(*args, **kwargs):
"""
x.__idiv__(y) <==> x/=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __isub__(*args, **kwargs):
"""
x.__isub__(y) <==> x-=y
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(y) <==> x*y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def __neg__(*args, **kwargs):
"""
x.__neg__() <==> -x
"""
pass
def __radd__(*args, **kwargs):
"""
x.__radd__(y) <==> y+x
"""
pass
def __rdiv__(*args, **kwargs):
"""
x.__rdiv__(y) <==> y/x
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(y) <==> y*x
"""
pass
def __rsub__(*args, **kwargs):
"""
x.__rsub__(y) <==> y-x
"""
pass
def __rxor__(*args, **kwargs):
"""
x.__rxor__(y) <==> y^x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def __sub__(*args, **kwargs):
"""
x.__sub__(y) <==> x-y
"""
pass
def __xor__(*args, **kwargs):
"""
x.__xor__(y) <==> x^y
"""
pass
def angle(*args, **kwargs):
"""
Returns the angle, in radians, between this vector and another.
"""
pass
def isEquivalent(*args, **kwargs):
"""
Returns True if this vector and another are within a given tolerance of being equal.
"""
pass
def isParallel(*args, **kwargs):
"""
Returns True if this vector and another are within the given tolerance of being parallel.
"""
pass
def length(*args, **kwargs):
"""
Returns the magnitude of this vector.
"""
pass
def normal(*args, **kwargs):
"""
Returns a new vector containing the normalized version of this one.
"""
pass
def normalize(*args, **kwargs):
"""
Normalizes this vector in-place and returns a new reference to it.
"""
pass
def transformAsNormal(*args, **kwargs):
"""
Returns a new vector which is calculated by postmultiplying this vector by the transpose of the given matrix and then normalizing the result.
"""
pass
x = None
y = None
z = None
__new__ = None
kOneVector = None
kTolerance = 9.999999747378752e-06
kXaxisVector = None
kXnegAxisVector = None
kYaxisVector = None
kYnegAxisVector = None
kZaxisVector = None
kZeroVector = None
kZnegAxisVector = None
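# Usage sketch for MFloatVector (illustrative only; the constructor argument form
# is an assumption, while the methods and constants used below are listed in the
# stub):
#
#   v = om.MFloatVector(1.0, 0.0, 0.0)
#   w = om.MFloatVector(0.0, 1.0, 0.0)
#   print(v.length())                 # 1.0
#   print(v.angle(w))                 # angle in radians, ~pi/2
#   print((v + w).normal())           # normalized copy of the sum
#   print(v.isEquivalent(om.MFloatVector.kXaxisVector))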
class MPlugArray(object):
"""
Array of MPlug values.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __contains__(*args, **kwargs):
"""
x.__contains__(y) <==> y in x
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __delslice__(*args, **kwargs):
"""
x.__delslice__(i, j) <==> del x[i:j]
Use of negative indices is not supported.
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __getslice__(*args, **kwargs):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(n) <==> x*n
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(n) <==> n*x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __setslice__(*args, **kwargs):
"""
x.__setslice__(i, j, y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def append(*args, **kwargs):
"""
Add a value to the end of the array.
"""
pass
def clear(*args, **kwargs):
"""
Remove all elements from the array.
"""
pass
def copy(*args, **kwargs):
"""
Replace the array's contents with those of another array or of a compatible Python sequence.
"""
pass
def insert(*args, **kwargs):
"""
Insert a new value into the array at the given index.
"""
pass
def remove(*args, **kwargs):
"""
Remove an element from the array.
"""
pass
def setLength(*args, **kwargs):
"""
Grow or shrink the array to contain a specific number of elements.
"""
pass
sizeIncrement = None
__new__ = None
class MCallbackIdArray(object):
"""
Array of MCallbackId values.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __contains__(*args, **kwargs):
"""
x.__contains__(y) <==> y in x
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __delslice__(*args, **kwargs):
"""
x.__delslice__(i, j) <==> del x[i:j]
Use of negative indices is not supported.
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __getslice__(*args, **kwargs):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(n) <==> x*n
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(n) <==> n*x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __setslice__(*args, **kwargs):
"""
x.__setslice__(i, j, y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def append(*args, **kwargs):
"""
Add a value to the end of the array.
"""
pass
def clear(*args, **kwargs):
"""
Remove all elements from the array.
"""
pass
def copy(*args, **kwargs):
"""
Replace the array's contents with those of another array or of a compatible Python sequence.
"""
pass
def insert(*args, **kwargs):
"""
Insert a new value into the array at the given index.
"""
pass
def remove(*args, **kwargs):
"""
Remove an element from the array.
"""
pass
def setLength(*args, **kwargs):
"""
Grow or shrink the array to contain a specific number of elements.
"""
pass
sizeIncrement = None
__new__ = None
class MNodeClass(object):
"""
A class for performing node class-level operations in the dependency graph.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def addExtensionAttribute(*args, **kwargs):
"""
Adds an extension attribute to the node class. An extension attribute is a class-level attribute which has been added dynamically to a node class. Because it is added at the class level, all nodes of that class will have the given attribute, and will only store the attribute's value if it differs from the default.
"""
pass
def attribute(*args, **kwargs):
"""
If passed an int, returns the node class's i'th attribute; raises IndexError if the index is out of bounds. If passed a string, returns the node class's attribute with the given name, or MObject.kNullObj if the class has no attribute with that name.
"""
pass
def getAttributes(*args, **kwargs):
"""
Returns an MObjectArray array containing all of the node class's attributes.
"""
pass
def hasAttribute(*args, **kwargs):
"""
Returns True if the node class has an attribute of the given name, False otherwise.
"""
pass
def removeExtensionAttribute(*args, **kwargs):
"""
Removes an extension attribute from the node class. Raises ValueError if attr is not an extension attribute of this node class.
"""
pass
def removeExtensionAttributeIfUnset(*args, **kwargs):
"""
Removes an extension attribute from the node class, but only if there are no nodes in the graph with non-default values for this attribute. Returns True if the attribute was removed, False otherwise. Raises ValueError if attr is not an extension attribute of this node class.
"""
pass
attributeCount = None
classification = None
pluginName = None
typeId = None
typeName = None
__new__ = None
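# Usage sketch for MNodeClass (illustrative only). Constructing it from a node type
# name is an assumption; the query methods and properties used below are listed in
# the stub:
#
#   nc = om.MNodeClass('transform')
#   print(nc.typeName, nc.attributeCount)
#   if nc.hasAttribute('translateX'):
#       attr = nc.attribute('translateX')     # MObject for the attribute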
class MPxCommand(object):
"""
Base class for custom commands.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def doIt(*args, **kwargs):
"""
Called by Maya to execute the command.
"""
pass
def hasSyntax(*args, **kwargs):
"""
Called by Maya to determine if the command provides an MSyntax object describing its syntax.
"""
pass
def isUndoable(*args, **kwargs):
"""
Called by Maya to determine if the command supports undo.
"""
pass
def redoIt(*args, **kwargs):
"""
Called by Maya to redo a previously undone command.
"""
pass
def syntax(*args, **kwargs):
"""
Returns the command's MSyntax object, if it has one.
"""
pass
def undoIt(*args, **kwargs):
"""
Called by Maya to undo a previously executed command.
"""
pass
def appendToResult(*args, **kwargs):
"""
Append a value to the result to be returned by the command.
"""
pass
def clearResult(*args, **kwargs):
"""
Clears the command's result.
"""
pass
def currentResult(*args, **kwargs):
"""
Returns the command's current result.
"""
pass
def currentResultType(*args, **kwargs):
"""
Returns the type of the current result.
"""
pass
def displayError(*args, **kwargs):
"""
Display an error message.
"""
pass
def displayInfo(*args, **kwargs):
"""
Display an informational message.
"""
pass
def displayWarning(*args, **kwargs):
"""
Display a warning message.
"""
pass
def isCurrentResultArray(*args, **kwargs):
"""
Returns true if the command's current result is an array of values.
"""
pass
def setResult(*args, **kwargs):
"""
Set the value of the result to be returned by the command.
"""
pass
commandString = None
historyOn = None
__new__ = None
kDouble = 1
kLong = 0
kNoArg = 3
kString = 2
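# Usage sketch for MPxCommand (illustrative only). A minimal custom command
# subclasses MPxCommand and overrides doIt(); the class and result value here are
# hypothetical:
#
#   class HelloCmd(om.MPxCommand):
#       def __init__(self):
#           om.MPxCommand.__init__(self)
#       def doIt(self, args):
#           self.setResult('hello')            # value returned to the caller
#       def isUndoable(self):
#           return False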
class MFnComponent(MFnBase):
"""
This is the base class for all function sets which deal with
component objects.
__init__()
Initializes a new, empty MFnComponent object
__init__(MObject component)
Initializes a new MFnComponent function set, attached to the specified component.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def isEqual(*args, **kwargs):
"""
isEqual(MObject other) -> bool
Returns True if other refers to the same component as the
one to which the function set is currently attached.
"""
pass
def weight(*args, **kwargs):
"""
weight(index) -> MWeight
Returns the weight associated with the specified element,
where index can range from 0 to elementCount-1.
"""
pass
componentType = None
elementCount = None
hasWeights = None
isComplete = None
isEmpty = None
__new__ = None
class MDagModifier(MDGModifier):
"""
Used to change the structure of the DAG
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def createNode(*args, **kwargs):
"""
createNode(typeName, parent=MObject.kNullObj) -> new DAG node MObject
createNode(typeId, parent=MObject.kNullObj) -> new DAG node MObject
Adds an operation to the modifier to create a DAG node of the specified
type. If a parent DAG node is provided the new node will be parented
under it. If no parent is provided and the new DAG node is a transform
type then it will be parented under the world. In both of these cases
the method returns the new DAG node.
If no parent is provided and the new DAG node is not a transform type
then a transform node will be created and the child parented under that.
The new transform will be parented under the world and it is the
transform node which will be returned by the method, not the child.
None of the newly created nodes will be added to the DAG until the
modifier's doIt() method is called.
"""
pass
def reparentNode(*args, **kwargs):
"""
reparentNode(MObject node, newParent=MObject.kNullObj) -> self
Adds an operation to the modifier to reparent a DAG node under a
specified parent.
If no parent is provided then the DAG node will be reparented under the
world, so long as it is a transform type. If it is not a transform type
then doIt() will raise a RuntimeError.
"""
pass
__new__ = None
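# Usage sketch for MDagModifier (illustrative only). Nothing is added to the DAG
# until doIt() is called; doIt()/undoIt() come from the MDGModifier base class,
# which is not shown in this excerpt:
#
#   mod = om.MDagModifier()
#   parent = mod.createNode('transform')        # queued, not yet added to the DAG
#   child = mod.createNode('transform', parent) # parented under the new transform
#   mod.doIt()                                  # apply the queued operations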
class MFnData(MFnBase):
"""
Base class for dependency graph data function sets.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
__new__ = None
kAny = 23
kComponentList = 12
kDoubleArray = 7
kDynArrayAttrs = 18
kDynSweptGeometry = 19
kFloatArray = 8
kIntArray = 9
kInvalid = 0
kLast = 24
kLattice = 14
kMatrix = 5
kMesh = 13
kNId = 22
kNObject = 21
kNumeric = 1
kNurbsCurve = 15
kNurbsSurface = 16
kPlugin = 2
kPluginGeometry = 3
kPointArray = 10
kSphere = 17
kString = 4
kStringArray = 6
kSubdSurface = 20
kVectorArray = 11
class MFnPlugin(MFnBase):
"""
Register and deregister plug-in services with Maya.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def apiVersion(*args, **kwargs):
"""
Return the API version required by the plug-in.
"""
pass
def deregisterAttributePatternFactory(*args, **kwargs):
"""
Deregister a user defined attribute pattern factory type from Maya.
"""
pass
def deregisterCommand(*args, **kwargs):
"""
Deregister a user defined command from Maya.
"""
pass
def loadPath(*args, **kwargs):
"""
Return the full path name of the file from which the plug-in was loaded.
"""
pass
def name(*args, **kwargs):
"""
Return the plug-in's name.
"""
pass
def registerAttributePatternFactory(*args, **kwargs):
"""
Register a new attribute pattern factory type with Maya.
"""
pass
def registerCommand(*args, **kwargs):
"""
Register a new command with Maya.
"""
pass
def setName(*args, **kwargs):
"""
Set the plug-in's name.
"""
pass
def vendor(*args, **kwargs):
"""
Return the plug-in's vendor string.
"""
pass
version = None
__new__ = None
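# Usage sketch for MFnPlugin (illustrative only). A typical plug-in entry point
# registers a command in initializePlugin and removes it in uninitializePlugin;
# the command name is hypothetical and the registerCommand argument form is an
# assumption:
#
#   def initializePlugin(mobject):
#       plugin = om.MFnPlugin(mobject)
#       plugin.registerCommand('hello', lambda: HelloCmd())
#
#   def uninitializePlugin(mobject):
#       plugin = om.MFnPlugin(mobject)
#       plugin.deregisterCommand('hello')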
class MArgDatabase(MArgParser):
"""
Command argument list parser which extends MArgParser with the
ability to return arguments and objects as MSelectionLists
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def commandArgumentMSelectionList(*args, **kwargs):
"""
commandArgumentMSelectionList(argIndex) -> MSelectionList
Returns the specified command argument as an MSelectionList.
"""
pass
def flagArgumentMSelectionList(*args, **kwargs):
"""
flagArgumentMSelectionList(flagName, argIndex) -> MSelectionList
Returns the specified argument of the specified single-use flag as
an MSelectionList.
"""
pass
def getObjectList(*args, **kwargs):
"""
getObjectList() -> MSelectionList
If the command's MSyntax has set the object format to kSelectionList
then this method will return the objects passed to the command as an
MSelectionList. If any other object format is set then an empty
selection list will be returned.
"""
pass
__new__ = None
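# Usage sketch for MArgDatabase (illustrative only). It is normally built inside a
# command's doIt() from the command's syntax and argument list; the constructor
# argument order shown here is an assumption:
#
#   def doIt(self, args):
#       argData = om.MArgDatabase(self.syntax(), args)
#       objects = argData.getObjectList()      # MSelectionList of command objects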
class MFnDependencyNode(MFnBase):
"""
Function set for operating on dependency nodes.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def addAttribute(*args, **kwargs):
"""
Adds a new dynamic attribute to the node.
"""
pass
def attribute(*args, **kwargs):
"""
Returns an attribute of the node, given either its index or name.
"""
pass
def attributeClass(*args, **kwargs):
"""
Returns the class of the specified attribute.
"""
pass
def attributeCount(*args, **kwargs):
"""
Returns the number of attributes on the node.
"""
pass
def canBeWritten(*args, **kwargs):
"""
Returns true if the node will be written to file.
"""
pass
def create(*args, **kwargs):
"""
Creates a new node of the given type.
"""
pass
def dgCallbackIds(*args, **kwargs):
"""
Returns DG timing information for a specific callback type, broken down by callbackId.
"""
pass
def dgCallbacks(*args, **kwargs):
"""
Returns DG timing information broken down by callback type.
"""
pass
def dgTimer(*args, **kwargs):
"""
Returns a specific DG timer metric for a given timer type.
"""
pass
def dgTimerOff(*args, **kwargs):
"""
Turns DG timing off for this node.
"""
pass
def dgTimerOn(*args, **kwargs):
"""
Turns DG timing on for this node.
"""
pass
def dgTimerQueryState(*args, **kwargs):
"""
Returns the current DG timer state for this node.
"""
pass
def dgTimerReset(*args, **kwargs):
"""
Resets all DG timers for this node.
"""
pass
def findAlias(*args, **kwargs):
"""
Returns the attribute which has the given alias.
"""
pass
def findPlug(*args, **kwargs):
"""
Returns a plug for the given attribute.
"""
pass
def getAffectedAttributes(*args, **kwargs):
"""
Returns all of the attributes which are affected by the specified attribute.
"""
pass
def getAffectingAttributes(*args, **kwargs):
"""
Returns all of the attributes which affect the specified attribute.
"""
pass
def getAliasAttr(*args, **kwargs):
"""
Returns the node's alias attribute, which is a special attribute used to store information about the node's attribute aliases.
"""
pass
def getAliasList(*args, **kwargs):
"""
Returns all of the node's attribute aliases.
"""
pass
def getConnections(*args, **kwargs):
"""
Returns all the plugs which are connected to attributes of this node.
"""
pass
def hasAttribute(*args, **kwargs):
"""
Returns True if the node has an attribute with the given name.
"""
pass
def hasUniqueName(*args, **kwargs):
"""
Returns True if the node's name is unique.
"""
pass
def isFlagSet(*args, **kwargs):
"""
Returns the state of the specified node flag.
"""
pass
def isNewAttribute(*args, **kwargs):
"""
Returns True if the specified attribute was added in the current scene, and not by one of its referenced files.
"""
pass
def isTrackingEdits(*args, **kwargs):
"""
Returns True if the node is referenced or in an assembly that is tracking edits.
"""
pass
def name(*args, **kwargs):
"""
Returns the node's name.
"""
pass
def plugsAlias(*args, **kwargs):
"""
Returns the alias for a plug's attribute.
"""
pass
def removeAttribute(*args, **kwargs):
"""
Removes a dynamic attribute from the node.
"""
pass
def reorderedAttribute(*args, **kwargs):
"""
        Returns one of the node's attributes, based on the order in which they are written to file.
"""
pass
def setAlias(*args, **kwargs):
"""
Adds or removes an attribute alias.
"""
pass
def setDoNotWrite(*args, **kwargs):
"""
Used to prevent the node from being written to file.
"""
pass
def setFlag(*args, **kwargs):
"""
Sets the state of the specified node flag.
"""
pass
def setName(*args, **kwargs):
"""
Sets the node's name.
"""
pass
def userNode(*args, **kwargs):
"""
Returns the MPxNode object for a plugin node.
"""
pass
def allocateFlag(*args, **kwargs):
"""
Allocates a flag on all nodes for use by the named plugin and returns the flag's index.
"""
pass
def classification(*args, **kwargs):
"""
Returns the classification string for the named node type.
"""
pass
def deallocateAllFlags(*args, **kwargs):
"""
Deallocates all node flags which are currently allocated to the named plugin.
"""
pass
def deallocateFlag(*args, **kwargs):
"""
Deallocates the specified node flag, which was previously allocated by the named plugin using allocateFlag().
"""
pass
isDefaultNode = None
isFromReferencedFile = None
isLocked = None
isShared = None
namespace = None
pluginName = None
typeId = None
typeName = None
__new__ = None
kExtensionAttr = 3
kInvalidAttr = 4
kLocalDynamicAttr = 1
kNormalAttr = 2
kTimerInvalidState = 3
kTimerMetric_callback = 0
kTimerMetric_callbackNotViaAPI = 6
kTimerMetric_callbackViaAPI = 5
kTimerMetric_compute = 1
kTimerMetric_computeDuringCallback = 7
kTimerMetric_computeNotDuringCallback = 8
kTimerMetric_dirty = 2
kTimerMetric_draw = 3
kTimerMetric_fetch = 4
kTimerOff = 0
kTimerOn = 1
kTimerType_count = 2
kTimerType_inclusive = 1
kTimerType_self = 0
kTimerUninitialized = 2
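
# Illustrative usage sketch (not part of the generated stub): a minimal,
# hedged example of attaching MFnDependencyNode to an existing node and
# reading a plug. It assumes it runs inside a Maya session and that the
# default 'persp' transform exists; the node and attribute names are only
# illustrative.
def _example_dependency_node():
    import maya.api.OpenMaya as om
    sel = om.MSelectionList()
    sel.add('persp')
    node = sel.getDependNode(0)
    fn = om.MFnDependencyNode(node)
    print(fn.name(), fn.typeName, fn.attributeCount())
    # findPlug() takes the attribute (or its name) plus a bool requesting
    # the networked plug; False returns the regular plug.
    plug = fn.findPlug('translateX', False)
    print(plug.asDouble())
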
class MFnAttribute(MFnBase):
"""
Base class for attribute functionsets.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def accepts(*args, **kwargs):
"""
Returns True if this attribute can accept a connection of the given type.
"""
pass
def addToCategory(*args, **kwargs):
"""
        Adds the attribute to a category.
"""
pass
def getAddAttrCmd(*args, **kwargs):
"""
Returns a string containing a MEL 'addAttr' command capable of recreating the attribute.
"""
pass
def hasCategory(*args, **kwargs):
"""
        Checks to see if the attribute has a given category.
"""
pass
def setNiceNameOverride(*args, **kwargs):
"""
        Sets a nice UI name for this attribute rather than using the default derived from its long name.
"""
pass
affectsAppearance = None
affectsWorldSpace = None
array = None
cached = None
channelBox = None
connectable = None
disconnectBehavior = None
dynamic = None
extension = None
hidden = None
indeterminant = None
indexMatters = None
internal = None
keyable = None
name = None
parent = None
readable = None
renderSource = None
shortName = None
storable = None
usedAsColor = None
usedAsFilename = None
usesArrayDataBuilder = None
worldSpace = None
writable = None
__new__ = None
kDelete = 0
kNothing = 2
kReset = 1
class MFnEnumAttribute(MFnAttribute):
"""
Functionset for creating and working with enumeration attributes.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def addField(*args, **kwargs):
"""
Add an item to the enumeration with a specified UI name and corresponding attribute value.
"""
pass
def create(*args, **kwargs):
"""
Creates a new enumeration attribute, attaches it to the function set and returns it as an MObject.
"""
pass
def fieldName(*args, **kwargs):
"""
Returns the name of the enumeration item which has a given value.
"""
pass
def fieldValue(*args, **kwargs):
"""
Returns the value of the enumeration item which has a given name.
"""
pass
def getMax(*args, **kwargs):
"""
Returns the maximum value of all the enumeration items.
"""
pass
def getMin(*args, **kwargs):
"""
Returns the minimum value of all the enumeration items.
"""
pass
def setDefaultByName(*args, **kwargs):
"""
Set the default value using the name of an enumeration item. Equivalent to: attr.default = attr.fieldValue(name)
"""
pass
default = None
__new__ = None
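
# Illustrative usage sketch (not part of the generated stub): building an
# enum attribute with MFnEnumAttribute and adding it to a node as a dynamic
# attribute. 'nodeObj' is assumed to be an MObject for a dependency node
# supplied by the caller; the attribute and field names are illustrative.
def _example_enum_attribute(nodeObj):
    import maya.api.OpenMaya as om
    eAttr = om.MFnEnumAttribute()
    attr = eAttr.create('quality', 'qual', 0)   # default field value is 0
    eAttr.addField('Low', 0)
    eAttr.addField('Medium', 1)
    eAttr.addField('High', 2)
    eAttr.keyable = True
    om.MFnDependencyNode(nodeObj).addAttribute(attr)
    return attr
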
class MFnDoubleIndexedComponent(MFnComponent):
"""
This function set allows you to create, edit, and query double indexed
components. Double indexed components store 2 dimensional index values.
__init__()
Initializes a new, empty MFnDoubleIndexedComponent object
__init__(MObject component)
Initializes a new MFnDoubleIndexedComponent function set, attached
to the specified component.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def addElement(*args, **kwargs):
"""
addElement(uIndex, vIndex) -> self
addElement([uIndex, vIndex]) -> self
Adds the element identified by (uIndex, vIndex) to the component.
"""
pass
def addElements(*args, **kwargs):
"""
addElements(sequence of [uIndex, vIndex]) -> self
Adds the specified elements to the component. Each item in the
elements sequence is itself a sequence of two ints which are the U and
V indices of an element to be added.
"""
pass
def create(*args, **kwargs):
"""
create(MFn Type constant) -> MObject
Creates a new, empty component, attaches it to the function set and
returns an MObject which references it.
"""
pass
def getCompleteData(*args, **kwargs):
"""
getCompleteData() -> (numU, numV)
Returns a tuple containing the number of U and V indices in the complete
component, or (0,0) if the component is not complete.
"""
pass
def getElement(*args, **kwargs):
"""
getElement(index) -> (uIndex, vIndex)
Returns the index'th element of the component as a tuple containing the
element's U and V indices.
"""
pass
def getElements(*args, **kwargs):
"""
getElements() -> list of (uIndex, vIndex)
Returns all of the component's elements as a list of tuples with each
tuple containing the U and V indices of a single element.
"""
pass
def setCompleteData(*args, **kwargs):
"""
setCompleteData(numU, numV) -> self
Marks the component as complete (i.e. contains all possible elements).
numU and numV indicate the number of U and V indices in the complete
component (i.e. the max U index is numU-1 and the max V index is numV-1).
"""
pass
__new__ = None
class MFnGenericAttribute(MFnAttribute):
"""
Functionset for creating and working with attributes which can accept several different types of data.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def addDataType(*args, **kwargs):
"""
Adds the specified Maya data type to the list of those accepted by the attribute.
"""
pass
def addNumericType(*args, **kwargs):
"""
Adds the specified numeric type to the list of those accepted by the attribute.
"""
pass
def addTypeId(*args, **kwargs):
"""
Adds the specified data typeId to the list of those accepted by the attribute.
"""
pass
def create(*args, **kwargs):
"""
Creates a new generic attribute, attaches it to the function set and returns it as an MObject.
"""
pass
def removeDataType(*args, **kwargs):
"""
Removes the specified Maya data type from the list of those accepted by the attribute.
"""
pass
def removeNumericType(*args, **kwargs):
"""
Removes the specified numeric type from the list of those accepted by the attribute.
"""
pass
def removeTypeId(*args, **kwargs):
"""
Removes the specified data typeId from the list of those accepted by the attribute.
"""
pass
__new__ = None
class MFnLightDataAttribute(MFnAttribute):
"""
Functionset for creating and working with light data attributes.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def child(*args, **kwargs):
"""
Returns one of the attribute's children, specified by index.
"""
pass
def create(*args, **kwargs):
"""
Creates a new light data attribute, attaches it to the function set and returns it as an MObject.
"""
pass
default = None
__new__ = None
class MFnMatrixAttribute(MFnAttribute):
"""
Functionset for creating and working with matrix attributes.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def create(*args, **kwargs):
"""
Creates a new matrix attribute, attaches it to the function set and returns it as an MObject.
"""
pass
default = None
__new__ = None
kDouble = 1
kFloat = 0
class MFnPointArrayData(MFnData):
"""
Function set for node data consisting of an array of MPoints.
"""
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def array(*args, **kwargs):
"""
Returns the encapsulated array as an MPointArray.
"""
pass
def copyTo(*args, **kwargs):
"""
Replaces the elements of an array with those in the encapsulated array.
"""
pass
def create(*args, **kwargs):
"""
Creates a new MPoint array data object.
"""
pass
def set(*args, **kwargs):
"""
Sets values in the encapsulated array.
"""
pass
__new__ = None
class MFnMessageAttribute(MFnAttribute):
"""
Functionset for creating and working with message attributes.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def create(*args, **kwargs):
"""
Creates a new message attribute, attaches it to the function set and returns it as an MObject.
"""
pass
__new__ = None
class MFnTripleIndexedComponent(MFnComponent):
"""
This function set allows you to create, edit, and query triple indexed
components. Triple indexed components store 3 dimensional index values.
__init__()
Initializes a new, empty MFnTripleIndexedComponent object
__init__(MObject component)
Initializes a new MFnTripleIndexedComponent function set, attached
to the specified component.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def addElement(*args, **kwargs):
"""
addElement(sIndex, tIndex, uIndex) -> self
addElement([sIndex, tIndex, uIndex]) -> self
Adds the element identified by (sIndex, tIndex, uIndex) to the component.
"""
pass
def addElements(*args, **kwargs):
"""
addElements(sequence of [sIndex, tIndex, uIndex]) -> self
Adds the specified elements to the component. Each item in the
elements sequence is itself a sequence of three ints which are the
S, T and U indices of an element to be added.
"""
pass
def create(*args, **kwargs):
"""
create(MFn Type constant) -> MObject
Creates a new, empty component, attaches it to the function set and
returns an MObject which references it.
"""
pass
def getCompleteData(*args, **kwargs):
"""
getCompleteData() -> (numS, numT, numU)
Returns a tuple containing the number of S, T and U indices in
the complete component, or (0,0,0) if the component is not complete.
"""
pass
def getElement(*args, **kwargs):
"""
getElement(index) -> (sIndex, tIndex, uIndex)
Returns the index'th element of the component as a tuple containing the
element's S, T and U indices.
"""
pass
def getElements(*args, **kwargs):
"""
getElements() -> list of (sIndex, tIndex, uIndex)
Returns all of the component's elements as a list of tuples with each
tuple containing the S, T and U indices of a single element.
"""
pass
def setCompleteData(*args, **kwargs):
"""
setCompleteData(numS, numT, numU) -> self
Marks the component as complete (i.e. contains all possible elements).
numS, numT and numU indicate the number of S, T and U indices
in the complete component (i.e. the max S index is numS-1, the max T
index is numT-1 and the max U index is numU-1).
"""
pass
__new__ = None
class MFnNumericAttribute(MFnAttribute):
"""
Functionset for creating and working with numeric attributes.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def child(*args, **kwargs):
"""
Returns the specified child attribute of the parent attribute currently attached to the function set.
"""
pass
def create(*args, **kwargs):
"""
Creates a new simple or compound numeric attribute, attaches it to the function set and returns it in an MObject.
"""
pass
def createAddr(*args, **kwargs):
"""
Creates a new address attribute, attaches it to the function set and returns it in an MObject.
"""
pass
def createColor(*args, **kwargs):
"""
Creates a new color attribute, attaches it to the function set and returns it in an MObject.
"""
pass
def createPoint(*args, **kwargs):
"""
Creates a new 3D point attribute, attaches it to the function set and returns it in an MObject.
"""
pass
def getMax(*args, **kwargs):
"""
Returns the attribute's hard maximum value(s).
"""
pass
def getMin(*args, **kwargs):
"""
Returns the attribute's hard minimum value(s).
"""
pass
def getSoftMax(*args, **kwargs):
"""
Returns the attribute's soft maximum value.
"""
pass
def getSoftMin(*args, **kwargs):
"""
Returns the attribute's soft minimum value.
"""
pass
def hasMax(*args, **kwargs):
"""
Returns True if a hard maximum value has been specified for the attribute.
"""
pass
def hasMin(*args, **kwargs):
"""
Returns True if a hard minimum value has been specified for the attribute.
"""
pass
def hasSoftMax(*args, **kwargs):
"""
Returns True if a soft maximum value has been specified for the attribute.
"""
pass
def hasSoftMin(*args, **kwargs):
"""
Returns True if a soft minimum value has been specified for the attribute.
"""
pass
def numericType(*args, **kwargs):
"""
Returns the numeric type of the attribute currently attached to the function set.
"""
pass
def setMax(*args, **kwargs):
"""
Sets the attribute's hard maximum value(s).
"""
pass
def setMin(*args, **kwargs):
"""
Sets the attribute's hard minimum value(s).
"""
pass
def setSoftMax(*args, **kwargs):
"""
Sets the attribute's soft maximum value.
"""
pass
def setSoftMin(*args, **kwargs):
"""
Sets the attribute's soft minimum value.
"""
pass
default = None
__new__ = None
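
# Illustrative usage sketch (not part of the generated stub): a float
# attribute with hard and soft limits built through MFnNumericAttribute.
# The names are illustrative, and the returned attribute still has to be
# added to a node with MFnDependencyNode.addAttribute().
def _example_numeric_attribute():
    import maya.api.OpenMaya as om
    nAttr = om.MFnNumericAttribute()
    attr = nAttr.create('size', 'sz', om.MFnNumericData.kFloat, 1.0)
    nAttr.setMin(0.0)      # hard minimum
    nAttr.setMax(10.0)     # hard maximum
    nAttr.setSoftMax(5.0)  # soft maximum used for UI sliders
    nAttr.keyable = True
    return attr
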
class MFnStringData(MFnData):
"""
Function set for string node data.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def create(*args, **kwargs):
"""
Creates a new string data object.
"""
pass
def set(*args, **kwargs):
"""
Sets the value of the encapsulated string.
"""
pass
def string(*args, **kwargs):
"""
Returns the encapsulated string as a unicode object.
"""
pass
__new__ = None
class MFnStringArrayData(MFnData):
"""
    Function set for node data consisting of an array of strings.
"""
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def array(*args, **kwargs):
"""
Returns the encapsulated array as a list of unicode objects.
"""
pass
def create(*args, **kwargs):
"""
Creates a new string array data object.
"""
pass
def set(*args, **kwargs):
"""
Sets values in the encapsulated array.
"""
pass
__new__ = None
class MFnComponentListData(MFnData):
"""
MFnComponentListData allows the creation and manipulation of component list
(represented as MObjects) data objects for use in the dependency graph.
__init__()
Initializes a new, empty MFnComponentListData object.
__init__(MObject)
Initializes a new MFnComponentListData function set, attached
to the specified object.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def add(*args, **kwargs):
"""
add(MObject) -> self
Adds the specified component to the end of the list.
"""
pass
def clear(*args, **kwargs):
"""
clear() -> self
Removes all of the components from the list.
"""
pass
def create(*args, **kwargs):
"""
create() -> MObject
Creates a new, empty component list, attaches it to the
function set and returns an MObject which references it.
"""
pass
def get(*args, **kwargs):
"""
get(index) -> MObject
Returns a copy of the component at the specified index.
Raises IndexError if the index is out of range.
"""
pass
def has(*args, **kwargs):
"""
has(MObject) -> bool
Returns True if the list contains the specified
component, False otherwise.
"""
pass
def length(*args, **kwargs):
"""
length() -> int
Returns the number of components in the list.
"""
pass
def remove(*args, **kwargs):
"""
remove(MObject) -> self
remove(index) -> self
Removes the specified component from the list.
        No exception is raised if the component is not in the list.
        Raises IndexError if the index is out of range.
"""
pass
__new__ = None
class MFnTypedAttribute(MFnAttribute):
"""
    Functionset for creating and working with typed attributes.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def attrType(*args, **kwargs):
"""
Returns the type of data handled by the attribute.
"""
pass
def create(*args, **kwargs):
"""
        Creates a new typed attribute, attaches it to the function set and returns it as an MObject.
"""
pass
default = None
__new__ = None
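
# Illustrative usage sketch (not part of the generated stub): a typed
# attribute that carries string data, with a default value built through
# MFnStringData. The attribute names are illustrative.
def _example_typed_string_attribute():
    import maya.api.OpenMaya as om
    strFn = om.MFnStringData()
    defaultObj = strFn.create()
    strFn.set('hello')
    tAttr = om.MFnTypedAttribute()
    attr = tAttr.create('note', 'nt', om.MFnData.kString, defaultObj)
    return attr
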
class MFnUInt64ArrayData(MFnData):
"""
    Function set for node data consisting of an array of MUint64 values.
"""
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def array(*args, **kwargs):
"""
Returns the encapsulated array as an MUint64Array.
"""
pass
def copyTo(*args, **kwargs):
"""
Replaces the elements of an array with those in the encapsulated array.
"""
pass
def create(*args, **kwargs):
"""
Creates a new MUint64 array data object.
"""
pass
def set(*args, **kwargs):
"""
Sets values in the encapsulated array.
"""
pass
__new__ = None
class MFnSingleIndexedComponent(MFnComponent):
"""
This function set allows you to create, edit, and query single indexed components.
Single indexed components store 1 dimensional index values.
__init__()
Initializes a new, empty MFnSingleIndexedComponent object
__init__(MObject component)
Initializes a new MFnSingleIndexedComponent function set, attached to the specified component.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def addElement(*args, **kwargs):
"""
addElement(int element) -> self
Adds the specified element to the component.
"""
pass
def addElements(*args, **kwargs):
"""
addElements([int]) -> self
addElements(MIntArray) -> self
Adds the specified elements to the component.
"""
pass
def create(*args, **kwargs):
"""
create(MFn Type constant) -> MObject
Creates a new, empty component, attaches it to the function set and
returns an MObject which references it.
"""
pass
def element(*args, **kwargs):
"""
element(index) -> int
Returns the index'th element of the component.
"""
pass
def getCompleteData(*args, **kwargs):
"""
getCompleteData() -> int
Returns the number of elements in the complete component, or 0 if the component is not complete.
"""
pass
def getElements(*args, **kwargs):
"""
getElements() -> MIntArray
Returns all of the component's elements.
"""
pass
def setCompleteData(*args, **kwargs):
"""
setCompleteData(numElements) -> self
Marks the component as complete (i.e. contains all possible elements).
numElements indicates the number of elements in the complete component.
"""
pass
__new__ = None
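
# Illustrative usage sketch (not part of the generated stub): building a
# mesh-vertex component with MFnSingleIndexedComponent and pairing it with
# a DAG path in a selection list. 'meshDagPath' is assumed to be an MDagPath
# to a mesh shape supplied by the caller; the vertex indices are illustrative.
def _example_vertex_component(meshDagPath):
    import maya.api.OpenMaya as om
    compFn = om.MFnSingleIndexedComponent()
    comp = compFn.create(om.MFn.kMeshVertComponent)
    compFn.addElements([0, 1, 2, 3])
    sel = om.MSelectionList()
    sel.add((meshDagPath, comp))   # a (path, component) pair selects components
    return sel
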
class MFnDagNode(MFnDependencyNode):
"""
Function set for operating on DAG nodes.
__init__()
Initializes a new, empty MFnDagNode functionset.
__init__(MObject)
Initializes a new MFnDagNode functionset and attaches it to a
DAG node.
__init__(MDagPath)
Initializes a new MFnDagNode functionset and attaches it to a
DAG path.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def addChild(*args, **kwargs):
"""
addChild(node, index=kNextPos, keepExistingParents=False) -> self
Makes a node a child of this one.
"""
pass
def child(*args, **kwargs):
"""
child(index) -> MObject
Returns the specified child of this node.
"""
pass
def childCount(*args, **kwargs):
"""
childCount() -> int
Returns the number of nodes which are children of this one.
"""
pass
def create(*args, **kwargs):
"""
create(type, name=None, parent=MObject.kNullObj) -> MObject
Creates a new DAG node of the specified type, with the given name.
The type may be either a type name or a type ID. If no name is given
then a unique name will be generated by combining the type name with
an integer.
If a parent is given then the new node will be parented under it and
the functionset will be attached to the newly-created node. The
newly-created node will be returned.
If no parent is given and the new node is a transform, it will be
parented under the world and the functionset will be attached to the
newly-created transform. The newly-created transform will be returned.
If no parent is given and the new node is not a transform then a
transform node will be created under the world, the new node will be
parented under it, and the functionset will be attached to the
transform. The transform will be returned.
"""
pass
def dagPath(*args, **kwargs):
"""
dagPath() -> MDagPath
Returns the DAG path to which this function set is attached. Raises a TypeError if the function set is attached to an MObject rather than a path.
"""
pass
def dagRoot(*args, **kwargs):
"""
dagRoot() -> MObject
Returns the root node of the first path leading to this node.
"""
pass
def duplicate(*args, **kwargs):
"""
duplicate(instance=False, instanceLeaf=False) -> MObject
Duplicates the DAG hierarchy rooted at the current node.
"""
pass
def fullPathName(*args, **kwargs):
"""
fullPathName() -> string
Returns the full path of the attached object, from the root of the DAG on down.
"""
pass
def getAllPaths(*args, **kwargs):
"""
getAllPaths() -> MDagPathArray
Returns all of the DAG paths which lead to the object to which this function set is attached.
"""
pass
def getPath(*args, **kwargs):
"""
getPath() -> MDagPath
Returns the DAG path to which this function set is attached, or the first path to the node if the function set is attached to an MObject.
"""
pass
def hasChild(*args, **kwargs):
"""
hasChild(node) -> bool
Returns True if the specified node is a child of this one.
"""
pass
def hasParent(*args, **kwargs):
"""
hasParent(node) -> bool
Returns True if the specified node is a parent of this one.
"""
pass
def instanceCount(*args, **kwargs):
"""
instanceCount(indirect) -> int
Returns the number of instances for this node.
"""
pass
def isChildOf(*args, **kwargs):
"""
isChildOf(node) -> bool
Returns True if the specified node is a parent of this one.
"""
pass
def isInstanced(*args, **kwargs):
"""
isInstanced(indirect=True) -> bool
Returns True if this node is instanced.
"""
pass
def isInstancedAttribute(*args, **kwargs):
"""
isInstancedAttribute(attr) -> bool
Returns True if the specified attribute is an instanced attribute of this node.
"""
pass
def isParentOf(*args, **kwargs):
"""
isParentOf(node) -> bool
Returns True if the specified node is a child of this one.
"""
pass
def parent(*args, **kwargs):
"""
parent(index) -> MObject
Returns the specified parent of this node.
"""
pass
def parentCount(*args, **kwargs):
"""
parentCount() -> int
Returns the number of parents this node has.
"""
pass
def partialPathName(*args, **kwargs):
"""
partialPathName() -> string
Returns the minimum path string necessary to uniquely identify the attached object.
"""
pass
def removeChild(*args, **kwargs):
"""
removeChild(node) -> self
Removes the child, specified by MObject, reparenting it under the world.
"""
pass
def removeChildAt(*args, **kwargs):
"""
removeChildAt(index) -> self
Removes the child, specified by index, reparenting it under the world.
"""
pass
def setObject(*args, **kwargs):
"""
setObject(MObject or MDagPath) -> self
Attaches the function set to the specified node or DAG path.
"""
pass
def transformationMatrix(*args, **kwargs):
"""
transformationMatrix() -> MMatrix
Returns the object space transformation matrix for this DAG node.
"""
pass
boundingBox = None
inModel = None
inUnderWorld = None
isInstanceable = None
isIntermediateObject = None
objectColor = None
useObjectColor = None
__new__ = None
kNextPos = 255
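
# Illustrative usage sketch (not part of the generated stub): creating a
# transform with MFnDagNode and inspecting its place in the DAG. The node
# name is illustrative.
def _example_dag_node():
    import maya.api.OpenMaya as om
    dagFn = om.MFnDagNode()
    xform = dagFn.create('transform', 'demoXform')
    print(dagFn.fullPathName())                    # e.g. '|demoXform'
    print(dagFn.parentCount(), dagFn.childCount())
    return xform
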
class MFnDoubleArrayData(MFnData):
"""
Function set for node data consisting of an array of doubles.
"""
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def array(*args, **kwargs):
"""
Returns the encapsulated array as an MDoubleArray.
"""
pass
def copyTo(*args, **kwargs):
"""
Replaces the elements of an array with those in the encapsulated array.
"""
pass
def create(*args, **kwargs):
"""
Creates a new double array data object.
"""
pass
def set(*args, **kwargs):
"""
Sets values in the encapsulated array.
"""
pass
__new__ = None
class MFnVectorArrayData(MFnData):
"""
Function set for node data consisting of an array of MVectors.
"""
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def array(*args, **kwargs):
"""
Returns the encapsulated array as an MVectorArray.
"""
pass
def copyTo(*args, **kwargs):
"""
Replaces the elements of an array with those in the encapsulated array.
"""
pass
def create(*args, **kwargs):
"""
Creates a new MVector array data object.
"""
pass
def set(*args, **kwargs):
"""
Sets values in the encapsulated array.
"""
pass
__new__ = None
class MFnNumericData(MFnData):
"""
Function set for non-simple numeric node data.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def create(*args, **kwargs):
"""
Creates a new numeric data object.
"""
pass
def getData(*args, **kwargs):
"""
Returns a list containing the attached data object's data.
"""
pass
def numericType(*args, **kwargs):
"""
Returns the type of data in the attached data object.
"""
pass
def setData(*args, **kwargs):
"""
Sets the value of the data in the attached data object.
"""
pass
__new__ = None
k2Double = 14
k2Float = 11
k2Int = 8
k2Long = 8
k2Short = 5
k3Double = 15
k3Float = 12
k3Int = 9
k3Long = 9
k3Short = 6
k4Double = 16
kAddr = 17
kBoolean = 1
kByte = 2
kChar = 3
kDouble = 13
kFloat = 10
kInt = 7
kInvalid = 0
kLast = 18
kLong = 7
kShort = 4
class MFnGeometryData(MFnData):
"""
This class is the function set for geometry data.
Geometry data adds matrix and grouping (set) information to regular
data and is used to pass geometry types such as mesh, lattice, and
NURBS shape data through DG connections.
__init__()
Initializes a new, empty MFnGeometryData object
__init__(MObject)
Initializes a new MFnGeometryData function set, attached
to the specified object.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def addObjectGroup(*args, **kwargs):
"""
addObjectGroup(id) -> self
Adds an object group with the given id to the object.
"""
pass
def addObjectGroupComponent(*args, **kwargs):
"""
addObjectGroupComponent(id, MObject component) -> self
Adds the members of the given component to the object group
with the given id.
"""
pass
def changeObjectGroupId(*args, **kwargs):
"""
changeObjectGroupId(sourceId, destId) -> self
Changes the id of the object group with the given id to the new id.
"""
pass
def copyObjectGroups(*args, **kwargs):
"""
copyObjectGroups(MObject inGeom) -> self
Copies the object groups from the given geometry data object.
"""
pass
def hasObjectGroup(*args, **kwargs):
"""
        hasObjectGroup(id) -> bool
Returns True if an object group with the given id is
contained in the data.
"""
pass
def objectGroup(*args, **kwargs):
"""
objectGroup(index) -> int
Returns the id of the index'th object group contained by the object.
"""
pass
def objectGroupComponent(*args, **kwargs):
"""
objectGroupComponent(id) -> MObject
Returns a component which contains the members of the object group
with the given id.
"""
pass
def objectGroupType(*args, **kwargs):
"""
objectGroupType(id) -> MFn Type constant
Returns the type of the component that the object group with the
given id contains.
"""
pass
def removeObjectGroup(*args, **kwargs):
"""
removeObjectGroup(id) -> self
Removes an object group with the given id from the object.
"""
pass
def removeObjectGroupComponent(*args, **kwargs):
"""
removeObjectGroupComponent(id, MObject component) -> self
Removes the members of the given component from the object group
with the given id.
"""
pass
def setObjectGroupComponent(*args, **kwargs):
"""
setObjectGroupComponent(id, MObject component) -> self
Sets the members of the object group with the given id
to be only those in the given component.
"""
pass
isIdentity = None
isNotIdentity = None
matrix = None
objectGroupCount = None
__new__ = None
class MFnUnitAttribute(MFnAttribute):
"""
Functionset for creating and working with angle, distance and time attributes.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def create(*args, **kwargs):
"""
Creates a new unit attribute, attaches it to the function set and returns it as an MObject.
"""
pass
def getMax(*args, **kwargs):
"""
Returns the attribute's hard maximum value.
"""
pass
def getMin(*args, **kwargs):
"""
Returns the attribute's hard minimum value.
"""
pass
def getSoftMax(*args, **kwargs):
"""
Returns the attribute's soft maximum value.
"""
pass
def getSoftMin(*args, **kwargs):
"""
Returns the attribute's soft minimum value.
"""
pass
def hasMax(*args, **kwargs):
"""
Returns True if the attribute has a hard maximum value.
"""
pass
def hasMin(*args, **kwargs):
"""
Returns True if the attribute has a hard minimum value.
"""
pass
def hasSoftMax(*args, **kwargs):
"""
Returns True if the attribute has a soft maximum value.
"""
pass
def hasSoftMin(*args, **kwargs):
"""
Returns True if the attribute has a soft minimum value.
"""
pass
def setMax(*args, **kwargs):
"""
Sets the attribute's hard maximum value.
"""
pass
def setMin(*args, **kwargs):
"""
Sets the attribute's hard minimum value.
"""
pass
def setSoftMax(*args, **kwargs):
"""
Sets the attribute's soft maximum value.
"""
pass
def setSoftMin(*args, **kwargs):
"""
Sets the attribute's soft minimum value.
"""
pass
def unitType(*args, **kwargs):
"""
Returns the type of data handled by the attribute.
"""
pass
default = None
__new__ = None
kAngle = 1
kDistance = 2
kInvalid = 0
kLast = 4
kTime = 3
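
# Illustrative usage sketch (not part of the generated stub): an angle
# attribute built with MFnUnitAttribute. The names are illustrative and the
# default is given here as a plain double in the attribute's unit.
def _example_unit_attribute():
    import maya.api.OpenMaya as om
    uAttr = om.MFnUnitAttribute()
    attr = uAttr.create('twist', 'tw', om.MFnUnitAttribute.kAngle, 0.0)
    uAttr.keyable = True
    return attr
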
class MFnIntArrayData(MFnData):
"""
Function set for node data consisting of an array of ints.
"""
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def array(*args, **kwargs):
"""
Returns the encapsulated array as an MIntArray.
"""
pass
def copyTo(*args, **kwargs):
"""
Replaces the elements of an array with those in the encapsulated array.
"""
pass
def create(*args, **kwargs):
"""
Creates a new int array data object.
"""
pass
def set(*args, **kwargs):
"""
Sets values in the encapsulated array.
"""
pass
__new__ = None
class MFnCompoundAttribute(MFnAttribute):
"""
Functionset for creating and working with compound attributes.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def addChild(*args, **kwargs):
"""
Add a child attribute.
"""
pass
def child(*args, **kwargs):
"""
Returns one of the attribute's children, specified by index.
"""
pass
def create(*args, **kwargs):
"""
Creates a new compound attribute, attaches it to the function set and returns it as an MObject.
"""
pass
def getAddAttrCmds(*args, **kwargs):
"""
Returns a list of MEL 'addAttr' commands capable of recreating the attribute and all of its children.
"""
pass
def numChildren(*args, **kwargs):
"""
Returns number of child attributes currently parented under the compound attribute.
"""
pass
def removeChild(*args, **kwargs):
"""
Remove a child attribute.
"""
pass
__new__ = None
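
# Illustrative usage sketch (not part of the generated stub): a compound
# attribute with two float children built with MFnCompoundAttribute and
# MFnNumericAttribute. The names are illustrative; adding the parent to a
# node with addAttribute() brings the children along with it.
def _example_compound_attribute():
    import maya.api.OpenMaya as om
    cAttr = om.MFnCompoundAttribute()
    nAttr = om.MFnNumericAttribute()
    parent = cAttr.create('valueRange', 'vr')
    loAttr = nAttr.create('rangeMin', 'rmn', om.MFnNumericData.kFloat, 0.0)
    hiAttr = nAttr.create('rangeMax', 'rmx', om.MFnNumericData.kFloat, 1.0)
    cAttr.addChild(loAttr)
    cAttr.addChild(hiAttr)
    return parent
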
class MFnMatrixData(MFnData):
"""
Function set for matrix node data.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def create(*args, **kwargs):
"""
Creates a new matrix data object.
"""
pass
def isTransformation(*args, **kwargs):
"""
Returns True if the attached object is an MTransformationMatrix, False if it is an MMatrix.
"""
pass
def matrix(*args, **kwargs):
"""
Returns the encapsulated matrix as an MMatrix.
"""
pass
def set(*args, **kwargs):
"""
Sets the value of the encapsulated matrix.
"""
pass
def transformation(*args, **kwargs):
"""
Returns the encapsulated matrix as an MTransformationMatrix.
"""
pass
__new__ = None
class MFnMeshData(MFnGeometryData):
"""
MFnMeshData allows the creation and manipulation of Mesh
data objects for use in the dependency graph.
__init__()
Initializes a new, empty MFnMeshData object
__init__(MObject)
Initializes a new MFnMeshData function set, attached
to the specified object.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def create(*args, **kwargs):
"""
create() -> MObject
Creates a new mesh data object, attaches it to this function set
and returns an MObject which references it.
"""
pass
__new__ = None
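
# Illustrative usage sketch (not part of the generated stub): building a
# single-quad mesh inside a mesh data object, the form typically passed
# through DG connections (for example from a plug-in node's compute()).
# The coordinates are illustrative; see MFnMesh further below for the
# create() signature used here.
def _example_mesh_data_quad():
    import maya.api.OpenMaya as om
    dataFn = om.MFnMeshData()
    dataObj = dataFn.create()          # empty kMeshData wrapper
    points = [om.MPoint(0, 0, 0), om.MPoint(1, 0, 0),
              om.MPoint(1, 1, 0), om.MPoint(0, 1, 0)]
    meshFn = om.MFnMesh()
    meshFn.create(points, [4], [0, 1, 2, 3], parent=dataObj)
    return dataObj
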
class MFnTransform(MFnDagNode):
"""
Function set for operating on transform nodes.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def clearRestPosition(*args, **kwargs):
"""
Clears the transform's rest position matrix.
"""
pass
def create(*args, **kwargs):
"""
Creates a new transform node and attaches it to the function set.
"""
pass
def enableLimit(*args, **kwargs):
"""
Enables or disables a specified limit type.
"""
pass
def isLimited(*args, **kwargs):
"""
Returns True if the specified limit type is enabled.
"""
pass
def limitValue(*args, **kwargs):
"""
Returns the value of the specified limit.
"""
pass
def resetFromRestPosition(*args, **kwargs):
"""
Resets the transform from its rest position matrix.
"""
pass
def restPosition(*args, **kwargs):
"""
Returns the transform's rest position matrix.
"""
pass
def rotateBy(*args, **kwargs):
"""
Adds an MEulerRotation or MQuaternion to the transform's rotation.
"""
pass
def rotateByComponents(*args, **kwargs):
"""
Adds to the transform's rotation using the individual components of an MEulerRotation or MQuaternion.
"""
pass
def rotateOrientation(*args, **kwargs):
"""
Returns the MQuaternion which orients the local rotation space.
"""
pass
def rotatePivot(*args, **kwargs):
"""
Returns the transform's rotate pivot.
"""
pass
def rotatePivotTranslation(*args, **kwargs):
"""
Returns the transform's rotate pivot translation.
"""
pass
def rotation(*args, **kwargs):
"""
Returns the transform's rotation as an MEulerRotation or MQuaternion.
"""
pass
def rotationComponents(*args, **kwargs):
"""
Returns the transform's rotation as the individual components of an MEulerRotation or MQuaternion.
"""
pass
def rotationOrder(*args, **kwargs):
"""
Returns the order of rotations when the transform's rotation is expressed as an MEulerRotation.
"""
pass
def scale(*args, **kwargs):
"""
Returns a list containing the transform's XYZ scale components.
"""
pass
def scaleBy(*args, **kwargs):
"""
Multiplies the transform's XYZ scale components by a sequence of three floats.
"""
pass
def scalePivot(*args, **kwargs):
"""
Returns the transform's scale pivot.
"""
pass
def scalePivotTranslation(*args, **kwargs):
"""
Returns the transform's scale pivot translation.
"""
pass
def setLimit(*args, **kwargs):
"""
Sets the value of the specified limit.
"""
pass
def setRestPosition(*args, **kwargs):
"""
Sets the transform's rest position matrix.
"""
pass
def setRotateOrientation(*args, **kwargs):
"""
Sets the MQuaternion which orients the local rotation space.
"""
pass
def setRotatePivot(*args, **kwargs):
"""
Sets the transform's rotate pivot.
"""
pass
def setRotatePivotTranslation(*args, **kwargs):
"""
Sets the transform's rotate pivot translation.
"""
pass
def setRotation(*args, **kwargs):
"""
Sets the transform's rotation using an MEulerRotation or MQuaternion.
"""
pass
def setRotationComponents(*args, **kwargs):
"""
Sets the transform's rotation using the individual components of an MEulerRotation or MQuaternion.
"""
pass
def setRotationOrder(*args, **kwargs):
"""
Sets the transform's rotation order.
"""
pass
def setScale(*args, **kwargs):
"""
Sets the transform's scale components.
"""
pass
def setScalePivot(*args, **kwargs):
"""
Sets the transform's scale pivot.
"""
pass
def setScalePivotTranslation(*args, **kwargs):
"""
Sets the transform's scale pivot translation.
"""
pass
def setShear(*args, **kwargs):
"""
Sets the transform's shear.
"""
pass
def setTransformation(*args, **kwargs):
"""
Sets the transform's attribute values to represent the given transformation matrix.
"""
pass
def setTranslation(*args, **kwargs):
"""
Sets the transform's translation.
"""
pass
def shear(*args, **kwargs):
"""
Returns a list containing the transform's shear components.
"""
pass
def shearBy(*args, **kwargs):
"""
Multiplies the transform's shear components by a sequence of three floats.
"""
pass
def transformation(*args, **kwargs):
"""
Returns the transformation matrix represented by this transform.
"""
pass
def translateBy(*args, **kwargs):
"""
Adds an MVector to the transform's translation.
"""
pass
def translation(*args, **kwargs):
"""
Returns the transform's translation as an MVector.
"""
pass
__new__ = None
kRotateMaxX = 13
kRotateMaxY = 15
kRotateMaxZ = 17
kRotateMinX = 12
kRotateMinY = 14
kRotateMinZ = 16
kScaleMaxX = 1
kScaleMaxY = 3
kScaleMaxZ = 5
kScaleMinX = 0
kScaleMinY = 2
kScaleMinZ = 4
kShearMaxXY = 7
kShearMaxXZ = 9
kShearMaxYZ = 11
kShearMinXY = 6
kShearMinXZ = 8
kShearMinYZ = 10
kTranslateMaxX = 19
kTranslateMaxY = 21
kTranslateMaxZ = 23
kTranslateMinX = 18
kTranslateMinY = 20
kTranslateMinZ = 22
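
# Illustrative usage sketch (not part of the generated stub): creating a
# transform node and editing its translate/scale values through
# MFnTransform. The values are illustrative.
def _example_transform():
    import maya.api.OpenMaya as om
    fnT = om.MFnTransform()
    fnT.create()
    fnT.setTranslation(om.MVector(1.0, 2.0, 3.0), om.MSpace.kTransform)
    fnT.setScale([2.0, 2.0, 2.0])
    print(fnT.translation(om.MSpace.kTransform))
    print(fnT.transformation().asMatrix())
    return fnT.object()
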
class MFnNurbsCurveData(MFnGeometryData):
"""
MFnNurbsCurveData allows the creation and manipulation of Nurbs Curve
data objects for use in the dependency graph.
__init__()
Initializes a new, empty MFnNurbsCurveData object
__init__(MObject)
Initializes a new MFnNurbsCurveData function set, attached
to the specified object.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def create(*args, **kwargs):
"""
create() -> MObject
Creates a new nurbs curve data object, attaches it to this function set
and returns an MObject which references it.
"""
pass
__new__ = None
class MFnMesh(MFnDagNode):
"""
Function set for operation on meshes (polygonal surfaces).
__init__()
Initializes a new, empty MFnMesh object.
__init__(MDagPath path)
Initializes a new MFnMesh object and attaches it to the DAG path
of a mesh node.
__init__(MObject nodeOrData)
Initializes a new MFnMesh object and attaches it to a mesh
node or mesh data object.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def addHoles(*args, **kwargs):
"""
addHoles(faceIndex, vertices, loopCounts, mergeVertices=True, pointTolerance=kPointTolerance) -> self
Adds holes to a mesh polygon.
        loopCounts is an array of vertex counts.
        The first entry gives the count of vertices that make up the first
        hole to add to the polygon (using that many entries from vertices).
        Each following entry in loopCounts gives the count of vertices that
        make up the next hole, using the following entries from vertices.
        Therefore the sum of the entries in loopCounts should equal the
        total length of vertices.
Note that holes should normally be specified with the opposite winding order
to the exterior polygon.
"""
pass
def addPolygon(*args, **kwargs):
"""
addPolygon(vertices, mergeVertices=True, pointTolerance=kPointTolerance, loopCounts=None) -> faceId
Adds a new polygon to the mesh, returning the index of the new
polygon. If mergeVertices is True and a new vertex is within
pointTolerance of an existing one, then they are 'merged' by reusing
the existing vertex and discarding the new one.
        loopCounts allows for polygons with holes. If supplied, it is an
        array of integer vertex counts. The first entry gives the count of
        vertices that make up the exterior of the polygon (using that many
        entries from vertices). The following entries in loopCounts give
        the count of vertices that make up each hole, using the following
        entries from vertices.
        Therefore the sum of the entries in loopCounts should equal the
        total length of vertices.
Note that holes should normally be specified with the opposite winding order
to the exterior polygon.
"""
pass
def allIntersections(*args, **kwargs):
"""
allIntersections(raySource, rayDirection, space, maxParam,
testBothDirections, faceIds=None, triIds=None, idsSorted=False,
accelParams=None, tolerance=kIntersectTolerance, sortHits=False)
-> (hitPoints, hitRayParams, hitFaces, hitTriangles, hitBary1s, hitBary2s)
Finds all intersection of a ray starting at raySource and travelling
in rayDirection with the mesh.
If faceIds is specified, then only those faces will be considered
for intersection. If both faceIds and triIds are given, then the
triIds will be interpreted as face-relative and each pair of entries
will be taken as a (face, triangle) pair to be considered for
intersection. Thus, the face-triangle pair (10, 0) means the first
triangle on face 10. If neither faceIds nor triIds is given, then
all face-triangles in the mesh will be considered.
The maxParam and testBothDirections flags can be used to control the
radius of the search around the raySource point.
The search proceeds by testing all applicable face-triangles looking
for intersections. If the accelParams parameter is given then the
mesh builds an intersection acceleration structure based on it. This
acceleration structure is used to speed up the intersection
operation, sometimes by a factor of several hundred over the non-
accelerated case. Once created, the acceleration structure is cached
and will be reused the next time this method (or anyIntersection()
or allIntersections()) is called with an identically-configured
MMeshIsectAccelParams object. If a different MMeshIsectAccelParams
object is used, then the acceleration structure will be deleted and
re-created according to the new settings. Once created, the
acceleration structure will persist until either the object is
destroyed (or rebuilt by a construction history operation), or the
freeCachedIntersectionAccelerator() method is called. The
cachedIntersectionAcceleratorInfo() and
globalIntersectionAcceleratorsInfo() methods provide useful
information about the resource usage of individual acceleration
structures, and of all such structures in the system.
If the ray hits the mesh, the details of the intersection points
will be returned as a tuple containing the following:
        * hitPoints (MFloatPointArray) - coordinates of the points hit, in
          the space specified by the caller.
        * hitRayParams (MFloatArray) - parametric distances along the ray
          to the points hit.
        * hitFaces (MIntArray) - IDs of the faces hit.
        * hitTriangles (MIntArray) - face-relative IDs of the triangles hit.
        * hitBary1s (MFloatArray) - first barycentric coordinates of the
          points hit. If the vertices of the hit triangle are (v1, v2, v3)
          then the barycentric coordinates are such that
          hitPoint = hitBary1*v1 + hitBary2*v2 + (1-hitBary1-hitBary2)*v3.
        * hitBary2s (MFloatArray) - second barycentric coordinates of the
          points hit.
If no point was hit then the arrays will all be empty.
"""
pass
def anyIntersection(*args, **kwargs):
"""
anyIntersection(raySource, rayDirection, space, maxParam,
testBothDirections, faceIds=None, triIds=None, idsSorted=False,
accelParams=None, tolerance=kIntersectTolerance)
-> (hitPoint, hitRayParam, hitFace, hitTriangle, hitBary1, hitBary2)
Finds any intersection of a ray starting at raySource and travelling
in rayDirection with the mesh.
If faceIds is specified, then only those faces will be considered
for intersection. If both faceIds and triIds are given, then the
triIds will be interpreted as face-relative and each pair of entries
will be taken as a (face, triangle) pair to be considered for
intersection. Thus, the face-triangle pair (10, 0) means the first
triangle on face 10. If neither faceIds nor triIds is given, then
all face-triangles in the mesh will be considered.
The maxParam and testBothDirections flags can be used to control the
radius of the search around the raySource point.
The search proceeds by testing all applicable face-triangles looking
for intersections. If the accelParams parameter is given then the
mesh builds an intersection acceleration structure based on it. This
acceleration structure is used to speed up the intersection
operation, sometimes by a factor of several hundred over the non-
accelerated case. Once created, the acceleration structure is cached
and will be reused the next time this method (or anyIntersection()
or allIntersections()) is called with an identically-configured
MMeshIsectAccelParams object. If a different MMeshIsectAccelParams
object is used, then the acceleration structure will be deleted and
re-created according to the new settings. Once created, the
acceleration structure will persist until either the object is
destroyed (or rebuilt by a construction history operation), or the
freeCachedIntersectionAccelerator() method is called. The
cachedIntersectionAcceleratorInfo() and
globalIntersectionAcceleratorsInfo() methods provide useful
information about the resource usage of individual acceleration
structures, and of all such structures in the system.
If the ray hits the mesh, the details of the intersection point
will be returned as a tuple containing the following:
        * hitPoint (MFloatPoint) - coordinates of the point hit, in
          the space specified by the caller.
        * hitRayParam (float) - parametric distance along the ray to
          the point hit.
        * hitFace (int) - ID of the face hit.
        * hitTriangle (int) - face-relative ID of the triangle hit.
        * hitBary1 (float) - first barycentric coordinate of the point hit.
          If the vertices of the hit triangle are (v1, v2, v3) then the
          barycentric coordinates are such that
          hitPoint = hitBary1*v1 + hitBary2*v2 + (1-hitBary1-hitBary2)*v3.
        * hitBary2 (float) - second barycentric coordinate of the point hit.
If no point was hit then the arrays will all be empty.
"""
pass
def assignColor(*args, **kwargs):
"""
assignColor(faceId, vertexIndex, colorId, colorSet='') -> self
Assigns a color from a colorSet to a specified vertex of a face.
"""
pass
def assignColors(*args, **kwargs):
"""
        assignColors(colorIds, colorSet='') -> self
Assigns colors to all of the mesh's face-vertices. The colorIds
sequence must contain an entry for every vertex of every face, in
face order, meaning that the entries for all the vertices of face 0
come first, followed by the entries for the vertices of face 1, etc.
"""
pass
def assignUV(*args, **kwargs):
"""
assignUV(faceId, vertexIndex, uvId, uvSet='') -> self
Assigns a UV coordinate from a uvSet to a specified vertex of a face.
"""
pass
def assignUVs(*args, **kwargs):
"""
assignUVs(uvCounts, uvIds, uvSet='') -> self
Assigns UV coordinates to the mesh's face-vertices.
uvCounts contains the number of UVs to assign for each of the mesh's
faces. That number must equal the number of vertices in the
corresponding face or be 0 to indicate that no UVs will be assigned
to that face.
"""
pass
def booleanOp(*args, **kwargs):
"""
booleanOp(Boolean Operation constant, MFnMesh, MFnMesh) -> self
Replaces this mesh's geometry with the result of a boolean operation
on the two specified meshes.
"""
pass
def cachedIntersectionAcceleratorInfo(*args, **kwargs):
"""
cachedIntersectionAcceleratorInfo() -> string
Retrieves a string that describes the intersection acceleration
structure for this object, if any. The string will be of the
following form:
10x10x10 uniform grid, (build time 0.5s), (memory footprint 2000KB)
It describes the configuration of the cached intersection
accelerator, as well as how long it took to build it, and how much
memory it is currently occupying. If the mesh has no cached
intersection accelerator, the empty string is returned.
"""
pass
def cleanupEdgeSmoothing(*args, **kwargs):
"""
cleanupEdgeSmoothing() -> self
Updates the mesh after setEdgeSmoothing has been done. This should
        be called only once, after all the desired edges have had their
smoothing set. If you don't call this method, the normals may not be
correct, and the object will look odd in shaded mode.
"""
pass
def clearBlindData(*args, **kwargs):
"""
clearBlindData(compType) -> self
clearBlindData(compType, blindDataId, compId=None, attr='') -> self
The first version deletes all blind data from all the mesh's
components of the given type (an MFn Type constant).
The second version deletes values of the specified blind data type
from the mesh's components of a given type. If a component ID is
provided then the data is only deleted from that component,
otherwise it is deleted from all of the mesh's components of the
specified type. If a blind data attribute name is provided then only
data for that attribute is deleted, otherwise data for all of the
blind data type's attributes is deleted.
"""
pass
def clearColors(*args, **kwargs):
"""
clearColors(colorSet='') -> self
Clears out all colors from a colorSet, and leaves behind an empty
        color set. This method should be used when you need to shrink the
actual size of the color set. In this case, the user should call
clearColors(), setColors() and then assignColors() to rebuild the
mapping info.
When called on mesh data, the colors are removed. When called on a
shape with no history, the colors are removed and the attributes are
set on the shape. When called on a shape with history, the
polyColorDel command is invoked and a polyColorDel node is created.
If no colorSet is specified the mesh's current color set will be used.
"""
pass
def clearUVs(*args, **kwargs):
"""
clearUVs(uvSet='') -> self
Clears out all uvs from a uvSet, and leaves behind an empty
        uv set. This method should be used when you need to shrink the
actual size of the uv set. In this case, the user should call
clearUVs(), setUVs() and then assignUVs() to rebuild the
mapping info.
When called on mesh data, the uvs are removed. When called on a
shape with no history, the uvs are removed and the attributes are
set on the shape. When called on a shape with history, the
polyMapDel command is invoked and a polyMapDel node is created.
If no uvSet is specified the mesh's current uv set will be used.
"""
pass
def closestIntersection(*args, **kwargs):
"""
closestIntersection(raySource, rayDirection, space, maxParam,
testBothDirections, faceIds=None, triIds=None, idsSorted=False,
accelParams=None, tolerance=kIntersectTolerance)
-> (hitPoint, hitRayParam, hitFace, hitTriangle, hitBary1, hitBary2)
Finds the closest intersection of a ray starting at raySource and
travelling in rayDirection with the mesh.
If faceIds is specified, then only those faces will be considered
for intersection. If both faceIds and triIds are given, then the
triIds will be interpreted as face-relative and each pair of entries
will be taken as a (face, triangle) pair to be considered for
intersection. Thus, the face-triangle pair (10, 0) means the first
triangle on face 10. If neither faceIds nor triIds is given, then
all face-triangles in the mesh will be considered.
The maxParam and testBothDirections flags can be used to control the
radius of the search around the raySource point.
The search proceeds by testing all applicable face-triangles looking
for intersections. If the accelParams parameter is given then the
mesh builds an intersection acceleration structure based on it. This
acceleration structure is used to speed up the intersection
operation, sometimes by a factor of several hundred over the non-
accelerated case. Once created, the acceleration structure is cached
and will be reused the next time this method (or anyIntersection()
or allIntersections()) is called with an identically-configured
MMeshIsectAccelParams object. If a different MMeshIsectAccelParams
object is used, then the acceleration structure will be deleted and
re-created according to the new settings. Once created, the
acceleration structure will persist until either the object is
destroyed (or rebuilt by a construction history operation), or the
freeCachedIntersectionAccelerator() method is called. The
cachedIntersectionAcceleratorInfo() and
globalIntersectionAcceleratorsInfo() methods provide useful
information about the resource usage of individual acceleration
structures, and of all such structures in the system.
If the ray hits the mesh, the details of the intersection point
will be returned as a tuple containing the following:
        * hitPoint (MFloatPoint) - coordinates of the point hit, in
          the space specified by the caller.
        * hitRayParam (float) - parametric distance along the ray to
          the point hit.
        * hitFace (int) - ID of the face hit.
        * hitTriangle (int) - face-relative ID of the triangle hit.
        * hitBary1 (float) - first barycentric coordinate of the point hit.
          If the vertices of the hit triangle are (v1, v2, v3) then the
          barycentric coordinates are such that
          hitPoint = hitBary1*v1 + hitBary2*v2 + (1-hitBary1-hitBary2)*v3.
        * hitBary2 (float) - second barycentric coordinate of the point hit.
If no point was hit then the arrays will all be empty.
"""
pass
def collapseEdges(*args, **kwargs):
"""
collapseEdges(seq of int) -> self
Collapses edges into vertices. The two vertices that create each
given edge are replaced in turn by one vertex placed at the average
        of the two initial vertices.
"""
pass
def collapseFaces(*args, **kwargs):
"""
collapseFaces(seq of int) -> self
Collapses faces into vertices. Adjacent faces will be collapsed
together into a single vertex. Non-adjacent faces will be collapsed
into their own, separate vertices.
"""
pass
def copy(*args, **kwargs):
"""
copy(MObject, parent=kNullObj) -> MObject
Creates a new mesh with the same geometry as the source. Raises
TypeError if the source is not a mesh node or mesh data object or it
contains an empty mesh.
If the parent is a kMeshData wrapper (e.g. from MFnMeshData.create())
then a mesh data object will be created and returned and the wrapper
will be set to reference it.
If the parent is a transform type node then a mesh node will be
created and parented beneath it and the return value will be the
mesh node.
If the parent is any other type of node a TypeError will be raised.
If no parent is provided then a transform node will be created and
returned and a mesh node will be created and parented under the
transform.
"""
pass
def copyInPlace(*args, **kwargs):
"""
copyInPlace(MObject) -> self
Replaces the current mesh's geometry with that from the source.
Raises TypeError if the source is not a mesh node or mesh data
object or it contains an empty mesh.
"""
pass
def copyUVSet(*args, **kwargs):
"""
copyUVSet(fromName, toName, modifier=None) -> string
Copies the contents of one UV set into another.
If the source UV set does not exist, or if it has the same name as
the destination, then no copy will be made.
        If the destination UV set exists then its contents will be replaced
by a copy of the source UV set.
If the destination UV set does not exist then a new UV set will be
created and the source UV set will be copied into it. The name of
the UV set will be that provided with a number appended to the end
to ensure uniqueness.
The final name of the destination UV set will be returned.
This method is only valid for functionsets which are attached to
mesh nodes, not mesh data.
"""
pass
def create(*args, **kwargs):
"""
create(vertices, polygonCounts, polygonConnects, uValues=None, vValues=None, parent=kNullObj) -> MObject
Creates a new polygonal mesh and sets this function set to operate
on it. This method is meant to be as efficient as possible and thus
assumes that all the given data is topologically correct.
If UV values are supplied both parameters must be given and they
must contain the same number of values, otherwise IndexError will be
raised. Note that the UVs are simply stored in the mesh, not
assigned to any vertices. To assign them use assignUVs().
If the parent is a kMeshData wrapper (e.g. from MFnMeshData.create())
then a mesh data object will be created and returned and the wrapper
will be set to reference it.
If the parent is a transform type node then a mesh node will be
created and parented beneath it and the return value will be the
mesh node.
If the parent is any other type of node a TypeError will be raised.
If no parent is provided then a transform node will be created and
returned and a mesh node will be created and parented under the
transform.
"""
pass
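# Example (hedged sketch): building a single quad with create(), using only
# the arguments documented above; all names are illustrative.
#
# import maya.api.OpenMaya as om
# vertices = [om.MPoint(0, 0, 0), om.MPoint(1, 0, 0),
#             om.MPoint(1, 1, 0), om.MPoint(0, 1, 0)]
# polygonCounts = [4]               # one face with four vertices
# polygonConnects = [0, 1, 2, 3]    # vertex IDs of that face, in order
# fn = om.MFnMesh()
# meshObj = fn.create(vertices, polygonCounts, polygonConnects)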
def createBlindDataType(*args, **kwargs):
"""
createBlindDataType(blindDataId, ((longName, shortName, typeName), ...)) -> self
Create a new blind data type with the specified attributes.
Each element of the attrs sequence is a tuple containing the long
name, short name and type name of the attribute. Valid type names
are 'int', 'float', 'double', 'boolean', 'string' or 'binary'.
Raises RuntimeError if the blind data id is already in use or an
invalid format was specified.
"""
pass
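# Example (hedged sketch): creating a blind data template and writing/reading
# an 'int' attribute on vertex 0, combining createBlindDataType(),
# setIntBlindData() and getIntBlindData() as documented in this class.
# 'meshFn' is a hypothetical MFnMesh; the blind data ID (60210) and the use
# of MFn.kMeshVertComponent as the component type are illustrative
# assumptions.
#
# import maya.api.OpenMaya as om
# blindDataId = 60210
# if not meshFn.isBlindDataTypeUsed(blindDataId):
#     meshFn.createBlindDataType(blindDataId,
#                                (('weight', 'wt', 'int'),))
# meshFn.setIntBlindData(0, om.MFn.kMeshVertComponent, blindDataId, 'wt', 7)
# value = meshFn.getIntBlindData(0, om.MFn.kMeshVertComponent,
#                                blindDataId, 'wt')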
def createColorSet(*args, **kwargs):
"""
createColorSet(name, clamped, rep=kRGBA, modifier=None, instances=None) -> string
Creates a new, empty color set for this mesh.
If no name is provided 'colorSet#' will be used, where # is a number
that makes the name unique for this mesh. If a name is provided but
it conflicts with that of an existing color set then a number will
be appended to the proposed name to make it unique.
The return value is the final name used for the new color set.
This method will only work when the functionset is attached to a
mesh node, not mesh data.
"""
pass
def createInPlace(*args, **kwargs):
"""
createInPlace(vertices, polygonCounts, polygonConnects) -> self
Replaces the existing polygonal mesh with a new one. This method is
meant to be as efficient as possible and thus assumes that all the
given data is topologically correct.
The vertices may be given as a sequence of MFloatPoint's or a
sequence of MPoint's, but not a mix of the two.
"""
pass
def createUVSet(*args, **kwargs):
"""
createUVSet(name, modifier=None, instances=None) -> string
Creates a new, empty UV set for this mesh.
If a UV set with the proposed name already exists then a number will be
appended to the proposed name to make it unique.
If the proposed name is empty then a name of the form uvSet# will be
used where '#' is a number chosen to ensure that the name is unique.
The name used for the UV set will be returned.
This method is only valid for functionsets which are attached to
mesh nodes, not mesh data.
"""
pass
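# Example (hedged sketch): creating a second UV set, copying the current set
# into it and making it current, combining createUVSet(), copyUVSet() and
# setCurrentUVSetName() as documented in this class. 'meshFn' is a
# hypothetical MFnMesh attached to a mesh node (not mesh data).
#
# newSet = meshFn.createUVSet('projectedUVs')
# finalName = meshFn.copyUVSet(meshFn.currentUVSetName(), newSet)
# meshFn.setCurrentUVSetName(finalName)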
def currentColorSetName(*args, **kwargs):
"""
currentColorSetName(instance=kInstanceUnspecified) -> string
Get the name of the 'current' color set. The current color set is
the one used for color operations when no color set is explicitly
specified.
On instanced meshes, color sets may be applied on a per-instance
basis or may be shared across all instances. When the color sets are
per-instance, the concept of the current color set has two levels of
granularity. Namely, the current color set applies to one or more
instances, plus there are other color sets in the same color set
family that apply to different instances. The instance argument is
used to indicate that if this is a per-instance color set, you are
interested in the name of the color set that applies to the
specified instance. When the index is not specified, the current
color set will be returned regardless of which instance it is for.
If there is no current color set, then an empty string will be
returned.
"""
pass
def currentUVSetName(*args, **kwargs):
"""
currentUVSetName(instance=kInstanceUnspecified) -> string
Get the name of the 'current' uv set. The current uv set is
the one used for uv operations when no uv set is explicitly
specified.
On instanced meshes, uv sets may be applied on a per-instance
basis or may be shared across all instances. When the uv sets are
per-instance, the concept of the current uv set has two levels of
granularity. Namely, the current uv set applies to one or more
instances, plus there are other uv sets in the same uv set
family that apply to different instances. The instance argument is
used to indicate that if this is a per-instance uv set, you are
interested in the name of the uv set that applies to the
specified instance. When the index is not specified, the current
uv set will be returned regardless of which instance it is for.
If there is no current uv set, then an empty string will be
returned.
"""
pass
def deleteColorSet(*args, **kwargs):
"""
deleteColorSet(colorSet, modifier=None, currentSelection=None) -> self
Deletes a color set from the mesh.
This method is only valid for functionsets which are attached to
mesh nodes, not mesh data.
"""
pass
def deleteEdge(*args, **kwargs):
"""
deleteEdge(edgeId, modifier=None) -> self
Deletes the specified edge.
"""
pass
def deleteFace(*args, **kwargs):
"""
deleteFace(faceId, modifier=None) -> self
Deletes the specified face.
"""
pass
def deleteUVSet(*args, **kwargs):
"""
deleteUVSet(uvSet, modifier=None, currentSelection=None) -> self
Deletes a uv set from the mesh.
This method is only valid for functionsets which are attached to
mesh nodes, not mesh data.
"""
pass
def deleteVertex(*args, **kwargs):
"""
deleteVertex(vertexId, modifier=None) -> self
Deletes the specified vertex.
"""
pass
def duplicateFaces(*args, **kwargs):
"""
duplicateFaces(faces, translation=None) -> self
Duplicates a set of faces and detaches them from the rest of the
mesh. The resulting mesh will contain one more independent piece of
geometry.
"""
pass
def extractFaces(*args, **kwargs):
"""
extractFaces(faces, translation=None) -> self
Detaches a set of faces from the rest of the mesh. The resulting
mesh will contain one more independent piece of geometry.
"""
pass
def extrudeEdges(*args, **kwargs):
"""
extrudeEdges(edges, extrusionCount=1, translation=None, extrudeTogether=True) -> self
Extrude the given edges along a vector. The resulting mesh will have
extra parallelograms coming out of the given edges and going to the
new extruded edges. The length of the new polygon is determined by
the length of the vector. The extrusionCount parameter is the number
of subsequent extrusions per edges and represents the number of
polygons that will be created from each given edge to the extruded
edges.
"""
pass
def extrudeFaces(*args, **kwargs):
"""
extrudeFaces(faces, extrusionCount=1, translation=None, extrudeTogether=True) -> self
Extrude the given faces along a vector. The resulting mesh will have
extra parallelograms coming out of the given faces and going to the
new extruded faces. The length of the new polygon is determined by
the length of the vector. The extrusionCount parameter is the number
of subsequent extrusions per faces and represents the number of
polygons that will be created from each given face to the extruded
faces.
"""
pass
def freeCachedIntersectionAccelerator(*args, **kwargs):
"""
freeCachedIntersectionAccelerator() -> self
If the mesh has a cached intersection accelerator structure, then
this routine forces it to be deleted. Ordinarily, these structures
are cached so that series of calls to the closestIntersection(),
allIntersections(), and anyIntersection() methods can reuse the same
structure. Once the client is finished with these intersection
operations, however, they are responsible for freeing the acceleration
structure, which is what this method does.
"""
pass
def generateSmoothMesh(*args, **kwargs):
"""
generateSmoothMesh(parent=kNullObj, options=None) -> MObject
Creates a new polygonal mesh which is a smoothed version of the one
to which the functionset is attached. If an options object is supplied
it will be used to direct the smoothing operation, otherwise the
mesh's Smooth Mesh Preview attributes will be used.
If the parent is a kMeshData wrapper (e.g. from MFnMeshData.create())
then a mesh data object will be created and returned.
If the parent is a transform type node then a mesh node will be
created and parented beneath it and the return value will be the
mesh node.
If the parent is any other type of node a TypeError will be raised.
If no parent is provided then a transform node will be created and
returned and a mesh node will be created and parented under the
transform.
Note that, unlike the create functions, this function does not set
the functionset to operate on the new mesh, but leaves it attached
to the original mesh.
"""
pass
def getAssignedUVs(*args, **kwargs):
"""
getAssignedUVs(uvSet='') -> (counts, uvIds)
Returns a tuple containing all of the UV assignments for the specified
UV set. The first element of the tuple is an array of counts giving
the number of UVs assigned to each face of the mesh. The count will
either be zero, indicating that that face's vertices do not have UVs
assigned, or else it will equal the number of the face's vertices.
The second element of the tuple is an array of UV IDs for all of the
face-vertices which have UVs assigned.
"""
pass
def getAssociatedColorSetInstances(*args, **kwargs):
"""
getAssociatedColorSetInstances(colorSet) -> MIntArray
Returns the instance numbers associated with the specified Color set.
If the color map is shared across all instances, an empty array will
be returned.
This method will only work if the functionset is attached to a mesh
node. It will raise RuntimeError if the functionset is attached to
mesh data.
"""
pass
def getAssociatedUVSetInstances(*args, **kwargs):
"""
getAssociatedUVSetInstances(uvSet) -> MIntArray
Returns the instance numbers associated with the specified UV set.
If the uv map is shared across all instances, an empty array will be
returned.
This method will only work if the functionset is attached to a mesh
node. It will raise RuntimeError if the functionset is attached to
mesh data.
"""
pass
def getAssociatedUVSetTextures(*args, **kwargs):
"""
getAssociatedUVSetTextures(uvSet) -> MObjectArray
Returns the texture nodes which are using the specified UV set. If
the texture has a 2d texture placement, the texture, and not the
placement will be returned.
This method will only work if the functionset is attached to a mesh
node. It will raise RuntimeError if the functionset is attached to
mesh data.
"""
pass
def getBinaryBlindData(*args, **kwargs):
"""
getBinaryBlindData(compId, compType, blindDataId, attr) -> string
getBinaryBlindData(compType, blindDataId, attr)
-> (MIntArray, [string, string, ...])
The first version returns the value of the specified blind data
attribute from the specified mesh component.
The second version returns a tuple containing an array of component
IDs and an array of values for the specified blind data attribute
for all of the mesh's components of the specified type.
Both versions raise RuntimeError if the attribute is not of 'binary'
type.
"""
pass
def getBinormals(*args, **kwargs):
"""
getBinormals(space=MSpace.kObject, uvSet='') -> MFloatVectorArray
Returns the binormal vectors for all face-vertices.
This method is not threadsafe.
"""
pass
def getBlindDataAttrNames(*args, **kwargs):
"""
getBlindDataAttrNames(blindDataId) -> ((longName, shortName, typeName), ...)
Returns a tuple listing the attributes of the given blind data type.
Each element of the tuple is itself a tuple containing the long
name, short name and type name of the attribute. Type names can be
'int', 'float', 'double', 'boolean', 'string' or 'binary'.
"""
pass
def getBlindDataTypes(*args, **kwargs):
"""
getBlindDataTypes(MFn Type constant) -> MIntArray
Returns all the blind data ID's associated with the given component
type on this mesh.
"""
pass
def getBoolBlindData(*args, **kwargs):
"""
getBoolBlindData(compId, compType, blindDataId, attr) -> bool
getBoolBlindData(compType, blindDataId, attr) -> (MIntArray, MIntArray)
The first version returns the value of the specified blind data
attribute from the specified mesh component.
The second version returns a tuple containing an array of component
IDs and an array of values for the specified blind data attribute
for all of the mesh's components of the specified type.
Both versions raise RuntimeError if the attribute is not of
'boolean' type.
"""
pass
def getClosestNormal(*args, **kwargs):
"""
getClosestNormal(MPoint, space=MSpace.kObject) -> (MVector, int)
Returns a tuple containing the normal at the closest point on the
mesh to the given point and the ID of the face in which that closest
point lies.
"""
pass
def getClosestPoint(*args, **kwargs):
"""
getClosestPoint(MPoint, space=MSpace.kObject) -> (MPoint, int)
Returns a tuple containing the closest point on the mesh to the
given point and the ID of the face in which that closest point lies.
This method is not threadsafe.
"""
pass
def getClosestPointAndNormal(*args, **kwargs):
"""
getClosestPointAndNormal(MPoint, space=MSpace.kObject)
-> (MPoint, MVector, int)
Returns a tuple containing the closest point on the mesh to the
given point, the normal at that point, and the ID of the face in
which that point lies.
This method is not threadsafe.
"""
pass
def getColor(*args, **kwargs):
"""
getColor(colorId, colorSet='') -> MColor
Returns a color from a colorSet. Raises IndexError if the colorId is
out of range.
"""
pass
def getColorIndex(*args, **kwargs):
"""
getColorIndex(faceId, localVertexId, colorSet='') -> int
Returns the index into the specified colorSet of the color used by a
specific face-vertex. This can be used to index into the sequence
returned by getColors().
"""
pass
def getColorRepresentation(*args, **kwargs):
"""
getColorRepresentation(colorSet) -> Color Representation constant
Returns the Color Representation used by the specified color set.
"""
pass
def getColorSetFamilyNames(*args, **kwargs):
"""
getColorSetFamilyNames() -> (string, ...)
Returns the names of all of the color set families on this object. A
color set family is a set of per-instance sets with the same name
with each individual set applying to one or more instances. A set
which is shared across all instances will be the sole member of its
family.
Given a color set family name, getColorSetsInFamily() may be used to
determine the names of the associated individual sets.
"""
pass
def getColorSetNames(*args, **kwargs):
"""
getColorSetNames() -> (string, ...)
Returns the names of all the color sets on this object.
"""
pass
def getColorSetsInFamily(*args, **kwargs):
"""
getColorSetsInFamily(familyName) -> (string, ...)
Returns the names of all of the color sets that belong to the
specified family. Per-instance sets will have multiple sets in a
family, with each individual set applying to one or more instances.
A set which is shared across all instances will be the sole member
of its family and will share the same name as its family.
"""
pass
def getColors(*args, **kwargs):
"""
getColors(colorSet='') -> MColorArray
Returns all of the colors in a colorSet. If no colorSet is specified
then the default colorSet is used.
Use the index returned by getColorIndex() to access the returned
array.
"""
pass
def getConnectedSetsAndMembers(*args, **kwargs):
"""
getConnectedSetsAndMembers(instance, renderableSetsOnly) -> (MObjectArray, MObjectArray)
Returns a tuple containing an array of sets and an array of the
components of the mesh which are in those sets. If a component has
no elements in it that means that the entire mesh is in the set.
This method will only work if the functionset is attached to a mesh
node. It will raise RuntimeError if the functionset is attached to
mesh data.
"""
pass
def getConnectedShaders(*args, **kwargs):
"""
getConnectedShaders(instance) -> (MObjectArray, MIntArray)
Returns a tuple containing an array of shaders (sets) and an array
of ints mapping the mesh's polygons onto those shaders. For each
polygon in the mesh there will be corresponding value in the second
array. If it is -1 that means that the polygon is not assigned to a
shader, otherwise it indicates the index into the first array of the
shader to which that polygon is assigned.
This method will only work if the functionset is attached to a mesh
node. It will raise RuntimeError if the functionset is attached to
mesh data.
"""
pass
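# Example (hedged sketch): mapping each polygon of instance 0 to its shading
# group using getConnectedShaders(), as documented above. 'meshFn' is a
# hypothetical MFnMesh attached to a mesh node.
#
# shaders, faceToShader = meshFn.getConnectedShaders(0)
# for faceId, shaderIndex in enumerate(faceToShader):
#     if shaderIndex == -1:
#         print('face %d has no shader assigned' % faceId)
#     else:
#         print('face %d -> shader %d' % (faceId, shaderIndex))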
def getCreaseEdges(*args, **kwargs):
"""
getCreaseEdges() -> (MUintArray, MDoubleArray)
Returns a tuple containing two arrays. The first contains the mesh-
relative/global IDs of the mesh's creased edges and the second
contains the associated crease data.
Please note that making effective use of the creasing data in
software outside of Maya may require a license under patents owned
by Pixar(R).
"""
pass
def getCreaseVertices(*args, **kwargs):
"""
getCreaseVertices() -> (MUintArray, MDoubleArray)
Returns a tuple containing two arrays. The first contains the mesh-
relative/global IDs of the mesh's creased vertices and the second
contains the associated crease data.
Please note that making effective use of the creasing data in
software outside of Maya may require a license under patents owned
by Pixar(R).
"""
pass
def getDoubleBlindData(*args, **kwargs):
"""
getDoubleBlindData(compId, compType, blindDataId, attr) -> float
getDoubleBlindData(compType, blindDataId, attr) -> (MIntArray, MDoubleArray)
The first version returns the value of the specified blind data
attribute from the specified mesh component.
The second version returns a tuple containing an array of component
IDs and an array of values for the specified blind data attribute
for all of the mesh's components of the specified type.
Both versions raise RuntimeError if the attribute is not of
'double' type.
"""
pass
def getEdgeVertices(*args, **kwargs):
"""
getEdgeVertices(edgeId) -> (int, int)
Returns a tuple containing the mesh-relative/global IDs of the
edge's two vertices. The indices can be used to refer to the
elements in the array returned by the getPoints() method.
"""
pass
def getFaceAndVertexIndices(*args, **kwargs):
"""
getFaceAndVertexIndices(faceVertexIndex, localVertex=True) -> (int, int)
Returns a tuple containing the faceId and vertexIndex represented by
the given face-vertex index. This is the reverse of the operation
performed by getFaceVertexIndex().
If localVertex is True then the returned vertexIndex is the face-
relative/local index, otherwise it is the mesh-relative/global index.
"""
pass
def getFaceNormalIds(*args, **kwargs):
"""
getFaceNormalIds(faceId) -> MIntArray
Returns the IDs of the normals for all the vertices of a given face.
These IDs can be used to index into the arrays returned by getNormals().
"""
pass
def getFaceUVSetNames(*args, **kwargs):
"""
getFaceUVSetNames(faceId) -> (string, ...)
Returns the names of all of the uv sets mapped to the specified face.
This method is not threadsafe.
"""
pass
def getFaceVertexBinormal(*args, **kwargs):
"""
getFaceVertexBinormal(faceId, vertexId, space=MSpace.kObject, uvSet='') -> MVector
Returns the binormal vector at a given face vertex.
This method is not threadsafe.
"""
pass
def getFaceVertexBinormals(*args, **kwargs):
"""
getFaceVertexBinormals(faceId, space=MSpace.kObject, uvSet='') -> MFloatVectorArray
Returns all the per-vertex-per-face binormals for a given face.
This method is not threadsafe.
"""
pass
def getFaceVertexColors(*args, **kwargs):
"""
getFaceVertexColors(colorSet='', defaultUnsetColor=None) -> MColorArray
Returns colors for all the mesh's face-vertices.
The colors are returned in face order: e.g. F0V0, F0V1.. F0Vn, F1V0,
etc... Use the index returned by getFaceVertexIndex() if you wish to
index directly into the returned color array.
If no face has color for that vertex, the entry returned will be
defaultUnsetColor. If a color was set for some but not all the faces
for that vertex, the ones where the color has not been explicitly set
will return (0,0,0). If a vertex has shared color, the same value
will be set for all its vertex/faces.
If the colorSet is not specified, the default color set will be used.
If the defaultUnsetColor is not given, then (-1, -1, -1, -1) will be
used.
"""
pass
def getFaceVertexIndex(*args, **kwargs):
"""
getFaceVertexIndex(faceId, vertexIndex, localVertex=True) -> int
Returns the index for a specific face-vertex into an array of face-
vertex values, such as those returned by getFaceVertexBinormals(),
getFaceVertexColors(), getFaceVertexNormals(), etc.
The values in the target arrays are presumed to be in face order:
F0V0, F0V1.. F0Vn, F1V0, etc...
If localVertex is True then vertexIndex must be a face-relative/local
index. If localVertex is False then vertexIndex must be a mesh-
relative/global index.
The opposite operation is performed by the getFaceAndVertexIndices()
method.
"""
pass
def getFaceVertexNormal(*args, **kwargs):
"""
getFaceVertexNormal(faceId, vertexId, space=MSpace.kObject) -> MVector
Returns the per-vertex-per-face normal for a given face and vertex.
This method is not threadsafe.
"""
pass
def getFaceVertexNormals(*args, **kwargs):
"""
getFaceVertexNormals(faceId, space=MSpace.kObject) -> MFloatVectorArray
Returns the normals for a given face.
This method is not threadsafe.
"""
pass
def getFaceVertexTangent(*args, **kwargs):
"""
getFaceVertexTangent(faceId, vertexId, space=MSpace.kObject, uvSet='') -> MVector
Return the normalized tangent vector at a given face vertex.
The tangent is defined as the surface tangent of the polygon running
in the U direction defined by the uv map.
This method is not threadsafe.
"""
pass
def getFaceVertexTangents(*args, **kwargs):
"""
getFaceVertexTangents(faceId, space=MSpace.kObject, uvSet='') -> MFloatVectorArray
Returns all the per-vertex-per-face tangents for a given face.
The tangent is defined as the surface tangent of the polygon running
in the U direction defined by the uv map.
This method is not threadsafe.
"""
pass
def getFloatBlindData(*args, **kwargs):
"""
getFloatBlindData(compId, compType, blindDataId, attr) -> float
getFloatBlindData(compType, blindDataId, attr) -> (MIntArray, MFloatArray)
The first version returns the value of the specified blind data
attribute from the specified mesh component.
The second version returns a tuple containing an array of component
IDs and an array of values for the specified blind data attribute
for all of the mesh's components of the specified type.
Both versions raise RuntimeError if the attribute is not of
'float' type.
"""
pass
def getFloatPoints(*args, **kwargs):
"""
getFloatPoints(space=MSpace.kObject) -> MFloatPointArray
Returns an MFloatPointArray containing the mesh's vertices.
"""
pass
def getHoles(*args, **kwargs):
"""
getHoles() -> ((face, (v1, v2, ...)), (face, (v1, v2, ...)), ...)
Returns a tuple describing the holes in the mesh. Each element of the
tuple is itself a tuple. The first element of the sub-tuple is the
integer ID of the face in which the hole occurs. The second element
of the sub-tuple is another tuple containing the mesh-relative/global
IDs of the vertices which make up the hole.
Take the following return value as an example:
((3, (7, 2, 6)), (5, (11, 10, 3, 4)))
This says that the mesh has two holes. The first hole is in face 3
and consists of vertices 7, 2 and 6. The second hole is in face 5 and
consists of vertices 11, 10, 3 and 4.
"""
pass
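# Example (hedged sketch): walking the return value of getHoles() as
# described above. 'meshFn' is a hypothetical MFnMesh.
#
# for faceId, holeVertices in meshFn.getHoles():
#     print('face %d has a hole through vertices %s'
#           % (faceId, list(holeVertices)))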
def getIntBlindData(*args, **kwargs):
"""
getIntBlindData(compId, compType, blindDataId, attr) -> int
getIntBlindData(compType, blindDataId, attr) -> (MIntArray, MIntArray)
The first version returns the value of the specified blind data
attribute from the specified mesh component.
The second version returns a tuple containing an array of component
IDs and an array of values for the specified blind data attribute
for all of the mesh's components of the specified type.
Both versions raise RuntimeError if the attribute is not of
'int' type.
"""
pass
def getInvisibleFaces(*args, **kwargs):
"""
getInvisibleFaces() -> MUintArray
Returns the invisible faces of the mesh. Invisible faces are like
lightweight holes in that they are not rendered but do not require
additional geometry the way that holes do. They have the advantage
over holes that if the mesh is smoothed then their edges will be
smoothed as well, while holes will retain their hard edges.
Invisible faces can be set using the setInvisibleFaces() method or
the polyHole command.
"""
pass
def getNormalIds(*args, **kwargs):
"""
getNormalIds() -> (MIntArray, MIntArray)
Returns the normal IDs for all of the mesh's polygons as a tuple of
two int arrays. The first array contains the number of vertices for
each polygon and the second contains the normal IDs for each polygon-
vertex. These IDs can be used to index into the array returned by
getNormals().
"""
pass
def getNormals(*args, **kwargs):
"""
getNormals(space=MSpace.kObject) -> MFloatVectorArray
Returns a copy of the mesh's normals. The normals are the per-polygon
per-vertex normals. To find the normal for a particular vertex-face,
use getFaceNormalIds() to get the index into the array.
This method is not threadsafe.
"""
pass
def getPoint(*args, **kwargs):
"""
getPoint(vertexId, space=MSpace.kObject) -> MPoint
Returns the position of specified vertex.
"""
pass
def getPointAtUV(*args, **kwargs):
"""
getPointAtUV(faceId, u, v, space=MSpace.kObject, uvSet='', tolerance=0.0) -> MPoint
Returns the position of the point at the given UV value in the
specified face.
This method is not threadsafe.
"""
pass
def getPoints(*args, **kwargs):
"""
getPoints(space=MSpace.kObject) -> MPointArray
Returns a copy of the mesh's vertex positions as an MPointArray.
"""
pass
def getPolygonNormal(*args, **kwargs):
"""
getPolygonNormal(polygonId, space=MSpace.kObject) -> MVector
Returns the per-polygon normal for the given polygon.
This method is not threadsafe.
"""
pass
def getPolygonTriangleVertices(*args, **kwargs):
"""
getPolygonTriangleVertices(polygonId, triangleId) -> (int, int, int)
Returns the mesh-relative/global IDs of the 3 vertices of the
specified triangle of the specified polygon. These IDs can be used
to index into the arrays returned by getPoints() and getFloatPoints().
"""
pass
def getPolygonUV(*args, **kwargs):
"""
getPolygonUV(polygonId, vertexId, uvSet='') -> (float, float)
Returns a tuple containing the U and V values at a specified vertex
of a specified polygon.
This method is not threadsafe.
"""
pass
def getPolygonUVid(*args, **kwargs):
"""
getPolygonUVid(polygonId, vertexId, uvSet='') -> int
Returns the ID of the UV at a specified vertex of a specified polygon.
This method is not threadsafe.
"""
pass
def getPolygonVertices(*args, **kwargs):
"""
getPolygonVertices(polygonId) -> MIntArray
Returns the mesh-relative/global vertex IDs of the specified polygon.
These IDs can be used to index into the arrays returned by getPoints()
and getFloatPoints().
"""
pass
def getSmoothMeshDisplayOptions(*args, **kwargs):
"""
getSmoothMeshDisplayOptions() -> MMeshSmoothOptions
Returns the options currently in use when smoothing the mesh for display.
"""
pass
def getStringBlindData(*args, **kwargs):
"""
getStringBlindData(compId, compType, blindDataId, attr) -> string
getStringBlindData(compType, blindDataId, attr)
-> (MIntArray, [string, string, ...])
The first version returns the value of the specified blind data
attribute from the specified mesh component.
The second version returns a tuple containing an array of component
IDs and an array of values for the specified blind data attribute
for all of the mesh's components of the specified type.
Both versions raise RuntimeError if the attribute is not of 'string'
type.
"""
pass
def getTangentId(*args, **kwargs):
"""
getTangentId(faceId, vertexId) -> int
Returns the ID of the tangent for a given face and vertex.
"""
pass
def getTangents(*args, **kwargs):
"""
getTangents(space=MSpace.kObject, uvSet='') -> MFloatVectorArray
Return the tangent vectors for all face vertices. The tangent is
defined as the surface tangent of the polygon running in the U
direction defined by the uv map.
This method is not threadsafe.
"""
pass
def getTriangles(*args, **kwargs):
"""
getTriangles() -> (MIntArray, MIntArray)
Returns a tuple describing the mesh's triangulation. The first
element of the tuple is an array giving the number of triangles for
each of the mesh's polygons. The second element gives the IDs of the
vertices of all the triangles.
"""
pass
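# Example (hedged sketch): reconstructing per-polygon triangles from the two
# arrays returned by getTriangles(), as described above. 'meshFn' is a
# hypothetical MFnMesh.
#
# triangleCounts, triangleVertices = meshFn.getTriangles()
# offset = 0
# for polyId, triCount in enumerate(triangleCounts):
#     for t in range(triCount):
#         v0 = triangleVertices[offset]
#         v1 = triangleVertices[offset + 1]
#         v2 = triangleVertices[offset + 2]
#         offset += 3
#         print('poly %d, triangle %d: %d %d %d' % (polyId, t, v0, v1, v2))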
def getUV(*args, **kwargs):
"""
getUV(uvId, uvSet='') -> (float, float)
Returns a tuple containing the u and v values of the specified UV.
"""
pass
def getUVAtPoint(*args, **kwargs):
"""
getUVAtPoint(point, space=MSpace.kObject, uvSet='') -> (float, float, int)
Returns a tuple containing the u and v coordinates of the point on
the mesh closest to the given point, and the ID of the face
containing that closest point.
This method is not threadsafe.
"""
pass
def getUVSetFamilyNames(*args, **kwargs):
"""
getUVSetFamilyNames() -> (string, ...)
Returns the names of all of the uv set families on this object. A
uv set family is a set of per-instance sets with the same name
with each individual set applying to one or more instances. A set
which is shared across all instances will be the sole member of its
family.
Given a uv set family name, getUVSetsInFamily() may be used to
determine the names of the associated individual sets.
"""
pass
def getUVSetNames(*args, **kwargs):
"""
getUVSetNames() -> (string, ...)
Returns the names of all the uv sets on this object.
"""
pass
def getUVSetsInFamily(*args, **kwargs):
"""
getUVSetsInFamily(familyName) -> (string, ...)
Returns the names of all of the uv sets that belong to the
specified family. Per-instance sets will have multiple sets in a
family, with each individual set applying to one or more instances.
A set which is shared across all instances will be the sole member
of its family and will share the same name as its family.
"""
pass
def getUVs(*args, **kwargs):
"""
getUVs(uvSet='') -> (MFloatArray, MFloatArray)
Returns a tuple containing an array of U values and an array of V
values, representing all of the UVs for the given UV set.
"""
pass
def getUvShellsIds(*args, **kwargs):
"""
getUvShellsIds(uvSet='') -> (int, MIntArray)
Returns a tuple describing how the specified UV set's UVs
are grouped into shells. The first element of the tuple is the number
of distinct shells. The second element of the tuple is an array of
shell indices, one per uv, indicating which shell that uv is part of.
"""
pass
def getVertexColors(*args, **kwargs):
"""
getVertexColors(colorSet='', defaultUnsetColor=None) -> MColorArray
Gets colors for all vertices of the given colorSet. If no face has
color for that vertex, the entry returned will be defaultUnsetColor.
If a color was set for some or all the faces for that vertex, an
average of those vertex/face values where the color has been set will
be returned.
If the colorSet is not specified, the default color set will be used.
If the defaultUnsetColor is not given, then (-1, -1, -1, -1) will be
used.
"""
pass
def getVertexNormal(*args, **kwargs):
"""
getVertexNormal(vertexId, angleWeighted, space=MSpace.kObject) -> MVector
Returns the normal at the given vertex. The returned normal is a
single per-vertex normal, so unshared normals at a vertex will be
averaged.
If angleWeighted is set to true, the normals are computed by an
average of surrounding face normals weighted by the angle subtended
by the face at the vertex. If angleWeighted is set to false, a simple
average of surrounding face normals is returned.
The simple average evaluation is significantly faster than the angle-
weighted average.
This method is not threadsafe.
"""
pass
def getVertexNormals(*args, **kwargs):
"""
getVertexNormals(angleWeighted, space=MSpace.kObject) -> MFloatVectorArray
Returns all the vertex normals. The returned normals are per-vertex
normals, so unshared normals at a vertex will be averaged.
If angleWeighted is set to True, the normals are computed by an
average of surrounding face normals weighted by the angle subtended
by the face at the vertex. If angleWeighted is set to false, a simple
average of surrounding face normals is returned.
The simple average evaluation is significantly faster than the angle-
weighted average.
This method is not threadsafe.
"""
pass
def getVertices(*args, **kwargs):
"""
getVertices() -> (MIntArray, MIntArray)
Returns the mesh-relative/global vertex IDs for all of the mesh's
polygons as a tuple of two int arrays. The first array contains the
number of vertices for each polygon and the second contains the mesh-
relative IDs for each polygon-vertex. These IDs can be used to index
into the arrays returned by getPoints() and getFloatPoints().
"""
pass
def hasAlphaChannels(*args, **kwargs):
"""
hasAlphaChannels(colorSet) -> bool
Returns True if the color set has an alpha channel.
"""
pass
def hasBlindData(*args, **kwargs):
"""
hasBlindData(compType, compId=None, blindDataId=None) -> bool
Returns true if any component of the given type on this mesh has
blind data. If a component ID is provided then only that particular
component is checked. If a blind data ID is provided then only blind
data of that type is checked.
"""
pass
def hasColorChannels(*args, **kwargs):
"""
hasColorChannels(colorSet) -> bool
Returns True if the color set has RGB channels.
"""
pass
def isBlindDataTypeUsed(*args, **kwargs):
"""
isBlindDataTypeUsed(blindDataId) -> bool
Returns True if the blind data type is already in use anywhere in the scene.
"""
pass
def isColorClamped(*args, **kwargs):
"""
isColorClamped(colorSet) -> bool
Returns True if the color sets RGBA components are clamped to the
range 0 to 1.
"""
pass
def isColorSetPerInstance(*args, **kwargs):
"""
isColorSetPerInstance(colorSet) -> bool
Returns True if the color set is per-instance, and False if it is
shared across all instances.
"""
pass
def isEdgeSmooth(*args, **kwargs):
"""
isEdgeSmooth(edgeId) -> bool
Returns True if the edge is smooth, False if it is hard.
"""
pass
def isNormalLocked(*args, **kwargs):
"""
isNormalLocked(normalId) -> bool
Returns True if the normal is locked, False otherwise.
"""
pass
def isPolygonConvex(*args, **kwargs):
"""
isPolygonConvex(faceId) -> bool
Returns True if the polygon is convex, False if it is concave.
"""
pass
def isUVSetPerInstance(*args, **kwargs):
"""
isUVSetPerInstance(uvSet) -> bool
Returns True if the UV set is per-instance, and False if it is shared
across all instances.
"""
pass
def lockFaceVertexNormals(*args, **kwargs):
"""
lockFaceVertexNormals(seq of faceIds, seq of vertIds) -> self
Locks the normals for the given face/vertex pairs.
"""
pass
def lockVertexNormals(*args, **kwargs):
"""
lockVertexNormals(sequence of vertIds) -> self
Locks the shared normals for the specified vertices.
"""
pass
def numColors(*args, **kwargs):
"""
numColors(colorSet='') -> int
Returns the number of colors in the given color set. If no color set
is specified then the mesh's current color set will be used.
"""
pass
def numUVs(*args, **kwargs):
"""
numUVs(uvSet='') -> int
Returns the number of UVs (texture coordinates) in the given UV set.
If no UV set is specified then the mesh's current UV set will be used.
"""
pass
def onBoundary(*args, **kwargs):
"""
onBoundary(faceId) -> bool
Returns true if the face is on the border of the mesh, meaning that
one or more of its edges is a border edge.
"""
pass
def polygonVertexCount(*args, **kwargs):
"""
polygonVertexCount(faceId) -> int
Returns the number of vertices in the given polygon. Raises
ValueError if the polygon ID is invalid.
"""
pass
def removeFaceColors(*args, **kwargs):
"""
removeFaceColors(seq of faceIds) -> self
Removes colors from all vertices of the specified faces.
"""
pass
def removeFaceVertexColors(*args, **kwargs):
"""
removeFaceVertexColors(seq of faceIds, seq of vertexIds) -> self
Removes colors from the specified face/vertex pairs.
"""
pass
def removeVertexColors(*args, **kwargs):
"""
removeVertexColors(seq of vertexIds) -> self
Removes colors from the specified vertices in all of the faces which
share those vertices.
"""
pass
def renameUVSet(*args, **kwargs):
"""
renameUVSet(origName, newName, modifier=None) -> self
Renames a UV set. The set must exist and the new name cannot be the
same as that of an existing set.
This method is only valid for functionsets which are attached to mesh
nodes, not mesh data.
"""
pass
def setBinaryBlindData(*args, **kwargs):
"""
setBinaryBlindData(compId, compType, blindDataId, attr, data) -> self
setBinaryBlindData(seq of compId, compType, blindDataId, attr, data) -> self
The first version sets the value of a 'binary' blind data attribute
on a single component of the mesh. The data must be a single string.
The second version sets the value of a 'binary' blind data attribute
on multiple components of the mesh. If the data is a sequence of
strings then it must provide a value for each component in compIds.
If it is a single string then all of the specified components will
have their blind data set to that value.
"""
pass
def setBoolBlindData(*args, **kwargs):
"""
setBoolBlindData(compId, compType, blindDataId, attr, data) -> self
setBoolBlindData(seq of compId, compType, blindDataId, attr, data) -> self
The first version sets the value of a 'boolean' blind data attribute
on a single component of the mesh. The data must be a single boolean.
The second version sets the value of a 'boolean' blind data attribute
on multiple components of the mesh. If the data is a sequence of
booleans then it must provide a value for each component in compIds.
If it is a single boolean then all of the specified components will
have their blind data set to that value.
"""
pass
def setColor(*args, **kwargs):
"""
setColor(colorId, MColor, colorSet='', rep=kRGBA) -> self
Sets a color in the specified colorSet. If no colorSet is given the
current colorSet will be used. If the colorId is greater than or
equal to numColors() then the colorSet will be grown to accommodate
the specified color.
"""
pass
def setColors(*args, **kwargs):
"""
setColors(seq of MColor, colorSet='', rep=kRGBA) -> self
Sets all the colors of the specified colorSet. If no colorSet is
given the current colorSet will be used. After using this method to
set the color values, you can call assignColors() to assign the
corresponding color ids to the geometry.
The color sequence must be at least as large as the current color set
size. You can determine the color set size by calling numColors() for
the default color set, or numColors(colorSet) for a named color set.
If the sequence is larger than the color set size, then the color set
for this mesh will be expanded to accommodate the new color values.
In order to shrink the colorSet you have to clear its existing
colors. E.g: clearColors(), setColors( ... ), assignColors()
"""
pass
def setCreaseEdges(*args, **kwargs):
"""
setCreaseEdges(edgeIds, seq of float) -> self
Sets the specified edges of the mesh as crease edges.
Please note that making effective use of the creasing data in
software outside of Maya may require a license under patents owned by
Pixar(R).
"""
pass
def setCreaseVertices(*args, **kwargs):
"""
setCreaseVertices(vertexIds, seq of float) -> self
Sets the specified vertices of the mesh as crease vertices.
Please note that making effective use of the creasing data in
software outside of Maya may require a license under patents owned by
Pixar(R).
"""
pass
def setCurrentColorSetName(*args, **kwargs):
"""
setCurrentColorSetName(colorSet, modifier=None, currentSelection=None) -> self
Sets the 'current' color set for this object. The current color set
is the one used when no color set name is specified for a color
operation. If the specified color set does not exist then the current
color set will not be changed.
If 'modifier' (MDGModifier) is provided then the operation will be
added to the modifier and will not take effect until the modifier's
doIt() is called. Otherwise it will take effect immediately.
This method may change the current selection. If the 'currentSelection'
(MSelectionList) parameter is provided then the current selection
will be saved to it prior to the change. This is useful for
supporting full undo of the change.
This method is only valid for functionsets which are attached to mesh
nodes, not mesh data.
"""
pass
def setCurrentUVSetName(*args, **kwargs):
"""
setCurrentUVSetName(uvSet, modifier=None, currentSelection=None) -> self
Sets the 'current' uv set for this object. The current uv set is the
one used when no uv set name is specified for a uv operation. If the
specified uv set does not exist then the current uv set will not be
changed.
If 'modifier' (MDGModifier) is provided then the operation will be
added to the modifier and will not take effect until the modifier's
doIt() is called. Otherwise it will take effect immediately.
This method may change the current selection. If the 'currentSelection'
(MSelectionList) parameter is provided then the current selection
will be saved to it prior to the change. This is useful for
supporting full undo of the change.
This method is only valid for functionsets which are attached to mesh
nodes, not mesh data.
"""
pass
def setDoubleBlindData(*args, **kwargs):
"""
setDoubleBlindData(compId, compType, blindDataId, attr, data) -> self
setDoubleBlindData(seq of compId, compType, blindDataId, attr, data) -> self
The first version sets the value of a 'double' blind data attribute
on a single component of the mesh. The data must be a single float.
The second version sets the value of a 'double' blind data attribute
on multiple components of the mesh. If the data is a sequence of
floats then it must provide a value for each component in compIds.
If it is a single float then all of the specified components will
have their blind data set to that value.
"""
pass
def setEdgeSmoothing(*args, **kwargs):
"""
setEdgeSmoothing(edgeId, smooth=True) -> self
Sets the specified edge to be hard or smooth. You must use the
cleanupEdgeSmoothing() method after all the desired edges on your
mesh have had setEdgeSmoothing() done. Use the updateSurface() method
to indicate the mesh needs to be redrawn.
"""
pass
def setFaceColor(*args, **kwargs):
"""
setFaceColor(color, faceId, rep=kRGBA) -> self
Sets the face-vertex color for all vertices on this face.
"""
pass
def setFaceColors(*args, **kwargs):
"""
setFaceColors(colors, faceIds, rep=kRGBA) -> self
Sets the colors of the specified faces. For each face in the faceIds
sequence the corresponding color from the colors sequence will be
applied to all of its vertices.
"""
pass
def setFaceVertexColor(*args, **kwargs):
"""
setFaceVertexColor(color, faceId, vertexId, modifier=None, rep=kRGBA) -> self
Sets the color of a specific face/vertex pair.
If 'modifier' (MDGModifier) is provided then the operation will be
added to the modifier and will not take effect until the modifier's
doIt() is called. Otherwise it will take effect immediately.
"""
pass
def setFaceVertexColors(*args, **kwargs):
"""
setFaceVertexColors(colors, faceIds, vertexIds, modifier=None, rep=kRGBA) -> self
Sets the colors of the specified face/vertex pairs.
If 'modifier' (MDGModifier) is provided then the operation will be
added to the modifier and will not take effect until the modifier's
doIt() is called. Otherwise it will take effect immediately.
"""
pass
def setFaceVertexNormal(*args, **kwargs):
"""
setFaceVertexNormal(normal, faceId, vertexId, space=MSpace.kObject, modifier=None) -> self
Sets a face-specific normal at a vertex.
If 'modifier' (MDGModifier) is provided then the operation will be
added to the modifier and will not take effect until the modifier's
doIt() is called. Otherwise it will take effect immediately.
"""
pass
def setFaceVertexNormals(*args, **kwargs):
"""
setFaceVertexNormals(normals, faceIds, vertexIds, space=MSpace.kObject) -> self
Sets normals for the given face/vertex pairs.
"""
pass
def setFloatBlindData(*args, **kwargs):
"""
setFloatBlindData(compId, compType, blindDataId, attr, data) -> self
setFloatBlindData(seq of compId, compType, blindDataId, attr, data) -> self
The first version sets the value of a 'float' blind data attribute
on a single component of the mesh. The data must be a single float.
The second version sets the value of a 'float' blind data attribute
on multiple components of the mesh. If the data is a sequence of
floats then it must provide a value for each component in compIds.
If it is a single float then all of the specified components will
have their blind data set to that value.
"""
pass
def setIntBlindData(*args, **kwargs):
"""
setIntBlindData(compId, compType, blindDataId, attr, data) -> self
setIntBlindData(seq of compId, compType, blindDataId, attr, data) -> self
The first version sets the value of a 'int' blind data attribute
on a single component of the mesh. The data must be a single int.
The second version sets the value of a 'int' blind data attribute
on multiple components of the mesh. If the data is a sequence of
ints then it must provide a value for each component in compIds.
If it is a single int then all of the specified components will
have their blind data set to that value.
"""
pass
def setInvisibleFaces(*args, **kwargs):
"""
setInvisibleFaces(faceIds, makeVisible=False) -> self
Sets the specified faces of the mesh to be visible or invisible. See
the getInvisibleFaces() method for a description of invisible faces.
"""
pass
def setIsColorClamped(*args, **kwargs):
"""
setIsColorClamped(colorSet, clamped) -> self
Sets whether the color set's RGBA components should be clamped to the
range 0 to 1.
"""
pass
def setNormals(*args, **kwargs):
"""
setNormals(normals, space=MSpace.kObject) -> self
Sets the mesh's normals (user normals).
"""
pass
def setPoint(*args, **kwargs):
"""
setPoint(vertexId, MPoint, space=MSpace.kObject) -> self
Sets the position of specified vertex.
Note that if you modify the position of a vertex for a mesh node (as
opposed to mesh data), a tweak will be created. If you have a node
with no history, the first time that a tweak is created, the
underlying pointers under the MFnMesh object may change. You will
need to call syncObject() to make sure that the object is valid.
Subsequent calls to setPoint() on the same object do not require a
syncObject() call.
"""
pass
def setPoints(*args, **kwargs):
"""
setPoints(points, space=MSpace.kObject) -> self
Sets the positions of the mesh's vertices. The positions may be
given as a sequence of MFloatPoint's or a sequence of MPoint's, but
not a mix of the two.
"""
pass
def setSmoothMeshDisplayOptions(*args, **kwargs):
"""
setSmoothMeshDisplayOptions(MMeshSmoothOptions) -> self
Sets the options to use when smoothing the mesh for display.
"""
pass
def setSomeColors(*args, **kwargs):
"""
setSomeColors(colorIds, colors, colorSet='', rep=kRGBA) -> self
Sets specific colors in a colorSet.
If the largest colorId in the sequence is larger than numColors()
then the colorSet will be grown to accommodate the new color values.
If you have added new colorIds, you can call assignColors to assign
the colorIds to the geometry. If you are modifying existing colors,
they will already be referenced by the existing mesh data.
"""
pass
def setSomeUVs(*args, **kwargs):
"""
setSomeUVs(uvIds, uValues, vValues, uvSet='') -> self
Sets the specified texture coordinates (uv's) for this mesh. The uv
value sequences and the uvIds sequence must all be of equal size. If
the largest uvId in the array is larger than numUVs() then the uv
list for this mesh will be grown to accommodate the new uv values.
If a named uv set is given, the array will be grown when the largest
uvId is larger than numUVs(uvSet).
If you have added new uvIds, you must call one of the assignUV
methods to assign the uvIds to the geometry. If you are modifying
existing UVs, you do not need to call one of the assignUV methods.
"""
pass
def setStringBlindData(*args, **kwargs):
"""
setStringBlindData(compId, compType, blindDataId, attr, data) -> self
setStringBlindData(seq of compId, compType, blindDataId, attr, data) -> self
The first version sets the value of a 'string' blind data attribute
on a single component of the mesh. The data must be a single string.
The second version sets the value of a 'string' blind data attribute
on multiple components of the mesh. If the data is a sequence of
strings then it must provide a value for each component in compIds.
If it is a single string then all of the specified components will
have their blind data set to that value.
"""
pass
def setUV(*args, **kwargs):
"""
setUV(uvId, u, v, uvSet='') -> self
Sets the specified texture coordinate.
The uvId is the element in the uv list that will be set. If the uvId
is greater than or equal to numUVs() then the uv list will be grown
to accommodate the specified uv. If the UV being added is new, thenyou must call one of the assignUV methods in order to update the
geometry.
"""
pass
def setUVs(*args, **kwargs):
"""
setUVs(uValues, vValues, uvSet='') -> self
Sets all of the texture coordinates (uv's) for this mesh. The uv
value sequences must be of equal size and must be at least as large
as the current UV set size. You can determine the UV set size by
calling numUVs() for the default UV set, or numUVs(uvSet) for a
named UV set.
If the sequences are larger than the UV set size, then the uv list
for this mesh will be grown to accommodate the new uv values.
After using this method to set the UV values, you must call one of
the assignUV methods to assign the corresponding UV ids to the
geometry.
In order to shrink the uvs array, do the following: clearUVs(),
setUVs(...), assignUVs(). These steps will let you create an
array of uvs which is smaller than the original one.
"""
pass
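# Example (hedged sketch): replacing and re-assigning the UVs of the quad
# built in the create() example above. The assignUVs(counts, uvIds) argument
# order is assumed from the getAssignedUVs() description and is not
# documented in this excerpt.
#
# uValues = [0.0, 1.0, 1.0, 0.0]
# vValues = [0.0, 0.0, 1.0, 1.0]
# fn.clearUVs()
# fn.setUVs(uValues, vValues)
# fn.assignUVs([4], [0, 1, 2, 3])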
def setVertexColor(*args, **kwargs):
"""
setVertexColor(color, vertexId, modifier=None, rep=kRGBA) -> self
Sets the color for a vertex in all the faces which share it.
If 'modifier' (MDGModifier) is provided then the operation will be
added to the modifier and will not take effect until the modifier's
doIt() is called. Otherwise it will take effect immediately.
"""
pass
def setVertexColors(*args, **kwargs):
"""
setVertexColors(colors, vertexIds, modifier=None, rep=kRGBA) -> self
Sets the colors of the specified vertices. For each vertex in the
vertexIds sequence, the corresponding color from the colors sequence
will be applied to the vertex in all of the faces which share it.
If 'modifier' (MDGModifier) is provided then the operation will be
added to the modifier and will not take effect until the modifier's
doIt() is called. Otherwise it will take effect immediately.
"""
pass
def setVertexNormal(*args, **kwargs):
"""
setVertexNormal(normal, vertexId, space=MSpace.kObject, modifier=None) -> self
Sets the shared normal at a vertex.
If 'modifier' (MDGModifier) is provided then the operation will be
added to the modifier and will not take effect until the modifier's
doIt() is called. Otherwise it will take effect immediately.
"""
pass
def setVertexNormals(*args, **kwargs):
"""
setVertexNormals(normals, vertexIds, space=MSpace.kObject) -> self
Sets the shared normals for the given vertices.
"""
pass
def sortIntersectionFaceTriIds(*args, **kwargs):
"""
sortIntersectionFaceTriIds(faceIds, triIds=None) -> self
Convenience routine for sorting faceIds or face/triangle ids before
passing them into the closestIntersection(), allIntersections(), or
anyIntersection() methods. When using an acceleration structure with
an intersection operation it is essential that any faceId or
faceId/triId arrays be sorted properly to ensure optimal performance.
Both arguments must be MIntArray's.
"""
pass
def split(*args, **kwargs):
"""
split(((kOnEdge, int, float), (kInternalPoint, MFloatPoint), ...)) -> self
Each tuple in the placements sequence consists of a Split Placement
constant followed by one or two parameters.
If the Split Placement is kOnEdge then the tuple will contain two
more elements giving the int id of the edge to split, and a float
value between 0 and 1 indicating how far along the edge to do the
split. The same edge cannot be split more than once per call.
If the Split Placement is kInternalPoint then the tuple will contain
just one more element giving an MFloatPoint within the face.
All splits must begin and end on an edge meaning that the first and
last tuples in the placements sequence must be kOnEdge placements.
"""
pass
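# Example (hedged sketch): splitting across a face from one edge to another
# through an internal point, using the placement tuples described above.
# The edge IDs, parametric positions and internal point are illustrative.
# 'meshFn' is a hypothetical MFnMesh.
#
# import maya.api.OpenMaya as om
# meshFn.split(((om.MFnMesh.kOnEdge, 10, 0.5),
#               (om.MFnMesh.kInternalPoint, om.MFloatPoint(0.25, 0.0, 0.25)),
#               (om.MFnMesh.kOnEdge, 12, 0.5)))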
def subdivideEdges(*args, **kwargs):
"""
subdivideEdges(edges, numDivisions) -> self
Subdivides edges at regular intervals. For example, if numDivisions
is 2 then two equally-spaced vertices will be added to each of the
specified edges: one 1/3 of the way along the edge and a second 2/3
of the way along the edge.
"""
pass
def subdivideFaces(*args, **kwargs):
"""
subdivideFaces(faces, numDivisions) -> self
Subdivides each specified face into a grid of smaller faces.
Triangles are subdivided into a grid of smaller triangles and quads
are subdivided into a grid of smaller quads. Faces with more than
four edges are ignored.
The numDivisions parameter tells how many times to subdivide each
edge of the face. Internal points and edges are introduced as needed
to create a grid of smaller faces.
"""
pass
def syncObject(*args, **kwargs):
"""
syncObject() -> self
If a non-API operation happens that may have changed the
underlying Maya object attached to this functionset, calling this
method will make sure that the functionset picks up those changes.
In particular this call should be used after calling mel commands
which might affect the mesh. Note that this only applies when the
functionset is attached to a mesh node. If it's attached to mesh
data then it is not necessary to call this method.
"""
pass
def unlockFaceVertexNormals(*args, **kwargs):
"""
unlockFaceVertexNormals(seq of faceIds, seq of vertIds) -> self
Unlocks the normals for the given face/vertex pairs.
"""
pass
def unlockVertexNormals(*args, **kwargs):
"""
unlockVertexNormals(sequence of vertIds) -> self
Unlocks the shared normals for the specified vertices.
"""
pass
def updateSurface(*args, **kwargs):
"""
updateSurface() -> self
Signal that this polygonal mesh has changed and needs to be redrawn.
"""
pass
def autoUniformGridParams(*args, **kwargs):
"""
autoUniformGridParams() -> MMeshIsectAccelParams
Creates an object which specifies a uniform voxel grid structure
which can be used by the intersection routines to speed up their
operation. The number of voxel cells to use will be determined
automatically based on the density of triangles in the mesh. The
grid acceleration structure will be cached with the mesh, so that
if the same MMeshIsectAccelParams configuration is used on the next
intersect call, the acceleration structure will not need to be rebuilt.
"""
pass
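# Example (hedged sketch): reusing a cached acceleration structure for a
# batch of intersection queries and freeing it afterwards, combining
# autoUniformGridParams(), sortIntersectionFaceTriIds() and
# freeCachedIntersectionAccelerator() as documented in this class. The
# closestIntersection() keyword arguments are assumptions; 'sources' and
# 'rayDirection' are hypothetical inputs.
#
# import maya.api.OpenMaya as om
# accel = meshFn.autoUniformGridParams()
# faceIds = om.MIntArray([9, 2, 5])
# meshFn.sortIntersectionFaceTriIds(faceIds)
# for raySource in sources:
#     meshFn.closestIntersection(raySource, rayDirection, om.MSpace.kWorld,
#                                9999.0, False, faceIds=faceIds,
#                                accelParams=accel)
# meshFn.freeCachedIntersectionAccelerator()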
def clearGlobalIntersectionAcceleratorInfo(*args, **kwargs):
"""
clearGlobalIntersectionAcceleratorInfo()
Clears the 'total count', 'total build time', and 'peak memory'
fields from the information string returned by
globalIntersectionAcceleratorsInfo(). It will not cause information
about currently existing accelerators to be lost.
"""
pass
def globalIntersectionAcceleratorsInfo(*args, **kwargs):
"""
globalIntersectionAcceleratorsInfo() -> string
Returns a string that describes the system-wide resource usage for
cached mesh intersection accelerators. The string will be of the
following form:
total 10 accelerators created (2 currently active - total current memory = 10000KB), total build time = 10.2s, peak memory = 14567.1KB
This means that:
* a total of 10 intersection accelerators have been created as
instructed by calls to closestIntersection(), allIntersections(),
or anyIntersection() with non-NULL accelParams values. These
structures are destroyed and re-created when intersection requests
with differing acceleration parameters are passed in for the same
mesh, so it is useful to see this value, which is the total count
of how many have been created. In this case, 8 of the 10 created
have been destroyed, either automatically or via calls to the
freeCachedIntersectionAccelerator() method
* the total memory footprint for the 2 accelerators currently in
existence is 10,000KB
* the total build time for all 10 structures that have been created
is 10.2 seconds
* the peak of total memory usage for all accelerators in the system
was 14567.1KB
Calling clearGlobalIntersectionAcceleratorInfo() will clear the
'total count', 'total build time', and 'peak memory' fields from
this information. It will not cause information about currently
existing accelerators to be lost.
"""
pass
def uniformGridParams(*args, **kwargs):
"""
uniformGridParams(xDiv, yDiv, zDiv) -> MMeshIsectAccelParams
Creates an object which specifies a uniform voxel grid structure
which can be used by the intersection routines to speed up their
operation. This object specifies the number of voxel cells to be
used in the x, y, and z dimensions. The grid acceleration structure
will be cached with the mesh, so that if the same MMeshIsectAccelParams
configuration is used on the next intersect call, the acceleration
structure will not need to be rebuilt.
"""
pass
checkSamePointTwice = None
displayColors = None
numColorSets = None
numEdges = None
numFaceVertices = None
numNormals = None
numPolygons = None
numUVSets = None
numVertices = None
__new__ = None
kAlpha = 1
kDifference = 2
kInstanceUnspecified = -1
kInternalPoint = 1
kIntersectTolerance = 1e-06
kIntersection = 3
kInvalid = 2
kOnEdge = 0
kPointTolerance = 1e-10
kRGB = 3
kRGBA = 4
kUnion = 1
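# Hedged usage sketch for the intersection-accelerator methods documented above.
# It assumes this stub mirrors maya.api.OpenMaya.MFnMesh, that the code runs
# inside a Maya session, and that a mesh shape named 'pSphereShape1' exists --
# all of these are assumptions made purely for illustration.
#
#   import maya.api.OpenMaya as om
#   sel = om.MSelectionList()
#   sel.add('pSphereShape1')
#   meshFn = om.MFnMesh(sel.getDagPath(0))
#   accel = meshFn.autoUniformGridParams()   # voxel grid sized from triangle density
#   # ... pass `accel` to closestIntersection()/allIntersections() calls ...
#   print(meshFn.globalIntersectionAcceleratorsInfo())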
class MFnNurbsSurfaceData(MFnGeometryData):
"""
MFnNurbsSurfaceData allows the creation and manipulation of Nurbs Surface
data objects for use in the dependency graph.
__init__()
Initializes a new, empty MFnNurbsSurfaceData object
__init__(MObject)
Initializes a new MFnNurbsSurfaceData function set, attached
to the specified object.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def create(*args, **kwargs):
"""
create() -> MObject
Creates a new nurbs surface data object, attaches it to this function set
and returns an MObject which references it.
"""
pass
__new__ = None
|
129359
|
import sys
from collections import OrderedDict
from functools import partial
import torch.nn as nn
from modules import IdentityResidualBlock, GlobalAvgPool2d
from .util import try_index
class ResNeXt(nn.Module):
def __init__(self,
structure,
groups=64,
norm_act=nn.BatchNorm2d,
input_3x3=True,
classes=0,
output_stride=16,
base_channels=(128, 128, 256)):
"""Pre-activation (identity mapping) ResNeXt model
Parameters
----------
structure : list of int
Number of residual blocks in each of the four modules of the network.
groups : int
Number of groups in each ResNeXt block
norm_act : callable
Function to create normalization / activation Module.
input_3x3 : bool
If `True` use three `3x3` convolutions in the input module instead of a single `7x7` one.
classes : int
If not `0` also include global average pooling and a fully-connected layer with `classes` outputs at the end
of the network.
        output_stride : int
            Overall output stride of the network, either `16` or `8`. The dilation factors applied to the
            later residual modules are derived from this value in the constructor.
base_channels : list of int
Channels in the blocks of the first residual module. Each following module will multiply these values by 2.
"""
super(ResNeXt, self).__init__()
self.structure = structure
if len(structure) != 4:
raise ValueError("Expected a structure with four values")
if output_stride == 16:
dilation = [1, 1, 1, 2] # dilated conv for last 3 blocks (9 layers)
elif output_stride == 8:
dilation = [1, 1, 2, 4] # 23+3 blocks (78 layers)
else:
raise NotImplementedError
# Initial layers
if input_3x3:
layers = [
("conv1", nn.Conv2d(3, 64, 3, stride=2, padding=1, bias=False)),
("bn1", norm_act(64)),
("conv2", nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)),
("bn2", norm_act(64)),
("conv3", nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)),
("pool", nn.MaxPool2d(3, stride=2, padding=1))
]
else:
layers = [
("conv1", nn.Conv2d(3, 64, 7, stride=2, padding=3, bias=False)),
("pool", nn.MaxPool2d(3, stride=2, padding=1))
]
self.mod1 = nn.Sequential(OrderedDict(layers))
# Groups of residual blocks
in_channels = 64
channels = base_channels
for mod_id, num in enumerate(structure):
# Create blocks for module
blocks = []
for block_id in range(num):
# stride is 2 when dilation=1, mod_id>0, block_id=0, else is 1
s, d = self._stride_dilation(mod_id, block_id, dilation)
blocks.append((
"block%d" % (block_id + 1),
IdentityResidualBlock(in_channels, channels, stride=s, norm_act=norm_act, groups=groups, dilation=d)
))
# Update channels
in_channels = channels[-1]
# Create and add module
self.add_module("mod%d" % (mod_id + 2), nn.Sequential(OrderedDict(blocks)))
channels = [c * 2 for c in channels]
self.out_channels = in_channels
# Pooling and predictor
self.bn_out = norm_act(in_channels)
if classes != 0:
self.classifier = nn.Sequential(OrderedDict([
("avg_pool", GlobalAvgPool2d()),
("fc", nn.Linear(in_channels, classes))
]))
def forward(self, img):
out = self.mod1(img)
out = self.mod2(out)
out = self.mod3(out)
out = self.mod4(out)
out = self.mod5(out)
out = self.bn_out(out)
if hasattr(self, "classifier"):
out = self.classifier(out)
return out
@staticmethod
def _stride_dilation(mod_id, block_id, dilation):
if dilation == 1:
s = 2 if mod_id > 0 and block_id == 0 else 1
d = 1
else:
if dilation[mod_id] == 1:
s = 2 if mod_id > 0 and block_id == 0 else 1
d = 1
else:
s = 1
d = try_index(dilation[mod_id], block_id)
return s, d
_NETS = {
"50": {"structure": [3, 4, 6, 3]},
"101": {"structure": [3, 4, 23, 3]},
"152": {"structure": [3, 8, 36, 3]},
}
__all__ = []
for name, params in _NETS.items():
net_name = "net_resnext" + name
setattr(sys.modules[__name__], net_name, partial(ResNeXt, **params))
__all__.append(net_name)
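# Minimal usage sketch (kept as comments, since this module uses a relative
# import and is meant to be imported as part of a package). It assumes the
# `modules` and `util` dependencies imported above are importable; the
# 224x224 input size is only illustrative.
#
#   import torch
#   model = net_resnext50(classes=1000)          # partial(ResNeXt, structure=[3, 4, 6, 3])
#   logits = model(torch.randn(2, 3, 224, 224))  # -> torch.Size([2, 1000])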
|
129390
|
from .resource import FieldsResource
"""
contentful.asset
~~~~~~~~~~~~~~~~
This module implements the Asset class.
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/assets
:copyright: (c) 2016 by Contentful GmbH.
:license: MIT, see LICENSE for more details.
"""
class Asset(FieldsResource):
"""
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/assets
"""
def url(self, **kwargs):
"""Returns a formatted URL for the Asset's File
with serialized parameters.
Usage:
>>> my_asset.url()
"//images.contentful.com/spaces/foobar/..."
>>> my_asset.url(w=120, h=160)
"//images.contentful.com/spaces/foobar/...?w=120&h=160"
"""
if not hasattr(self, 'file') or not self.file:
return ""
url = self.file['url']
args = ['{0}={1}'.format(k, v) for k, v in kwargs.items()]
if args:
url += '?{0}'.format('&'.join(args))
return url
def incoming_references(self, client=None, query=None):
"""Fetches all entries referencing the asset
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/search-parameters/links-to-asset
        :param client: Client instance
:param query: (optional) Dict with API options.
:return: List of :class:`Entry <contentful.entry.Entry>` objects.
:rtype: List of contentful.entry.Entry
Usage:
>>> entries = asset.incoming_references(client)
[<Entry[cat] id='happycat'>]
"""
if query is None:
query = {}
if client is None:
return False
query.update({'links_to_asset': self.id})
return client.entries(query)
def __repr__(self):
return "<Asset id='{0}' url='{1}'>".format(
self.sys.get('id', ''),
self.url()
)
|
129401
|
from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
import future.utils
from functools import reduce
import fractions
import operator
import os
import re
import sys
import tempfile
from html.parser import HTMLParser
def make_none(*args, **kwargs):
return None
def if_not_none(item, default):
""" Equivalent to `item if item is not None else default` """
if item is None:
return default
else:
return item
class MLStripper(HTMLParser):
""" Strips markup language tags from a string.
FROM http://stackoverflow.com/a/925630/1958900
"""
def __init__(self):
if not future.utils.PY2:
super().__init__()
self.reset()
self.fed = []
self.strict = False
self.convert_charrefs = True
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def html_to_text(html):
"""
FROM http://stackoverflow.com/a/925630/1958900
"""
s = MLStripper()
s.unescape = True # convert HTML entities to text
s.feed(html)
return s.get_data()
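# Quick illustration of html_to_text; the fragment below is illustrative only,
# not taken from the original docs:
#
#   html_to_text('<p>Hello <b>world</b></p>')   # -> 'Hello world'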
def printflush(s, newline=True):
if newline:
print(s)
else:
print(s, end=' ')
sys.stdout.flush()
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
Note:
        Copied without modification from Python 3.6.1 ``shutil.which``
source code
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if not os.curdir in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if not normdir in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
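# Usage sketch for which() -- results depend entirely on the host system, so
# the paths shown here are illustrative only:
#
#   which('ls')                   # e.g. '/bin/ls', or None if not on PATH
#   which('ls', path='/opt/bin')  # search a custom PATH-style string instead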
class methodcaller(object):
"""The pickleable implementation of the standard library operator.methodcaller.
This was copied without modification from:
https://github.com/python/cpython/blob/065990fa5bd30fb3ca61b90adebc7d8cb3f16b5a/Lib/operator.py
The c-extension version is not pickleable, so we keep a copy of the pure-python standard library
code here. See https://bugs.python.org/issue22955
Original documentation:
Return a callable object that calls the given method on its operand.
After f = methodcaller('name'), the call f(r) returns r.name().
After g = methodcaller('name', 'date', foo=1), the call g(r) returns
r.name('date', foo=1).
"""
__slots__ = ('_name', '_args', '_kwargs')
def __init__(*args, **kwargs):
if len(args) < 2:
msg = "methodcaller needs at least one argument, the method name"
raise TypeError(msg)
self = args[0]
self._name = args[1]
if not isinstance(self._name, future.utils.native_str):
raise TypeError('method name must be a string')
self._args = args[2:]
self._kwargs = kwargs
def __call__(self, obj):
return getattr(obj, self._name)(*self._args, **self._kwargs)
def __repr__(self):
args = [repr(self._name)]
args.extend(list(map(repr, self._args)))
args.extend('%s=%r' % (k, v) for k, v in list(self._kwargs.items()))
return '%s.%s(%s)' % (self.__class__.__module__,
self.__class__.__name__,
', '.join(args))
def __reduce__(self):
if not self._kwargs:
return self.__class__, (self._name,) + self._args
else:
from functools import partial
return partial(self.__class__, self._name, **self._kwargs), self._args
class textnotify(object):
""" Print a single, immediately flushed line to log the execution of a block.
Prints 'done' at the end of the line (or 'ERROR' if an uncaught exception)
Examples:
>>> import time
>>> with textnotify('starting to sleep'):
>>> time.sleep(3)
starting to sleep...done
>>> with textnotify('raising an exception...'):
>>> raise ValueError()
    raising an exception...ERROR
ValueError [...]
"""
def __init__(self, startmsg):
if startmsg.strip()[-3:] != '...':
startmsg = startmsg.strip() + '...'
self.startmsg = startmsg
def __enter__(self):
printflush(self.startmsg, newline=False)
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
printflush('done')
else:
printflush('ERROR')
class BaseTable(object):
def __init__(self, categories, fileobj=None):
self.categories = categories
self.lines = []
self.fileobj = fileobj
def add_line(self, obj):
if hasattr(obj, 'keys'):
newline = [obj.get(cat, '') for cat in self.categories]
else:
assert len(obj) == len(self.categories)
newline = obj
self.lines.append(newline)
self.writeline(newline)
def writeline(self, newline):
raise NotImplementedError()
def getstring(self):
raise NotImplementedError()
class MarkdownTable(BaseTable):
def __init__(self, *categories):
super().__init__(categories)
def markdown(self, replace=None):
if replace is None: replace = {}
outlines = ['| ' + ' | '.join(self.categories) + ' |',
'|-' + ''.join('|-' for x in self.categories) + '|']
for line in self.lines:
nextline = [str(replace.get(val, val)) for val in line]
outlines.append('| ' + ' | '.join(nextline) + ' |')
return '\n'.join(outlines)
def writeline(self, newline):
pass
def getstring(self):
return self.markdown()
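# Small usage sketch for MarkdownTable; the column names and values below are
# purely illustrative:
#
#   table = MarkdownTable('name', 'value')
#   table.add_line({'name': 'alpha', 'value': 1})
#   table.add_line(['beta', 2])
#   print(table.markdown())
#   # | name | value |
#   # |-|-|-|
#   # | alpha | 1 |
#   # | beta | 2 |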
def binomial_coefficient(n, k):
# credit to http://stackoverflow.com/users/226086/nas-banov
return int(reduce(operator.mul,
(fractions.Fraction(n - i, i + 1) for i in range(k)), 1))
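# Quick sanity check (illustrative): binomial_coefficient(5, 2) == 10 and
# binomial_coefficient(4, 0) == 1 (the empty product in reduce() covers k == 0).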
def pairwise_displacements(a):
"""
:type a: numpy.array
from http://stackoverflow.com/questions/22390418/pairwise-displacement-vectors-among-set-of-points
"""
import numpy as np
n = a.shape[0]
d = a.shape[1]
c = binomial_coefficient(n, 2)
out = np.zeros((c, d))
l = 0
r = l + n - 1
for sl in range(1, n): # no point1 - point1!
out[l:r] = a[:n - sl] - a[sl:]
l = r
r += n - (sl + 1)
return out
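# Illustrative check, values worked out by hand:
#
#   import numpy as np
#   a = np.array([[0., 0.], [1., 0.], [1., 1.]])
#   pairwise_displacements(a)
#   # -> [[-1.,  0.],   # point0 - point1
#   #     [ 0., -1.],   # point1 - point2
#   #     [-1., -1.]]   # point0 - point2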
def is_printable(s):
import string
for c in s:
if c not in string.printable:
return False
else:
return True
class _RedirectStream(object):
"""From python3.4 stdlib
"""
_stream = None
def __init__(self, new_target):
self._new_target = new_target
# We use a list of old targets to make this CM re-entrant
self._old_targets = []
def __enter__(self):
self._old_targets.append(getattr(sys, self._stream))
setattr(sys, self._stream, self._new_target)
return self._new_target
def __exit__(self, exctype, excinst, exctb):
setattr(sys, self._stream, self._old_targets.pop())
class redirect_stderr(_RedirectStream):
"""From python3.4 stdlib"""
_stream = "stderr"
GETFLOAT = re.compile(r'-?\d+(\.\d+)?(e[-+]?\d+)?')  # matches numbers, e.g. 1, -2.0, 3.5e50, 0.001e-10
def from_filepath(func, filelike):
"""Run func on a temporary *path* assigned to filelike"""
if type(filelike) == str:
return func(filelike)
else:
with tempfile.NamedTemporaryFile() as outfile:
outfile.write(filelike.read().encode()) # hack - prob need to detect bytes
outfile.flush()
result = func(outfile.name)
return result
|
129416
|
from fabric import task
from patchwork.transfers import rsync
import os
import os.path as osp
from pathlib import Path
import json
## START EDIT: Edit these values to your profiles
# name of the bimhaw profile used in the bootstrapping process
PHASE1_PROFILE = "scooter"
# name of the bimhaw profile that is the end state target
FINAL_PROFILE = "bronco"
# name of the loadout (collection of installed programs) to install
FINAL_LOADOUT = "humvee"
## END EDIT
@task
def packages_bootstrap(cx):
"""Install the main distro provided packages"""
packages = [
'git',
'rsync',
'mg',
'python3-venv',
]
cx.run(f"sudo apt install -y {' '.join(packages)}")
@task
def bimhaw_bootstrap(cx):
"""Configure bimhaw and shells in system to PHASE 1.
PHASE 1 is a loadout profile which is used to then install more
advanced environments.
"""
# make a python virtual env to install it at first and set the
# shells
cx.run("mkdir -p $HOME/tmp")
cx.run("python3 -m venv ~/tmp/py_bootstrap_env")
# run the bimhaw initialization stuff
with cx.prefix(". tmp/py_bootstrap_env/bin/activate"):
cx.run("rm -rf $HOME/.bimhaw")
cx.run("pip install git+https://github.com/salotz/bimhaw.git")
cx.run("python -m bimhaw.init "
"--config ~/.salotz.d/bimhaw/config.py "
"--lib ~/.salotz.d/bimhaw/lib")
# cx.run("rm $HOME/.profile ~/.bashrc ~/.bash_logout")
cx.run("bimhaw link-shells --force && "
f"bimhaw profile -n {PHASE1_PROFILE}"
)
@task
def pyenv_bootstrap(cx):
with cx.cd("$HOME/.salotz.d"):
cx.run("/bin/bash -l -c './lib/installers/python_sys.sh'")
cx.run("/bin/bash -l -c './lib/installers/python_pyenv.bash'")
cx.run("/bin/bash -l -c './lib/setups/python_pyenv.bash'")
cx.run("/bin/bash -l -c 'pip3 install invoke'")
cx.run("/bin/bash -l -c './lib/setups/python_virtualenv.sh'")
@task
def install_loadout(cx):
    with cx.cd("$HOME/.salotz.d"):
        cx.run(f"/bin/bash -l -c 'make -f lib/loadouts/{FINAL_LOADOUT}.mk all'")
# load the bimhaw profile
cx.run(f"/bin/bash -l -c './lib/setups/bimhaw.sh && bimhaw profile -n {FINAL_PROFILE}'")
@task()
def bootstrap(cx):
"""Take a container from blank to loaded out."""
packages_bootstrap(cx)
push_profile(cx)
bimhaw_bootstrap(cx)
pyenv_bootstrap(cx)
install_loadout(cx)
@task
def push_profile(cx):
"""Push my configuration from local user. Assumes bootstrap install is
done."""
homedir = osp.expandvars('$HOME')
rsync(cx,
f"{homedir}/.salotz.d",
f"{homedir}/",
rsync_opts="-ahi --stats",
)
@task
def push_project(cx, target_dir=None):
# get the directory to push to
if target_dir is None:
target_dir = Path(os.getcwd()).parent
cx.run(f"mkdir -p {target_dir}")
rsync(cx,
os.getcwd(),
target_dir,
rsync_opts="-ahi --stats --filter=':- .gitignore'",
)
@task
def pull_project(cx):
# get the directory to push to
target_dir = Path(os.getcwd()).parent
rsync(cx,
os.getcwd(),
target_dir,
rsync_opts="-ahi --stats --filter=':- .gitignore' --update",
)
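# Usage sketch (an assumption about how these tasks are typically invoked, not
# part of the original file): with Fabric 2.x installed and 'user@newhost'
# reachable over SSH, the full bootstrap can be run with
#
#   fab -H user@newhost bootstrap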
|
129497
|
# Read a list of integers and print the minimum number of unit increments
# needed to raise every below-average element up to the integer average
# (e.g. input "1 2 3" -> average 2 -> answer 1). If the sum is not evenly
# divisible by the count, balancing is impossible and -1 is printed.
d = [int(i) for i in input().split()]
s = sum(d)
result = 0
if s % len(d) == 0:
    mean = s // len(d)
    for i in range(len(d)):
        if d[i] < mean:
            result = result + (mean - d[i])
    print(result, end='')
else:
    print('-1', end='')
|
129502
|
import os
from typing import List
from app.common.build_artifact import BuildArtifact
from app.master.atom import AtomState
from app.util.conf.configuration import Configuration
from app.util.log import get_logger
from app.util.pagination import get_paginated_indices
class Subjob(object):
def __init__(self, build_id, subjob_id, project_type, job_config, atoms):
"""
:param build_id:
:type build_id: int
:param subjob_id:
:type subjob_id: int
:param project_type:
:type project_type: ProjectType
:param job_config: the job's configuration from clusterrunner.yaml
:type job_config: JobConfig
        :param atoms: the atoms that make up this subjob
:type atoms: list[app.master.atom.Atom]
:return:
"""
self._logger = get_logger(__name__)
self._build_id = build_id
self._subjob_id = subjob_id
self._project_type = project_type # todo: Unused; remove.
self.job_config = job_config
self._atoms = atoms
self._set_atoms_subjob_id(atoms, subjob_id)
self._set_atom_state(AtomState.NOT_STARTED)
        self.timings = {}  # a dict mapping atom command strings to their run times in seconds
self.slave = None # The slave that had been assigned this subjob. Is None if not started.
def __str__(self):
return '<subjob {} of build {}>'.format(self._subjob_id, self._build_id)
def _set_atoms_subjob_id(self, atoms, subjob_id):
"""
Set the subjob_id on each atom
:param atoms: an array of atoms to set the subjob_id on
:type atoms: list[app.master.atom.Atom]
:param subjob_id: the subjob_id to set on the atoms
:type subjob_id: int
"""
for atom in atoms:
atom.subjob_id = subjob_id
def _set_atom_state(self, state):
"""
Set the state of all atoms of the subjob.
:param state: up-to-date state of all atoms of the subjob
:type state: `:class:AtomState`
"""
for atom in self._atoms:
atom.state = state
def mark_in_progress(self, slave):
"""
Mark the subjob IN_PROGRESS, which marks the state of all the atoms of the subjob IN_PROGRESS.
:param slave: the slave node that has been assigned this subjob.
:type slave: Slave
"""
self._set_atom_state(AtomState.IN_PROGRESS)
self.slave = slave
def mark_completed(self):
"""
Mark the subjob COMPLETED, which marks the state of all the atoms of the subjob COMPLETED.
"""
self._set_atom_state(AtomState.COMPLETED)
def api_representation(self):
"""
:rtype: dict [str, str]
"""
return {
'id': self._subjob_id,
'command': self.job_config.command,
'atoms': [atom.api_representation() for atom in self._atoms],
'slave': self.slave.url if self.slave else None,
}
@property
def atoms(self) -> List['Atom']:
"""
Returns a list of all atoms for this subjob
"""
return self._atoms
def get_atoms(self, offset: int=None, limit: int=None) -> List['Atom']:
"""
Returns a list of atoms for this subjob
:param offset: The starting index of the requested build
:param limit: The number of builds requested
:rtype: list[app.master.atom.Atom]
"""
num_atoms = len(self._atoms)
start, end = get_paginated_indices(offset, limit, num_atoms)
return self._atoms[start:end]
def build_id(self):
"""
:return:
:rtype: int
"""
return self._build_id
def subjob_id(self):
"""
:return:
:rtype: int
"""
return self._subjob_id
def atomic_commands(self):
"""
The list of atom commands -- the atom id for each atom is implicitly defined by the index of the list.
:rtype: list[str]
"""
job_command = self.job_config.command
return ['{} {}'.format(atom.command_string, job_command) for atom in self._atoms]
def add_timings(self, timings):
"""
Add timing data for this subjob's atoms, collected from a slave
:param timings:
:type timings: dict [string, float]
"""
self.timings.update(timings)
def read_timings(self):
"""
The timing data for each atom should be stored in the atom directory. Parse them, associate
them with their atoms, and return them.
:rtype: dict [str, float]
"""
timings = {}
for atom_id, atom in enumerate(self._atoms):
artifact_dir = BuildArtifact.atom_artifact_directory(
self.build_id(),
self.subjob_id(),
atom_id,
result_root=Configuration['results_directory']
)
timings_file_path = os.path.join(artifact_dir, BuildArtifact.TIMING_FILE)
if os.path.exists(timings_file_path):
with open(timings_file_path, 'r') as f:
atom.actual_time = float(f.readline())
timings[atom.command_string] = atom.actual_time
else:
self._logger.warning('No timing data for subjob {} atom {}.',
self._subjob_id, atom_id)
if len(timings) == 0:
self._logger.warning('No timing data for subjob {}.', self._subjob_id)
return timings
|
129514
|
import tensorflow as tf
from nn_basic_layers import *
from filterbank_shape import FilterbankShape
from ops import *
class Naive_Fusion_Net(object):
def __init__(self, config):
# Placeholders for input, output and dropout
self.config = config
self.input_x1 = tf.placeholder(tf.float32,shape=[None, self.config.epoch_seq_len, self.config.deep_ntime, self.config.nchannel],name='input_x1')
self.input_x2 = tf.placeholder(tf.float32, [None, self.config.epoch_seq_len, self.config.seq_frame_seq_len, self.config.seq_ndim, self.config.nchannel], name="input_x2")
self.input_y = tf.placeholder(tf.float32, [None, self.config.epoch_seq_len, self.config.nclass], name="input_y")
self.dropout_rnn = tf.placeholder(tf.float32, name="dropout_rnn")
self.dropout_cnn = tf.placeholder(tf.float32, name="dropout_cnn")
        self.istraining = tf.placeholder(tf.bool, name='istraining') # indicate training for batch normalization
self.seq_frame_seq_len = tf.placeholder(tf.int32, [None]) # for the dynamic RNN
self.epoch_seq_len = tf.placeholder(tf.int32, [None]) # for the dynamic RNN
self.construct_fcnnrnn_net()
self.construct_seqsleepnet()
self.score = []
self.prediction = []
with tf.variable_scope("output_layer"):
for i in range(self.config.epoch_seq_len):
score_i = fc(tf.concat([tf.squeeze(self.deep_rnn_out[:,i,:]), tf.squeeze(self.seq_rnn_out2[:,i,:])], 1),
self.config.seq_nhidden2 * 2 + self.config.deep_nhidden * 2,
self.config.nclass,
name="joint_output",
relu=False)
pred_i = tf.argmax(score_i, 1, name="pred-%s" % i)
self.score.append(score_i)
self.prediction.append(pred_i)
self.output_loss = 0
with tf.name_scope("output-loss"):
for i in range(self.config.epoch_seq_len):
loss_i = tf.nn.softmax_cross_entropy_with_logits(labels=tf.squeeze(self.input_y[:,i,:]), logits=self.score[i])
loss_i = tf.reduce_sum(loss_i, axis=[0])
self.output_loss += loss_i
self.output_loss = self.output_loss/self.config.epoch_seq_len # average over sequence length
self.accuracy = []
with tf.name_scope("accuracy"):
for i in range(self.config.epoch_seq_len):
correct_prediction_i = tf.equal(self.prediction[i], tf.argmax(tf.squeeze(self.input_y[:,i,:]), 1))
accuracy_i = tf.reduce_mean(tf.cast(correct_prediction_i, "float"), name="accuracy-%s" % i)
self.accuracy.append(accuracy_i)
# add on regularization except for the filter bank layers
with tf.name_scope("l2_loss"):
vars = tf.trainable_variables()
except_vars_eeg = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='seq_filterbank-layer-eeg')
except_vars_eog = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='seq_filterbank-layer-eog')
except_vars_emg = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='seq_filterbank-layer-emg')
l2_loss = tf.add_n([ tf.nn.l2_loss(v) for v in vars
if v not in except_vars_eeg and v not in except_vars_eog and v not in except_vars_emg])
self.loss = self.output_loss + self.config.l2_reg_lambda*l2_loss
#############################################################
# Construct fcnnrnn here
#############################################################
def construct_fcnnrnn_net(self):
self.g_enc_depths = [16, 16, 32, 32, 64, 64, 128, 128, 256]
deep_X = tf.reshape(self.input_x1, [-1, self.config.deep_ntime, self.config.nchannel])
with tf.device('/gpu:0'), tf.variable_scope("deep_all_cnn_layer") as scope:
deep_cnn_feat = self.all_convolution_block(deep_X,"all_conv_block")
deep_num_cnn_feat = 6*self.g_enc_depths[-1]
deep_cnn_feat = tf.reshape(deep_cnn_feat, [-1, deep_num_cnn_feat])
print("deep_cnn_feat")
print(deep_cnn_feat.get_shape())
deep_rnn_input = tf.reshape(deep_cnn_feat, [-1, self.config.epoch_seq_len, deep_num_cnn_feat])
# bidirectional sequence-level recurrent layer
with tf.device('/gpu:0'), tf.variable_scope("deep_epoch_rnn_layer") as scope:
deep_fw_cell, deep_bw_cell = bidirectional_recurrent_layer(self.config.deep_nhidden,
self.config.deep_nlayer,
input_keep_prob=self.dropout_rnn,
output_keep_prob=self.dropout_rnn)
self.deep_rnn_out, self.deep_rnn_state = bidirectional_recurrent_layer_output(deep_fw_cell,
deep_bw_cell,
deep_rnn_input,
self.epoch_seq_len,
scope=scope)
print(self.deep_rnn_out.get_shape())
#############################################################
# End deepsleepnet here
#############################################################
#############################################################
# Construct seqsleepnet here
#############################################################
def construct_seqsleepnet(self):
filtershape = FilterbankShape()
#triangular filterbank
self.Wbl = tf.constant(filtershape.lin_tri_filter_shape(nfilt=self.config.seq_nfilter,
nfft=self.config.seq_nfft,
samplerate=self.config.seq_samplerate,
lowfreq=self.config.seq_lowfreq,
highfreq=self.config.seq_highfreq),
dtype=tf.float32,
name="W-filter-shape-eeg")
with tf.device('/gpu:0'), tf.variable_scope("seq_filterbank-layer-eeg"):
# Temporarily crush the feature_mat's dimensions
Xeeg = tf.reshape(tf.squeeze(self.input_x2[:,:,:,:,0]), [-1, self.config.seq_ndim])
# first filter bank layer
self.Weeg = tf.Variable(tf.random_normal([self.config.seq_ndim, self.config.seq_nfilter],dtype=tf.float32))
# non-negative constraints
self.Weeg = tf.sigmoid(self.Weeg)
            # mask matrix should be replaced by a shape-specific filter bank, e.g. triangular, rectangular.
self.Wfb = tf.multiply(self.Weeg,self.Wbl)
HWeeg = tf.matmul(Xeeg, self.Wfb) # filtering
HWeeg = tf.reshape(HWeeg, [-1, self.config.epoch_seq_len, self.config.seq_frame_seq_len, self.config.seq_nfilter])
if(self.config.nchannel > 1):
with tf.device('/gpu:0'), tf.variable_scope("seq_filterbank-layer-eog"):
# Temporarily crush the feature_mat's dimensions
Xeog = tf.reshape(tf.squeeze(self.input_x2[:,:,:,:,1]), [-1, self.config.seq_ndim])
# first filter bank layer
self.Weog = tf.Variable(tf.random_normal([self.config.seq_ndim, self.config.seq_nfilter],dtype=tf.float32))
# non-negative constraints
self.Weog = tf.sigmoid(self.Weog)
                # mask matrix should be replaced by a shape-specific filter bank, e.g. triangular, rectangular.
self.Wfb = tf.multiply(self.Weog,self.Wbl)
HWeog = tf.matmul(Xeog, self.Wfb) # filtering
HWeog = tf.reshape(HWeog, [-1, self.config.epoch_seq_len, self.config.seq_frame_seq_len, self.config.seq_nfilter])
if(self.config.nchannel > 2):
with tf.device('/gpu:0'), tf.variable_scope("seq_filterbank-layer-emg"):
# Temporarily crush the feature_mat's dimensions
Xemg = tf.reshape(tf.squeeze(self.input_x2[:,:,:,:,2]), [-1, self.config.seq_ndim])
# first filter bank layer
self.Wemg = tf.Variable(tf.random_normal([self.config.seq_ndim, self.config.seq_nfilter],dtype=tf.float32))
# non-negative constraints
self.Wemg = tf.sigmoid(self.Wemg)
                # mask matrix should be replaced by a shape-specific filter bank, e.g. triangular, rectangular.
self.Wfb = tf.multiply(self.Wemg,self.Wbl)
HWemg = tf.matmul(Xemg, self.Wfb) # filtering
HWemg = tf.reshape(HWemg, [-1, self.config.epoch_seq_len, self.config.seq_frame_seq_len, self.config.seq_nfilter])
if(self.config.nchannel > 2):
X2 = tf.concat([HWeeg, HWeog, HWemg], axis = 3)
elif(self.config.nchannel > 1):
X2 = tf.concat([HWeeg, HWeog], axis = 3)
else:
X2 = HWeeg
X2 = tf.reshape(X2, [-1, self.config.seq_frame_seq_len, self.config.seq_nfilter*self.config.nchannel])
# bidirectional epoch-level recurrent layer
with tf.variable_scope("seq_frame_rnn_layer") as scope:
seq_fw_cell1, seq_bw_cell1 = bidirectional_recurrent_layer_bn(self.config.seq_nhidden1,
self.config.seq_nlayer1,
seq_len=self.config.seq_frame_seq_len,
is_training=self.istraining,
input_keep_prob=self.dropout_rnn, # we have dropouted in the convolutional layer
output_keep_prob=self.dropout_rnn)
seq_rnn_out1, seq_rnn_state1 = bidirectional_recurrent_layer_output(seq_fw_cell1,
seq_bw_cell1,
X2,
self.seq_frame_seq_len,
scope=scope)
print(seq_rnn_out1.get_shape())
with tf.variable_scope("seq_frame_attention_layer"):
self.seq_attention_out1, _ = attention(seq_rnn_out1, self.config.seq_attention_size1)
print(self.seq_attention_out1.get_shape())
seq_e_rnn_input = tf.reshape(self.seq_attention_out1, [-1, self.config.epoch_seq_len, self.config.seq_nhidden1*2])
# bidirectional seq-level recurrent layer
with tf.variable_scope("seq_epoch_rnn_layer") as scope:
seq_fw_cell2, seq_bw_cell2 = bidirectional_recurrent_layer_bn(self.config.seq_nhidden2,
self.config.seq_nlayer2,
seq_len=self.config.epoch_seq_len,
is_training=self.istraining,
input_keep_prob=self.dropout_rnn, # we have dropouted the output of frame-wise rnn
output_keep_prob=self.dropout_rnn)
self.seq_rnn_out2, self.seq_rnn_state2 = bidirectional_recurrent_layer_output(seq_fw_cell2,
seq_bw_cell2,
seq_e_rnn_input,
self.epoch_seq_len,
scope=scope)
print(self.seq_rnn_out2.get_shape())
#############################################################
# End seqsleepnet
############################################################
def all_convolution_block(self, input, name):
in_dims = input.get_shape().as_list()
print(in_dims)
h_i = input
if len(in_dims) == 2:
h_i = tf.expand_dims(input, -1)
elif len(in_dims) < 2 or len(in_dims) > 3:
raise ValueError('Generator input must be 2-D or 3-D')
kwidth = 31
with tf.variable_scope(name, reuse=tf.AUTO_REUSE) as scope:
for layer_idx, layer_depth in enumerate(self.g_enc_depths):
bias_init = tf.constant_initializer(0.)
h_i_dwn = downconv(h_i, layer_depth, kwidth=kwidth,
init=tf.truncated_normal_initializer(stddev=0.02),
bias_init=bias_init,
name='enc_{}'.format(layer_idx))
print("h_i_dwn")
print(h_i_dwn.get_shape())
print('Downconv {} -> {}'.format(h_i.get_shape(),h_i_dwn.get_shape()))
h_i = h_i_dwn
print('-- Enc: leakyrelu activation --')
h_i = leakyrelu(h_i)
if(layer_idx < len(self.g_enc_depths) - 1):
h_i = dropout(h_i, self.dropout_cnn)
return h_i
|
129542
|
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_array_equal, assert_array_almost_equal_nulp)
import numpy as np
import pytest
import matplotlib.mlab as mlab
from matplotlib.cbook.deprecation import MatplotlibDeprecationWarning
def _stride_repeat(*args, **kwargs):
with pytest.warns(MatplotlibDeprecationWarning):
return mlab.stride_repeat(*args, **kwargs)
class TestStride:
def get_base(self, x):
y = x
while y.base is not None:
y = y.base
return y
def calc_window_target(self, x, NFFT, noverlap=0, axis=0):
"""
This is an adaptation of the original window extraction algorithm.
This is here to test to make sure the new implementation has the same
result.
"""
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
result = np.zeros((NFFT, n))
# do the ffts of the slices
for i in range(n):
result[:, i] = x[ind[i]:ind[i]+NFFT]
if axis == 1:
result = result.T
return result
@pytest.mark.parametrize('shape', [(), (10, 1)], ids=['0D', '2D'])
def test_stride_windows_invalid_input_shape(self, shape):
x = np.arange(np.prod(shape)).reshape(shape)
with pytest.raises(ValueError):
mlab.stride_windows(x, 5)
@pytest.mark.parametrize('n, noverlap',
[(0, None), (11, None), (2, 2), (2, 3)],
ids=['n less than 1', 'n greater than input',
'noverlap greater than n',
'noverlap equal to n'])
def test_stride_windows_invalid_params(self, n, noverlap):
x = np.arange(10)
with pytest.raises(ValueError):
mlab.stride_windows(x, n, noverlap)
@pytest.mark.parametrize('shape', [(), (10, 1)], ids=['0D', '2D'])
def test_stride_repeat_invalid_input_shape(self, shape):
x = np.arange(np.prod(shape)).reshape(shape)
with pytest.raises(ValueError):
_stride_repeat(x, 5)
@pytest.mark.parametrize('axis', [-1, 2],
ids=['axis less than 0',
'axis greater than input shape'])
def test_stride_repeat_invalid_axis(self, axis):
x = np.array(0)
with pytest.raises(ValueError):
_stride_repeat(x, 5, axis=axis)
def test_stride_repeat_n_lt_1_ValueError(self):
x = np.arange(10)
with pytest.raises(ValueError):
_stride_repeat(x, 0)
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
@pytest.mark.parametrize('n', [1, 5], ids=['n1', 'n5'])
def test_stride_repeat(self, n, axis):
x = np.arange(10)
y = _stride_repeat(x, n, axis=axis)
expected_shape = [10, 10]
expected_shape[axis] = n
yr = np.repeat(np.expand_dims(x, axis), n, axis=axis)
assert yr.shape == y.shape
assert_array_equal(yr, y)
assert tuple(expected_shape) == y.shape
assert self.get_base(y) is x
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
@pytest.mark.parametrize('n, noverlap',
[(1, 0), (5, 0), (15, 2), (13, -3)],
ids=['n1-noverlap0', 'n5-noverlap0',
'n15-noverlap2', 'n13-noverlapn3'])
def test_stride_windows(self, n, noverlap, axis):
x = np.arange(100)
y = mlab.stride_windows(x, n, noverlap=noverlap, axis=axis)
expected_shape = [0, 0]
expected_shape[axis] = n
expected_shape[1 - axis] = 100 // (n - noverlap)
yt = self.calc_window_target(x, n, noverlap=noverlap, axis=axis)
assert yt.shape == y.shape
assert_array_equal(yt, y)
assert tuple(expected_shape) == y.shape
assert self.get_base(y) is x
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
def test_stride_windows_n32_noverlap0_unflatten(self, axis):
n = 32
x = np.arange(n)[np.newaxis]
x1 = np.tile(x, (21, 1))
x2 = x1.flatten()
y = mlab.stride_windows(x2, n, axis=axis)
if axis == 0:
x1 = x1.T
assert y.shape == x1.shape
assert_array_equal(y, x1)
def test_stride_ensure_integer_type(self):
N = 100
x = np.full(N + 20, np.nan)
y = x[10:-10]
y[:] = 0.3
        # previous to #3845 this led to corrupt access
y_strided = mlab.stride_windows(y, n=33, noverlap=0.6)
assert_array_equal(y_strided, 0.3)
        # previous to #3845 this led to corrupt access
y_strided = mlab.stride_windows(y, n=33.3, noverlap=0)
assert_array_equal(y_strided, 0.3)
        # even previous to #3845 we could not find any problematic
        # configuration; however, let's make sure it's not accidentally
        # introduced
y_strided = _stride_repeat(y, n=33.815)
assert_array_equal(y_strided, 0.3)
def _apply_window(*args, **kwargs):
with pytest.warns(MatplotlibDeprecationWarning):
return mlab.apply_window(*args, **kwargs)
class TestWindow:
def setup(self):
np.random.seed(0)
n = 1000
self.sig_rand = np.random.standard_normal(n) + 100.
self.sig_ones = np.ones(n)
def check_window_apply_repeat(self, x, window, NFFT, noverlap):
"""
This is an adaptation of the original window application algorithm.
This is here to test to make sure the new implementation has the same
result.
"""
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
result = np.zeros((NFFT, n))
if np.iterable(window):
windowVals = window
else:
windowVals = window(np.ones(NFFT, x.dtype))
# do the ffts of the slices
for i in range(n):
result[:, i] = windowVals * x[ind[i]:ind[i]+NFFT]
return result
def test_window_none_rand(self):
res = mlab.window_none(self.sig_ones)
assert_array_equal(res, self.sig_ones)
def test_window_none_ones(self):
res = mlab.window_none(self.sig_rand)
assert_array_equal(res, self.sig_rand)
def test_window_hanning_rand(self):
targ = np.hanning(len(self.sig_rand)) * self.sig_rand
res = mlab.window_hanning(self.sig_rand)
assert_allclose(targ, res, atol=1e-06)
def test_window_hanning_ones(self):
targ = np.hanning(len(self.sig_ones))
res = mlab.window_hanning(self.sig_ones)
assert_allclose(targ, res, atol=1e-06)
def test_apply_window_1D_axis1_ValueError(self):
x = self.sig_rand
window = mlab.window_hanning
with pytest.raises(ValueError):
_apply_window(x, window, axis=1, return_window=False)
def test_apply_window_1D_els_wrongsize_ValueError(self):
x = self.sig_rand
window = mlab.window_hanning(np.ones(x.shape[0]-1))
with pytest.raises(ValueError):
_apply_window(x, window)
def test_apply_window_0D_ValueError(self):
x = np.array(0)
window = mlab.window_hanning
with pytest.raises(ValueError):
_apply_window(x, window, axis=1, return_window=False)
def test_apply_window_3D_ValueError(self):
x = self.sig_rand[np.newaxis][np.newaxis]
window = mlab.window_hanning
with pytest.raises(ValueError):
_apply_window(x, window, axis=1, return_window=False)
def test_apply_window_hanning_1D(self):
x = self.sig_rand
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = _apply_window(x, window, return_window=True)
yt = window(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_1D_axis0(self):
x = self.sig_rand
window = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = window(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els_1D_axis0(self):
x = self.sig_rand
window = mlab.window_hanning(np.ones(x.shape[0]))
window1 = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = window1(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window(x[:, i])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els1_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning(np.ones(x.shape[0]))
window1 = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window1(x[:, i])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els2_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = _apply_window(x, window, axis=0, return_window=True)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window1*x[:, i]
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_els3_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = _apply_window(x, window, axis=0, return_window=True)
yt = _apply_window(x, window1, axis=0, return_window=False)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_2D_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
y = _apply_window(x, window, axis=1, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window(x[i, :])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_els1_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning(np.ones(x.shape[1]))
window1 = mlab.window_hanning
y = _apply_window(x, window, axis=1, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window1(x[i, :])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_els2_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[1]))
y, window2 = _apply_window(x, window, axis=1, return_window=True)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window1 * x[i, :]
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_2D_els3_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[1]))
y = _apply_window(x, window, axis=1, return_window=False)
yt = _apply_window(x, window1, axis=1, return_window=False)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_stride_windows_hanning_2D_n13_noverlapn3_axis0(self):
x = self.sig_rand
window = mlab.window_hanning
yi = mlab.stride_windows(x, n=13, noverlap=2, axis=0)
y = _apply_window(yi, window, axis=0, return_window=False)
yt = self.check_window_apply_repeat(x, window, 13, 2)
assert yt.shape == y.shape
assert x.shape != y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_stack_axis1(self):
ydata = np.arange(32)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = _apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
result = _apply_window(ydata, mlab.window_hanning, axis=1,
return_window=False)
assert_allclose(ycontrol, result, atol=1e-08)
def test_apply_window_hanning_2D_stack_windows_axis1(self):
ydata = np.arange(32)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = _apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
result = _apply_window(ydata, mlab.window_hanning, axis=1,
return_window=False)
assert_allclose(ycontrol, result, atol=1e-08)
def test_apply_window_hanning_2D_stack_windows_axis1_unflatten(self):
n = 32
ydata = np.arange(n)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = _apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydata = ydata.flatten()
ydata1 = mlab.stride_windows(ydata, 32, noverlap=0, axis=0)
result = _apply_window(ydata1, mlab.window_hanning, axis=0,
return_window=False)
assert_allclose(ycontrol.T, result, atol=1e-08)
class TestDetrend:
def setup(self):
np.random.seed(0)
n = 1000
x = np.linspace(0., 100, n)
self.sig_zeros = np.zeros(n)
self.sig_off = self.sig_zeros + 100.
self.sig_slope = np.linspace(-10., 90., n)
self.sig_slope_mean = x - x.mean()
sig_rand = np.random.standard_normal(n)
sig_sin = np.sin(x*2*np.pi/(n/100))
sig_rand -= sig_rand.mean()
sig_sin -= sig_sin.mean()
self.sig_base = sig_rand + sig_sin
self.atol = 1e-08
def test_detrend_none_0D_zeros(self):
input = 0.
targ = input
mlab.detrend_none(input)
assert input == targ
def test_detrend_none_0D_zeros_axis1(self):
input = 0.
targ = input
mlab.detrend_none(input, axis=1)
assert input == targ
def test_detrend_str_none_0D_zeros(self):
input = 0.
targ = input
mlab.detrend(input, key='none')
assert input == targ
def test_detrend_detrend_none_0D_zeros(self):
input = 0.
targ = input
mlab.detrend(input, key=mlab.detrend_none)
assert input == targ
def test_detrend_none_0D_off(self):
input = 5.5
targ = input
mlab.detrend_none(input)
assert input == targ
def test_detrend_none_1D_off(self):
input = self.sig_off
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_slope(self):
input = self.sig_slope
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_base(self):
input = self.sig_base
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_base_slope_off_list(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = input.tolist()
res = mlab.detrend_none(input.tolist())
assert res == targ
def test_detrend_none_2D(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
input = np.vstack(arri)
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_2D_T(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
input = np.vstack(arri)
targ = input
res = mlab.detrend_none(input.T)
assert_array_equal(res.T, targ)
def test_detrend_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend_mean(input)
assert_almost_equal(res, targ)
def test_detrend_str_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend(input, key='mean')
assert_almost_equal(res, targ)
def test_detrend_detrend_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_mean)
assert_almost_equal(res, targ)
def test_detrend_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend_mean(input)
assert_almost_equal(res, targ)
def test_detrend_str_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key='mean')
assert_almost_equal(res, targ)
def test_detrend_detrend_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_mean)
assert_almost_equal(res, targ)
def test_detrend_mean_1D_zeros(self):
input = self.sig_zeros
targ = self.sig_zeros
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base(self):
input = self.sig_base
targ = self.sig_base
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_off(self):
input = self.sig_base + self.sig_off
targ = self.sig_base
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_slope(self):
input = self.sig_base + self.sig_slope
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_slope_off(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_axis0(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input, axis=0)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_list(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input.tolist())
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_list_axis0(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input.tolist(), axis=0)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_2D_default(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_2D_none(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=None)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_none_T(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri).T
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=None)
assert_allclose(res.T, targ,
atol=1e-08)
def test_detrend_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend_mean(input, axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_axism1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=-1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_2D_default(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_2D_none(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, axis=None)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_str_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key='mean', axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_str_constant_2D_none_T(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri).T
targ = np.vstack(arrt)
res = mlab.detrend(input, key='constant', axis=None)
assert_allclose(res.T, targ,
atol=1e-08)
def test_detrend_str_default_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key='default', axis=1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_detrend_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key=mlab.detrend_mean, axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_bad_key_str_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend(input, key='spam')
def test_detrend_bad_key_var_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend(input, key=5)
def test_detrend_mean_0D_d0_ValueError(self):
input = 5.5
with pytest.raises(ValueError):
mlab.detrend_mean(input, axis=0)
def test_detrend_0D_d0_ValueError(self):
input = 5.5
with pytest.raises(ValueError):
mlab.detrend(input, axis=0)
def test_detrend_mean_1D_d1_ValueError(self):
input = self.sig_slope
with pytest.raises(ValueError):
mlab.detrend_mean(input, axis=1)
def test_detrend_1D_d1_ValueError(self):
input = self.sig_slope
with pytest.raises(ValueError):
mlab.detrend(input, axis=1)
def test_detrend_mean_2D_d2_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend_mean(input, axis=2)
def test_detrend_2D_d2_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend(input, axis=2)
def test_detrend_linear_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend_linear(input)
assert_almost_equal(res, targ)
def test_detrend_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend_linear(input)
assert_almost_equal(res, targ)
def test_detrend_str_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key='linear')
assert_almost_equal(res, targ)
def test_detrend_detrend_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_linear)
assert_almost_equal(res, targ)
def test_detrend_linear_1d_off(self):
input = self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope(self):
input = self.sig_slope
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_str_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend(input, key='linear')
assert_allclose(res, targ, atol=self.atol)
def test_detrend_detrend_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend(input, key=mlab.detrend_linear)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope_off_list(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input.tolist())
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_2D_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend_linear(input)
def test_detrend_str_linear_2d_slope_off_axis0(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key='linear', axis=0)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_detrend_linear_1d_slope_off_axis1(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key=mlab.detrend_linear, axis=0)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_str_linear_2d_slope_off_axis0_notranspose(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key='linear', axis=1)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_detrend_linear_1d_slope_off_axis1_notranspose(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key=mlab.detrend_linear, axis=1)
assert_allclose(res, targ, atol=self.atol)
@pytest.mark.parametrize('iscomplex', [False, True],
ids=['real', 'complex'], scope='class')
@pytest.mark.parametrize('sides', ['onesided', 'twosided', 'default'],
scope='class')
@pytest.mark.parametrize(
'fstims,len_x,NFFT_density,nover_density,pad_to_density,pad_to_spectrum',
[
([], None, -1, -1, -1, -1),
([4], None, -1, -1, -1, -1),
([4, 5, 10], None, -1, -1, -1, -1),
([], None, None, -1, -1, None),
([], None, -1, -1, None, None),
([], None, None, -1, None, None),
([], 1024, 512, -1, -1, 128),
([], 256, -1, -1, 33, 257),
([], 255, 33, -1, -1, None),
([], 256, 128, -1, 256, 256),
([], None, -1, 32, -1, -1),
],
ids=[
'nosig',
'Fs4',
'FsAll',
'nosig_noNFFT',
'nosig_nopad_to',
'nosig_noNFFT_no_pad_to',
'nosig_trim',
'nosig_odd',
'nosig_oddlen',
'nosig_stretch',
'nosig_overlap',
],
scope='class')
class TestSpectral:
@pytest.fixture(scope='class', autouse=True)
def stim(self, request, fstims, iscomplex, sides, len_x, NFFT_density,
nover_density, pad_to_density, pad_to_spectrum):
Fs = 100.
x = np.arange(0, 10, 1 / Fs)
if len_x is not None:
x = x[:len_x]
# get the stimulus frequencies, defaulting to None
fstims = [Fs / fstim for fstim in fstims]
# get the constants, default to calculated values
if NFFT_density is None:
NFFT_density_real = 256
elif NFFT_density < 0:
NFFT_density_real = NFFT_density = 100
else:
NFFT_density_real = NFFT_density
if nover_density is None:
nover_density_real = 0
elif nover_density < 0:
nover_density_real = nover_density = NFFT_density_real // 2
else:
nover_density_real = nover_density
if pad_to_density is None:
pad_to_density_real = NFFT_density_real
elif pad_to_density < 0:
pad_to_density = int(2**np.ceil(np.log2(NFFT_density_real)))
pad_to_density_real = pad_to_density
else:
pad_to_density_real = pad_to_density
if pad_to_spectrum is None:
pad_to_spectrum_real = len(x)
elif pad_to_spectrum < 0:
pad_to_spectrum_real = pad_to_spectrum = len(x)
else:
pad_to_spectrum_real = pad_to_spectrum
if pad_to_spectrum is None:
NFFT_spectrum_real = NFFT_spectrum = pad_to_spectrum_real
else:
NFFT_spectrum_real = NFFT_spectrum = len(x)
nover_spectrum = 0
NFFT_specgram = NFFT_density
nover_specgram = nover_density
pad_to_specgram = pad_to_density
NFFT_specgram_real = NFFT_density_real
nover_specgram_real = nover_density_real
if sides == 'onesided' or (sides == 'default' and not iscomplex):
# frequencies for specgram, psd, and csd
# need to handle even and odd differently
if pad_to_density_real % 2:
freqs_density = np.linspace(0, Fs / 2,
num=pad_to_density_real,
endpoint=False)[::2]
else:
freqs_density = np.linspace(0, Fs / 2,
num=pad_to_density_real // 2 + 1)
# frequencies for complex, magnitude, angle, and phase spectrums
# need to handle even and odd differently
if pad_to_spectrum_real % 2:
freqs_spectrum = np.linspace(0, Fs / 2,
num=pad_to_spectrum_real,
endpoint=False)[::2]
else:
freqs_spectrum = np.linspace(0, Fs / 2,
num=pad_to_spectrum_real // 2 + 1)
else:
# frequencies for specgram, psd, and csd
            # need to handle even and odd differently
if pad_to_density_real % 2:
freqs_density = np.linspace(-Fs / 2, Fs / 2,
num=2 * pad_to_density_real,
endpoint=False)[1::2]
else:
freqs_density = np.linspace(-Fs / 2, Fs / 2,
num=pad_to_density_real,
endpoint=False)
# frequencies for complex, magnitude, angle, and phase spectrums
# need to handle even and odd differently
if pad_to_spectrum_real % 2:
freqs_spectrum = np.linspace(-Fs / 2, Fs / 2,
num=2 * pad_to_spectrum_real,
endpoint=False)[1::2]
else:
freqs_spectrum = np.linspace(-Fs / 2, Fs / 2,
num=pad_to_spectrum_real,
endpoint=False)
freqs_specgram = freqs_density
# time points for specgram
t_start = NFFT_specgram_real // 2
t_stop = len(x) - NFFT_specgram_real // 2 + 1
t_step = NFFT_specgram_real - nover_specgram_real
t_specgram = x[t_start:t_stop:t_step]
if NFFT_specgram_real % 2:
t_specgram += 1 / Fs / 2
if len(t_specgram) == 0:
t_specgram = np.array([NFFT_specgram_real / (2 * Fs)])
t_spectrum = np.array([NFFT_spectrum_real / (2 * Fs)])
t_density = t_specgram
y = np.zeros_like(x)
for i, fstim in enumerate(fstims):
y += np.sin(fstim * x * np.pi * 2) * 10**i
if iscomplex:
y = y.astype('complex')
# Interestingly, the instance on which this fixture is called is not
# the same as the one on which a test is run. So we need to modify the
# class itself when using a class-scoped fixture.
cls = request.cls
cls.Fs = Fs
cls.sides = sides
cls.fstims = fstims
cls.NFFT_density = NFFT_density
cls.nover_density = nover_density
cls.pad_to_density = pad_to_density
cls.NFFT_spectrum = NFFT_spectrum
cls.nover_spectrum = nover_spectrum
cls.pad_to_spectrum = pad_to_spectrum
cls.NFFT_specgram = NFFT_specgram
cls.nover_specgram = nover_specgram
cls.pad_to_specgram = pad_to_specgram
cls.t_specgram = t_specgram
cls.t_density = t_density
cls.t_spectrum = t_spectrum
cls.y = y
cls.freqs_density = freqs_density
cls.freqs_spectrum = freqs_spectrum
cls.freqs_specgram = freqs_specgram
cls.NFFT_density_real = NFFT_density_real
def check_freqs(self, vals, targfreqs, resfreqs, fstims):
assert resfreqs.argmin() == 0
assert resfreqs.argmax() == len(resfreqs)-1
assert_allclose(resfreqs, targfreqs, atol=1e-06)
for fstim in fstims:
i = np.abs(resfreqs - fstim).argmin()
assert vals[i] > vals[i+2]
assert vals[i] > vals[i-2]
def check_maxfreq(self, spec, fsp, fstims):
# skip the test if there are no frequencies
if len(fstims) == 0:
return
# if twosided, do the test for each side
if fsp.min() < 0:
fspa = np.abs(fsp)
zeroind = fspa.argmin()
self.check_maxfreq(spec[:zeroind], fspa[:zeroind], fstims)
self.check_maxfreq(spec[zeroind:], fspa[zeroind:], fstims)
return
fstimst = fstims[:]
spect = spec.copy()
# go through each peak and make sure it is correctly the maximum peak
while fstimst:
maxind = spect.argmax()
maxfreq = fsp[maxind]
assert_almost_equal(maxfreq, fstimst[-1])
del fstimst[-1]
spect[maxind-5:maxind+5] = 0
def test_spectral_helper_raises(self):
# We don't use parametrize here to handle ``y = self.y``.
for kwargs in [ # Various error conditions:
{"y": self.y+1, "mode": "complex"}, # Modes requiring ``x is y``.
{"y": self.y+1, "mode": "magnitude"},
{"y": self.y+1, "mode": "angle"},
{"y": self.y+1, "mode": "phase"},
{"mode": "spam"}, # Bad mode.
{"y": self.y, "sides": "eggs"}, # Bad sides.
{"y": self.y, "NFFT": 10, "noverlap": 20}, # noverlap > NFFT.
{"NFFT": 10, "noverlap": 10}, # noverlap == NFFT.
{"y": self.y, "NFFT": 10,
"window": np.ones(9)}, # len(win) != NFFT.
]:
with pytest.raises(ValueError):
mlab._spectral_helper(x=self.y, **kwargs)
@pytest.mark.parametrize('mode', ['default', 'psd'])
def test_single_spectrum_helper_unsupported_modes(self, mode):
with pytest.raises(ValueError):
mlab._single_spectrum_helper(x=self.y, mode=mode)
@pytest.mark.parametrize("mode, case", [
("psd", "density"),
("magnitude", "specgram"),
("magnitude", "spectrum"),
])
def test_spectral_helper_psd(self, mode, case):
freqs = getattr(self, f"freqs_{case}")
spec, fsp, t = mlab._spectral_helper(
x=self.y, y=self.y,
NFFT=getattr(self, f"NFFT_{case}"),
Fs=self.Fs,
noverlap=getattr(self, f"nover_{case}"),
pad_to=getattr(self, f"pad_to_{case}"),
sides=self.sides,
mode=mode)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, getattr(self, f"t_{case}"), atol=1e-06)
assert spec.shape[0] == freqs.shape[0]
assert spec.shape[1] == getattr(self, f"t_{case}").shape[0]
def test_csd(self):
freqs = self.freqs_density
spec, fsp = mlab.csd(x=self.y, y=self.y+1,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_allclose(fsp, freqs, atol=1e-06)
assert spec.shape == freqs.shape
def test_csd_padding(self):
"""Test zero padding of csd()."""
if self.NFFT_density is None: # for derived classes
return
sargs = dict(x=self.y, y=self.y+1, Fs=self.Fs, window=mlab.window_none,
sides=self.sides)
spec0, _ = mlab.csd(NFFT=self.NFFT_density, **sargs)
spec1, _ = mlab.csd(NFFT=self.NFFT_density*2, **sargs)
assert_almost_equal(np.sum(np.conjugate(spec0)*spec0).real,
np.sum(np.conjugate(spec1/2)*spec1/2).real)
def test_psd(self):
freqs = self.freqs_density
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert spec.shape == freqs.shape
self.check_freqs(spec, freqs, fsp, self.fstims)
@pytest.mark.parametrize(
'make_data, detrend',
[(np.zeros, mlab.detrend_mean), (np.zeros, 'mean'),
(np.arange, mlab.detrend_linear), (np.arange, 'linear')])
def test_psd_detrend(self, make_data, detrend):
if self.NFFT_density is None:
return
ydata = make_data(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=detrend)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=detrend)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
with pytest.raises(AssertionError):
assert_allclose(spec_b, spec_c, atol=1e-08)
def test_psd_window_hanning(self):
if self.NFFT_density is None:
return
ydata = np.arange(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1, windowVals = _apply_window(ydata1,
mlab.window_hanning,
return_window=True)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydatab = ydata.T.flatten()
ydataf = ydata.flatten()
ycontrol = ycontrol.flatten()
spec_g, fsp_g = mlab.psd(x=ydataf,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_hanning)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_hanning)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_none)
spec_c *= len(ycontrol1)/(np.abs(windowVals)**2).sum()
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
with pytest.raises(AssertionError):
assert_allclose(spec_b, spec_c, atol=1e-08)
def test_psd_window_hanning_detrend_linear(self):
if self.NFFT_density is None:
return
ydata = np.arange(self.NFFT_density)
ycontrol = np.zeros(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = ycontrol
ycontrol2 = ycontrol
ycontrol1, windowVals = _apply_window(ycontrol1,
mlab.window_hanning,
return_window=True)
ycontrol2 = mlab.window_hanning(ycontrol2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydatab = ydata.T.flatten()
ydataf = ydata.flatten()
ycontrol = ycontrol.flatten()
spec_g, fsp_g = mlab.psd(x=ydataf,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear,
window=mlab.window_hanning)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear,
window=mlab.window_hanning)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_none)
spec_c *= len(ycontrol1)/(np.abs(windowVals)**2).sum()
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
with pytest.raises(AssertionError):
assert_allclose(spec_b, spec_c, atol=1e-08)
def test_psd_windowarray(self):
freqs = self.freqs_density
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=np.ones(self.NFFT_density_real))
assert_allclose(fsp, freqs, atol=1e-06)
assert spec.shape == freqs.shape
def test_psd_windowarray_scale_by_freq(self):
win = mlab.window_hanning(np.ones(self.NFFT_density_real))
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning)
spec_s, fsp_s = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning,
scale_by_freq=True)
spec_n, fsp_n = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning,
scale_by_freq=False)
assert_array_equal(fsp, fsp_s)
assert_array_equal(fsp, fsp_n)
assert_array_equal(spec, spec_s)
assert_allclose(spec_s*(win**2).sum(),
spec_n/self.Fs*win.sum()**2,
atol=1e-08)
@pytest.mark.parametrize(
"kind", ["complex", "magnitude", "angle", "phase"])
def test_spectrum(self, kind):
freqs = self.freqs_spectrum
spec, fsp = getattr(mlab, f"{kind}_spectrum")(
x=self.y,
Fs=self.Fs, sides=self.sides, pad_to=self.pad_to_spectrum)
assert_allclose(fsp, freqs, atol=1e-06)
assert spec.shape == freqs.shape
if kind == "magnitude":
self.check_maxfreq(spec, fsp, self.fstims)
self.check_freqs(spec, freqs, fsp, self.fstims)
@pytest.mark.parametrize(
'kwargs',
[{}, {'mode': 'default'}, {'mode': 'psd'}, {'mode': 'magnitude'},
{'mode': 'complex'}, {'mode': 'angle'}, {'mode': 'phase'}])
def test_specgram(self, kwargs):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
**kwargs)
if kwargs.get('mode') == 'complex':
spec = np.abs(spec)
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert spec.shape[0] == freqs.shape[0]
assert spec.shape[1] == self.t_specgram.shape[0]
if kwargs.get('mode') not in ['complex', 'angle', 'phase']:
# using a single freq, so all time slices should be about the same
if np.abs(spec.max()) != 0:
assert_allclose(
np.diff(spec, axis=1).max() / np.abs(spec.max()), 0,
atol=1e-02)
if kwargs.get('mode') not in ['angle', 'phase']:
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_warn_only1seg(self):
"""Warning should be raised if len(x) <= NFFT."""
with pytest.warns(UserWarning, match="Only one segment is calculated"):
mlab.specgram(x=self.y, NFFT=len(self.y), Fs=self.Fs)
def test_psd_csd_equal(self):
Pxx, freqsxx = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
Pxy, freqsxy = mlab.csd(x=self.y, y=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_array_almost_equal_nulp(Pxx, Pxy)
assert_array_equal(freqsxx, freqsxy)
@pytest.mark.parametrize("mode", ["default", "psd"])
def test_specgram_auto_default_psd_equal(self, mode):
"""
Test that mlab.specgram without mode and with mode 'default' and 'psd'
are all the same.
"""
speca, freqspeca, ta = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides)
specb, freqspecb, tb = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode=mode)
assert_array_equal(speca, specb)
assert_array_equal(freqspeca, freqspecb)
assert_array_equal(ta, tb)
@pytest.mark.parametrize(
"mode, conv", [
("magnitude", np.abs),
("angle", np.angle),
("phase", lambda x: np.unwrap(np.angle(x), axis=0))
])
def test_specgram_complex_equivalent(self, mode, conv):
specc, freqspecc, tc = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='complex')
specm, freqspecm, tm = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode=mode)
assert_array_equal(freqspecc, freqspecm)
assert_array_equal(tc, tm)
assert_allclose(conv(specc), specm, atol=1e-06)
def test_psd_windowarray_equal(self):
win = mlab.window_hanning(np.ones(self.NFFT_density_real))
speca, fspa = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=win)
specb, fspb = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_array_equal(fspa, fspb)
assert_allclose(speca, specb, atol=1e-08)
# extra test for cohere...
def test_cohere():
N = 1024
np.random.seed(19680801)
x = np.random.randn(N)
# phase offset
y = np.roll(x, 20)
# high-freq roll-off
y = np.convolve(y, np.ones(20) / 20., mode='same')
cohsq, f = mlab.cohere(x, y, NFFT=256, Fs=2, noverlap=128)
assert_allclose(np.mean(cohsq), 0.837, atol=1.e-3)
assert np.isreal(np.mean(cohsq))
#*****************************************************************
# These tests were taken from SciPy with some minor modifications.
# They can be retrieved from:
# https://github.com/scipy/scipy/blob/master/scipy/stats/tests/test_kdeoth.py
#*****************************************************************
class TestGaussianKDE:
def test_kde_integer_input(self):
"""Regression test for #1181."""
x1 = np.arange(5)
kde = mlab.GaussianKDE(x1)
y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869,
0.13480721]
np.testing.assert_array_almost_equal(kde(x1), y_expected, decimal=6)
def test_gaussian_kde_covariance_caching(self):
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
xs = np.linspace(-10, 10, num=5)
# These expected values are from scipy 0.10, before some changes to
# gaussian_kde. They were not compared with any external reference.
y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754,
0.01664475]
# set it to the default bandwidth.
kde2 = mlab.GaussianKDE(x1, 'scott')
y2 = kde2(xs)
np.testing.assert_array_almost_equal(y_expected, y2, decimal=7)
def test_kde_bandwidth_method(self):
np.random.seed(8765678)
n_basesample = 50
xn = np.random.randn(n_basesample)
# Default
gkde = mlab.GaussianKDE(xn)
# Supply a callable
gkde2 = mlab.GaussianKDE(xn, 'scott')
# Supply a scalar
gkde3 = mlab.GaussianKDE(xn, bw_method=gkde.factor)
xs = np.linspace(-7, 7, 51)
kdepdf = gkde.evaluate(xs)
kdepdf2 = gkde2.evaluate(xs)
assert kdepdf.all() == kdepdf2.all()
kdepdf3 = gkde3.evaluate(xs)
assert kdepdf.all() == kdepdf3.all()
class TestGaussianKDECustom:
def test_no_data(self):
"""Pass no data into the GaussianKDE class."""
with pytest.raises(ValueError):
mlab.GaussianKDE([])
def test_single_dataset_element(self):
"""Pass a single dataset element into the GaussianKDE class."""
with pytest.raises(ValueError):
mlab.GaussianKDE([42])
def test_silverman_multidim_dataset(self):
"""Test silverman's for a multi-dimensional array."""
x1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
with pytest.raises(np.linalg.LinAlgError):
mlab.GaussianKDE(x1, "silverman")
def test_silverman_singledim_dataset(self):
"""Test silverman's output for a single dimension list."""
x1 = np.array([-7, -5, 1, 4, 5])
mygauss = mlab.GaussianKDE(x1, "silverman")
y_expected = 0.76770389927475502
assert_almost_equal(mygauss.covariance_factor(), y_expected, 7)
def test_scott_multidim_dataset(self):
"""Test scott's output for a multi-dimensional array."""
x1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
with pytest.raises(np.linalg.LinAlgError):
mlab.GaussianKDE(x1, "scott")
def test_scott_singledim_dataset(self):
"""Test scott's output a single-dimensional array."""
x1 = np.array([-7, -5, 1, 4, 5])
mygauss = mlab.GaussianKDE(x1, "scott")
y_expected = 0.72477966367769553
assert_almost_equal(mygauss.covariance_factor(), y_expected, 7)
def test_scalar_empty_dataset(self):
"""Test the scalar's cov factor for an empty array."""
with pytest.raises(ValueError):
mlab.GaussianKDE([], bw_method=5)
def test_scalar_covariance_dataset(self):
"""Test a scalar's cov factor."""
np.random.seed(8765678)
n_basesample = 50
multidim_data = [np.random.randn(n_basesample) for i in range(5)]
kde = mlab.GaussianKDE(multidim_data, bw_method=0.5)
assert kde.covariance_factor() == 0.5
def test_callable_covariance_dataset(self):
"""Test the callable's cov factor for a multi-dimensional array."""
np.random.seed(8765678)
n_basesample = 50
multidim_data = [np.random.randn(n_basesample) for i in range(5)]
def callable_fun(x):
return 0.55
kde = mlab.GaussianKDE(multidim_data, bw_method=callable_fun)
assert kde.covariance_factor() == 0.55
def test_callable_singledim_dataset(self):
"""Test the callable's cov factor for a single-dimensional array."""
np.random.seed(8765678)
n_basesample = 50
multidim_data = np.random.randn(n_basesample)
kde = mlab.GaussianKDE(multidim_data, bw_method='silverman')
y_expected = 0.48438841363348911
assert_almost_equal(kde.covariance_factor(), y_expected, 7)
def test_wrong_bw_method(self):
"""Test the error message that should be called when bw is invalid."""
np.random.seed(8765678)
n_basesample = 50
data = np.random.randn(n_basesample)
with pytest.raises(ValueError):
mlab.GaussianKDE(data, bw_method="invalid")
class TestGaussianKDEEvaluate:
def test_evaluate_diff_dim(self):
"""
        Test the evaluate method when the dataset and the points have
        different dimensions.
"""
x1 = np.arange(3, 10, 2)
kde = mlab.GaussianKDE(x1)
x2 = np.arange(3, 12, 2)
y_expected = [
0.08797252, 0.11774109, 0.11774109, 0.08797252, 0.0370153
]
y = kde.evaluate(x2)
np.testing.assert_array_almost_equal(y, y_expected, 7)
def test_evaluate_inv_dim(self):
"""
Invert the dimensions; i.e., for a dataset of dimension 1 [3, 2, 4],
the points should have a dimension of 3 [[3], [2], [4]].
"""
np.random.seed(8765678)
n_basesample = 50
multidim_data = np.random.randn(n_basesample)
kde = mlab.GaussianKDE(multidim_data)
x2 = [[1], [2], [3]]
with pytest.raises(ValueError):
kde.evaluate(x2)
def test_evaluate_dim_and_num(self):
"""Tests if evaluated against a one by one array"""
x1 = np.arange(3, 10, 2)
x2 = np.array([3])
kde = mlab.GaussianKDE(x1)
y_expected = [0.08797252]
y = kde.evaluate(x2)
np.testing.assert_array_almost_equal(y, y_expected, 7)
def test_evaluate_point_dim_not_one(self):
x1 = np.arange(3, 10, 2)
x2 = [np.arange(3, 10, 2), np.arange(3, 10, 2)]
kde = mlab.GaussianKDE(x1)
with pytest.raises(ValueError):
kde.evaluate(x2)
def test_evaluate_equal_dim_and_num_lt(self):
x1 = np.arange(3, 10, 2)
x2 = np.arange(3, 8, 2)
kde = mlab.GaussianKDE(x1)
y_expected = [0.08797252, 0.11774109, 0.11774109]
y = kde.evaluate(x2)
np.testing.assert_array_almost_equal(y, y_expected, 7)
def test_psd_onesided_norm():
u = np.array([0, 1, 2, 3, 1, 2, 1])
dt = 1.0
Su = np.abs(np.fft.fft(u) * dt)**2 / (dt * u.size)
P, f = mlab.psd(u, NFFT=u.size, Fs=1/dt, window=mlab.window_none,
detrend=mlab.detrend_none, noverlap=0, pad_to=None,
scale_by_freq=None,
sides='onesided')
Su_1side = np.append([Su[0]], Su[1:4] + Su[4:][::-1])
assert_allclose(P, Su_1side, atol=1e-06)
def test_psd_oversampling():
"""Test the case len(x) < NFFT for psd()."""
u = np.array([0, 1, 2, 3, 1, 2, 1])
dt = 1.0
Su = np.abs(np.fft.fft(u) * dt)**2 / (dt * u.size)
P, f = mlab.psd(u, NFFT=u.size*2, Fs=1/dt, window=mlab.window_none,
detrend=mlab.detrend_none, noverlap=0, pad_to=None,
scale_by_freq=None,
sides='onesided')
Su_1side = np.append([Su[0]], Su[1:4] + Su[4:][::-1])
assert_almost_equal(np.sum(P), np.sum(Su_1side)) # same energy
|
129547
|
from opera.parser.yaml.node import Node
from .constraint_clause import ConstraintClause
from .property_definition import PropertyDefinition
from ..entity import TypeEntity
from ..list import List
from ..map import Map
from ..reference import DataTypeReference
class DataType(TypeEntity):
REFERENCE = DataTypeReference("data_types")
ATTRS = dict(
constraints=List(ConstraintClause),
properties=Map(PropertyDefinition),
)
@classmethod
def normalize(cls, yaml_node):
# Let the validator handle non-dict case
if not isinstance(yaml_node.value, dict):
return yaml_node
# Make sure we have derived_from key
for k in yaml_node.value:
if k.value == "derived_from":
return yaml_node
# Create default derived_from spec if missing
data = {Node("derived_from"): Node("None")}
data.update(yaml_node.value)
return Node(data, yaml_node.loc)
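# Sketch of what normalize does (values shown informally; real inputs are
# wrapped in opera Node objects, and the type name below is only an
# illustrative placeholder):
#
#   {"properties": {...}}                           -> {"derived_from": "None", "properties": {...}}
#   {"derived_from": "tosca.datatypes.Root", ...}   -> returned unchanged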
|
129573
|
import pytest
from napkin import sd
from napkin import sd_action
class TestParams:
def test_empty(self):
assert "" == str(sd.Params(tuple(), dict()))
def test_args(self):
assert "abc, def" == str(sd.Params(('abc', 'def'), dict()))
def test_args_kargs(self):
assert "abc, foo=1" == str(sd.Params(('abc',),
dict(foo=1)))
class TestBase(object):
def check(self, context, exp_actions):
actions = context._sequence
# This is for better debugging
assert str(actions) == str(exp_actions)
assert actions == exp_actions
class TestTopLevel(TestBase):
def test_call(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
bar.func()
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.ImplicitReturn(),
])
def test_call_twice(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
bar.func()
bar.func2()
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.ImplicitReturn(),
sd_action.Call(foo, bar, 'func2', sd.Params()),
sd_action.ImplicitReturn(),
])
def test_call_with_return(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
bar.func().ret('val')
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.Return(sd.Params(('val',))),
])
def test_call_with_return_twice(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
bar.func().ret('val')
bar.func2().ret('val2')
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.Return(sd.Params(('val',))),
sd_action.Call(foo, bar, 'func2', sd.Params()),
sd_action.Return(sd.Params(('val2',))),
])
def test_fail_when_separate_return_called(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
bar.func()
with pytest.raises(sd.CallError):
c.ret()
def test_fail_when_top_level_caller_set_twice(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
bar.func()
with pytest.raises(sd.TopLevelCallerError):
with foo:
bar.func()
def test_noop_when_do_nothing_in_top_level_caller(self):
c = sd.Context()
foo = c.object('foo')
with foo:
pass
self.check(c, [
])
class TestSecondLevel(TestBase):
def test_call(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
baz = c.object('baz')
with foo:
with bar.func():
baz.func2()
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.Call(bar, baz, 'func2', sd.Params()),
sd_action.ImplicitReturn(),
sd_action.ImplicitReturn(),
])
def test_call_twice(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
baz = c.object('baz')
with foo:
with bar.func():
baz.func2()
baz.func3()
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.Call(bar, baz, 'func2', sd.Params()),
sd_action.ImplicitReturn(),
sd_action.Call(bar, baz, 'func3', sd.Params()),
sd_action.ImplicitReturn(),
sd_action.ImplicitReturn(),
])
def test_call_with_return(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
baz = c.object('baz')
with foo:
with bar.func():
baz.func2().ret()
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.Call(bar, baz, 'func2', sd.Params()),
sd_action.Return(sd.Params()),
sd_action.ImplicitReturn(),
])
def test_return_from_outside_func(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
baz = c.object('baz')
with foo:
with bar.func():
baz.func2()
c.ret()
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.Call(bar, baz, 'func2', sd.Params()),
sd_action.ImplicitReturn(),
sd_action.Return(sd.Params()),
])
def test_fail_when_call_after_returning_from_outside_func(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
baz = c.object('baz')
with foo:
with bar.func():
baz.func2()
c.ret()
with pytest.raises(sd.CallError):
baz.func3()
def test_fail_when_return_again_from_outside_func(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
baz = c.object('baz')
with foo:
with bar.func():
baz.func2()
c.ret()
with pytest.raises(sd.ReturnError):
c.ret()
def test_return_from_outside_func_without_calling_any(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
with bar.func():
c.ret()
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.Return(sd.Params()),
])
def test_do_nothing_in_outside_func(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
with bar.func():
pass
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.ImplicitReturn(),
])
class TestCreate(TestBase):
def test_simple(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
c.create(bar)
self.check(c, [
sd_action.Call(foo, bar, '<<create>>', sd.Params(), flags='c'),
sd_action.ImplicitReturn(),
])
def test_non_default_method(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
c.create(bar.new())
self.check(c, [
sd_action.Call(foo, bar, 'new', sd.Params(), flags='c'),
sd_action.ImplicitReturn(),
])
def test_constructor_params(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
c.create(bar.new('a', name='bar'))
self.check(c, [
sd_action.Call(foo, bar, 'new',
params=sd.Params(('a',), dict(name='bar')),
flags='c'),
sd_action.ImplicitReturn(),
])
def test_call_others_in_constructor(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
baz = c.object('baz')
with foo:
with c.create(bar):
baz.func()
self.check(c, [
sd_action.Call(foo, bar, '<<create>>', sd.Params(), flags='c'),
sd_action.Call(bar, baz, 'func', sd.Params()),
sd_action.ImplicitReturn(),
sd_action.ImplicitReturn(),
])
def test_fail_if_called_at_top_level(self):
c = sd.Context()
with pytest.raises(sd.CreateError):
bar = c.object('bar')
c.create(bar)
def test_fail_if_create_object_already_being_used(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
bar.func()
with pytest.raises(sd.CreateError):
c.create(bar)
def test_fail_if_create_object_twice(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
c.create(bar)
with pytest.raises(sd.CreateError):
c.create(bar)
class TestDestroy(TestBase):
def test_simple(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
bar.func()
c.destroy(bar)
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.ImplicitReturn(),
sd_action.Call(foo, bar, '<<destroy>>', sd.Params(), flags='d'),
sd_action.ImplicitReturn(),
])
def test_fail_when_call_destroyed_object(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
c.destroy(bar)
with pytest.raises(sd.CallError):
bar.func()
    def test_call_other_methods_of_the_same_object_from_destructor(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
with c.destroy(bar):
bar.func()
def test_fail_when_destroy_twice_the_same_object(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
c.destroy(bar)
with pytest.raises(sd.CallError):
c.destroy(bar)
def test_fail_if_called_at_top_level(self):
c = sd.Context()
foo = c.object('foo')
with pytest.raises(sd.DestroyError):
c.destroy(foo)
class TestNote(TestBase):
def test_over_object_implicit(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
c.note('blah')
bar.func()
self.check(c, [
sd_action.Note('blah', obj=foo),
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.ImplicitReturn(),
])
def test_over_object_explicit(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
foo.note('blah')
bar.note('blah2')
bar.func()
self.check(c, [
sd_action.Note('blah', obj=foo),
sd_action.Note('blah2', obj=bar),
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.ImplicitReturn(),
])
def test_call_specific(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
baz = c.object('baz')
with foo:
bar.func().note('callee side note')
baz.func().note(caller='caller side note',
callee='callee side note')
baz.func2().note('note').ret('val')
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params(),
notes=['callee side note', None]),
sd_action.ImplicitReturn(),
sd_action.Call(foo, baz, 'func', sd.Params(),
notes=['callee side note', 'caller side note']),
sd_action.ImplicitReturn(),
sd_action.Call(foo, baz, 'func2', sd.Params(),
notes=['note', None]),
sd_action.Return(sd.Params(('val',))),
])
class TestOutside(TestBase):
def test_fail_as_callee(self):
c = sd.Context()
foo = c.object('foo')
outside = c.outside()
with foo:
with pytest.raises(sd.CallError,
match='Cannot be invoked to the outside'):
outside.func()
|
129622
|
import datetime
from enum import Enum
from typing import Dict, List
from dataclasses import field
# we use pydantic for dataclasses so that we can
# easily load and validate JSON reports.
#
# pydantic checks all the JSON fields look as they should
# while using the nice and familiar dataclass syntax.
#
# really, you should just pretend we're using stock dataclasses.
from pydantic.dataclasses import dataclass
@dataclass(frozen=True)
class StackString:
"""
here's what the following members represent:
[smaller addresses]
+---------------+ <- stack_pointer (top of stack)
| | \
+---------------+ | offset
| | /
+---------------+
| "abc" | \
+---------------+ |
| | |
+---------------+ | frame_offset
| | |
+---------------+ |
| | /
+---------------+ <- original_stack_pointer (bottom of stack, probably bp)
[bigger addresses]
Attributes:
function: the address of the function from which the stackstring was extracted
string: the extracted string
program_counter: the program counter at the moment the string was extracted
stack_pointer: the stack counter at the moment the string was extracted
original_stack_pointer: the initial stack counter when the function was entered
        offset: the offset from the top of the stack (stack_pointer) at which the stack string was found
frame_offset: the offset from the function frame at which the stack string was found
"""
function: int
string: str
program_counter: int
stack_pointer: int
original_stack_pointer: int
offset: int
frame_offset: int
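# Reading the diagram in the docstring above: the string sits `offset` bytes
# below stack_pointer and `frame_offset` bytes above original_stack_pointer,
# so roughly the following relations are expected to hold (this is an
# interpretation of the diagram, not a statement of the extraction code):
#
#   string_address ~= stack_pointer + offset
#   string_address ~= original_stack_pointer - frame_offset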
class AddressType(str, Enum):
STACK = "STACK"
GLOBAL = "GLOBAL"
HEAP = "HEAP"
@dataclass(frozen=True)
class DecodedString:
"""
A decoding string and details about where it was found.
Attributes:
address: address of the string in memory
address_type: type of the address of the string in memory
string: the decoded string
decoded_at: the address at which the decoding routine is called
decoding_routine: the address of the decoding routine
"""
address: int
address_type: AddressType
string: str
decoded_at: int
decoding_routine: int
class StringEncoding(str, Enum):
ASCII = "ASCII"
UTF16LE = "UTF-16LE"
@dataclass(frozen=True)
class StaticString:
"""
A string extracted from the raw bytes of the input.
Attributes:
string: the string
offset: the offset into the input where the string is found
encoding: the string encoding, like ASCII or unicode
"""
string: str
offset: int
encoding: StringEncoding
@dataclass
class Metadata:
file_path: str
imagebase: int = 0
date: datetime.datetime = datetime.datetime.now()
analysis: Dict[str, Dict] = field(default_factory=dict)
enable_stack_strings: bool = True
enable_decoded_strings: bool = True
enable_static_strings: bool = True
@dataclass
class Strings:
stack_strings: List[StackString] = field(default_factory=list)
decoded_strings: List[DecodedString] = field(default_factory=list)
static_strings: List[StaticString] = field(default_factory=list)
@dataclass
class ResultDocument:
metadata: Metadata
strings: Strings = field(default_factory=Strings)
@classmethod
def parse_file(cls, path):
return cls.__pydantic_model__.parse_file(path)
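# Minimal usage sketch (the report path below is hypothetical; parse_file simply
# delegates to pydantic's parse_file on the generated model, so the JSON is
# validated against the dataclasses above while loading):
#
#   doc = ResultDocument.parse_file("report.json")
#   for s in doc.strings.stack_strings:
#       print(hex(s.function), s.string)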
|
129623
|
import ast
import sqlite3
import sys
import traceback
import unittest
from io import StringIO
from time import sleep
from unittest import mock
from littleutils import SimpleNamespace, only
import sorcery as spells
from sorcery import unpack_keys, unpack_attrs, print_args, magic_kwargs, maybe, args_with_source, spell
from sorcery.spells import PYPY
class MyListWrapper(object):
def __init__(self, lst):
self.list = lst
def _make_new_wrapper(self, method_name, *args, **kwargs):
method = getattr(self.list, method_name)
new_list = method(*args, **kwargs)
return type(self)(new_list)
append, extend, clear, __repr__, __str__, __eq__, __hash__, \
__contains__, __len__, remove, insert, pop, index, count, \
sort, __iter__, reverse, __iadd__ = spells.delegate_to_attr('list')
copy, __add__, __radd__, __mul__, __rmul__ = spells.call_with_name(_make_new_wrapper)
class Foo(object):
@magic_kwargs
def bar(self, **kwargs):
return set(kwargs.items()) | {self}
@magic_kwargs
def magic_only_kwarg(n, *, y):
return n, y
class TestStuff(unittest.TestCase):
def test_unpack_keys_basic(self):
obj = SimpleNamespace(thing=SimpleNamespace())
d = dict(foo=1, bar=3, spam=7, baz=8, x=9)
out = {}
foo, obj.thing.spam, obj.bar, out['baz'] = unpack_keys(d)
self.assertEqual(foo, d['foo'])
self.assertEqual(obj.bar, d['bar'])
self.assertEqual(obj.thing.spam, d['spam'])
self.assertEqual(out, {'baz': d['baz']})
def test_unpack_keys_for_loop(self):
results = []
for x, y in unpack_keys([
dict(x=1, y=2),
dict(x=3, z=4),
dict(a=5, y=6),
dict(b=7, c=8),
], default=999):
results.append((x, y))
self.assertEqual(results, [
(1, 2),
(3, 999),
(999, 6),
(999, 999),
])
def test_unpack_keys_list_comprehension(self):
self.assertEqual(
[(y, x) for x, y in unpack_keys([
dict(x=1, y=2),
dict(x=3, y=4),
])],
[
(2, 1),
(4, 3),
])
def test_unpack_keys_bigger_expression(self):
x, y = map(int, unpack_keys(dict(x='1', y='2')))
self.assertEqual(x, 1)
self.assertEqual(y, 2)
def test_unpack_keys_skip_single_assigned_name(self):
x, y = [int(v) for v in unpack_keys(dict(x='1', y='2'))]
self.assertEqual(x, 1)
self.assertEqual(y, 2)
def test_unpack_keys_extras(self):
env = dict(DATABASE_USERNAME='me',
DATABASE_PASSWORD='<PASSWORD>')
username, password = unpack_keys(env, prefix='DATABASE_', swapcase=True)
self.assertEqual(username, 'me')
self.assertEqual(password, '<PASSWORD>')
def test_unpack_attrs(self):
obj = SimpleNamespace(aa='bv', bb='cc', cc='aa')
cc, bb, aa = unpack_attrs(obj)
self.assertEqual(aa, obj.aa)
self.assertEqual(bb, obj.bb)
self.assertEqual(cc, obj.cc)
d, e = unpack_attrs(obj, default=9)
assert d == e == 9
def test_print_args(self):
out = StringIO()
x = 3
y = 4
print_args(x + y,
x * y,
x -
y, file=out)
self.assertEqual('''\
x + y =
7
x * y =
12
x -
y =
-1
''', out.getvalue())
def test_dict_of(self):
a = 1
obj = SimpleNamespace(b=2)
self.assertEqual(spells.dict_of(
a, obj.b,
c=3, d=4
), dict(
a=a, b=obj.b,
c=3, d=4))
def test_no_starargs_in_dict_of(self):
args = [1, 2]
with self.assertRaises(TypeError):
spells.dict_of(*args)
def test_delegation(self):
lst = MyListWrapper([1, 2, 3])
lst.append(4)
lst.extend([1, 2])
lst = (lst + [5]).copy()
self.assertEqual(type(lst), MyListWrapper)
self.assertEqual(lst, [1, 2, 3, 4, 1, 2, 5])
def test_magic_kwargs(self):
foo = Foo()
x = 1
y = 2
w = 10
self.assertEqual(foo.bar(x, y, z=3),
{('x', x), ('y', y), ('z', 3), foo})
self.assertEqual(magic_only_kwarg(x, y), (x, y))
@magic_kwargs
def spam(n, **kwargs):
return n, kwargs
self.assertEqual(spam(x, y, z=5),
(x, dict(y=y, z=5)))
@magic_kwargs
def spam(n, m, **kwargs):
return n, m, kwargs
self.assertEqual(spam(x, w, y, z=5),
(x, w, dict(y=y, z=5)))
with self.assertRaises(TypeError):
@magic_kwargs
def _(a=1):
print(a)
with self.assertRaises(TypeError):
@magic_kwargs
def _(*a):
print(a)
def test_maybe(self):
if PYPY:
with self.assertRaises(NotImplementedError):
maybe(None)
return
n = None
assert maybe(n) is None
self.assertIsNone(maybe(n))
assert maybe(n).a.b.c()[4]().asd.asd()() is None
assert maybe(n)()()() is None
assert maybe(0) == 0
assert maybe({'a': 3})['a'] == 3
assert maybe({'a': {'b': 3}})['a']['b'] == 3
assert maybe({'a': {'b': 3}})['a']['b'] + 2 == 5
assert maybe({'a': {'b': None}})['a']['b'] is None
def test_select_from(self):
conn = sqlite3.connect(':memory:')
c = conn.cursor()
c.execute('CREATE TABLE points (x INT, y INT)')
c.execute("INSERT INTO points VALUES (5, 3), (8, 1)")
conn.commit()
assert [(3, 5), (1, 8)] == [(y, x) for y, x in spells.select_from('points')]
y = 1
x = spells.select_from('points', where=[y])
assert (x, y) == (8, 1)
def test_multiple_attr_calls(self):
x = 3
y = 5
self.assertEqual([
spells.dict_of(x),
spells.dict_of(y),
], [dict(x=x), dict(y=y)])
self.assertEqual([spells.dict_of(x), spells.dict_of(y)],
[dict(x=x), dict(y=y)])
def test_no_assignment(self):
with self.assertRaises(TypeError):
unpack_keys(dict(x=1, y=2))
def test_spell_repr(self):
self.assertRegex(repr(spells.dict_of),
r'Spell\(<function dict_of at 0x.+>\)')
def test_assigned_names(self):
x, y = ['_' + s for s in spells.assigned_names()]
self.assertEqual(x, '_x')
self.assertEqual(y, '_y')
# noinspection PyTrailingSemicolon
def test_semicolons(self):
# @formatter:off
tester(1); tester(2); tester(3)
tester(9
); tester(
8); tester(
99
); tester(33); tester([4,
5, 6, [
7]])
# @formatter:on
def test_args_with_source(self):
self.assertEqual(args_with_source(1 + 2, 3 * 4),
[("1 + 2", 3), ("3 * 4", 12)])
self.assertEqual(
args_with_source(
self.assertEqual(args_with_source(1 + 2), [("1 + 2", 3)])),
[(
'self.assertEqual(args_with_source(1 + 2), [("1 + 2", 3)])',
None,
)],
)
def test_switch(self):
result = spells.switch(2, lambda: {
1: 10,
2: 20,
1 / 0: 1 / 0
})
self.assertEqual(result, 20)
result = spells.switch(2, lambda: {
1: 10,
{{5, 2, 1 / 0}}: 20,
3: 1 / 0
})
self.assertEqual(result, 20)
with self.assertRaises(KeyError):
spells.switch(2, lambda: {
1: 10,
3: 30,
})
result = spells.switch(2, lambda: {
1: 10,
3: 30,
}, default=-1)
self.assertEqual(result, -1)
with self.assertRaises(TypeError):
spells.switch(2, {
1: 10,
2: 20,
})
with self.assertRaises(TypeError):
spells.switch(2, lambda: [{
1: 10,
2: 20,
}])
def test_timeit_in_function(self):
with self.assertRaises(ValueError):
spells.timeit()
def test_decorator(self):
@empty_decorator
@decorator_with_args(tester('123'), x=int())
@tester(list(tuple([1, 2])), returns=empty_decorator)
@tester(
list(
tuple(
[3, 4])),
returns=empty_decorator)
@empty_decorator
@decorator_with_args(
str(),
x=int())
@tester(list(tuple([5, 6])), returns=empty_decorator)
@tester(list(tuple([7, 8])), returns=empty_decorator)
@empty_decorator
@decorator_with_args(tester('sdf'), x=tester('123234'))
def foo():
pass
def test_list_comprehension(self):
str([tester(int(x)) for x in tester([1]) for _ in tester([2]) for __ in [3]])
str([[[tester(int(x)) for x in tester([1])] for _ in tester([2])] for __ in [3]])
return str([(1, [
(2, [
tester(int(x)) for x in tester([1])])
for _ in tester([2])])
for __ in [3]])
def test_lambda(self):
self.assertEqual((lambda x: (tester(x), tester(x)))(tester(3)), (3, 3))
(lambda: (lambda: tester(1))())()
self.assertEqual((lambda: [tester(x) for x in tester([1, 2])])(), [1, 2])
def test_indirect_call(self):
dict(x=tester)['x'](tester)(3)
def test_compound_statements(self):
with self.assertRaises(TypeError):
try:
for _ in tester([2]):
while tester(0):
pass
else:
tester(4)
else:
tester(5)
raise ValueError
except tester(ValueError):
tester(9)
raise TypeError
finally:
tester(10)
# PyCharm getting confused somehow?
# noinspection PyUnreachableCode
str()
with self.assertRaises(tester(Exception)):
if tester(0):
pass
elif tester(0):
pass
elif tester(1 / 0):
pass
def test_generator(self):
def gen():
for x in [1, 2]:
yield tester(x)
gen2 = (tester(x) for x in tester([1, 2]))
assert list(gen()) == list(gen2) == [1, 2]
@spell
def tester(frame_info, arg, returns=None):
result = eval(
compile(ast.Expression(only(frame_info.call.args)), '<>', 'eval'),
frame_info.frame.f_globals,
frame_info.frame.f_locals,
)
assert result == arg, (result, arg)
if returns is None:
return arg
return returns
assert tester([1, 2, 3]) == [1, 2, 3]
def empty_decorator(f):
return f
def decorator_with_args(*_, **__):
return empty_decorator
class TestTimeit(unittest.TestCase):
def patch(self, *args, **kwargs):
patcher = mock.patch(*args, **kwargs)
patcher.start()
self.addCleanup(patcher.stop)
def setUp(self):
self.patch('sorcery.spells._raise', lambda e: e)
self.patch('sys.stdout', StringIO())
def assert_usual_output(self):
self.assertRegex(
sys.stdout.getvalue(),
r"""
Number of trials: 1
Method 1: 1\.\d{3}
Method 2: 1\.\d{3}
Method 1: 1\.\d{3}
Method 2: 1\.\d{3}
Best times:
-----------
Method 1: 1\.\d{3}
Method 2: 1\.\d{3}
""".strip())
def test_no_result(self):
if spells.timeit(repeat=2):
sleep(1)
else:
sleep(1.1)
self.assert_usual_output()
# noinspection PyUnusedLocal
def test_matching_result(self):
if spells.timeit(repeat=2):
sleep(1)
result = 3
else:
sleep(1.1)
result = 3
self.assert_usual_output()
# noinspection PyUnusedLocal
def test_not_matching_result(self):
with self.assertRaises(AssertionError):
if spells.timeit():
result = 3
else:
result = 4
def test_exception(self):
try:
if spells.timeit():
print(1 / 0)
else:
pass
except ZeroDivisionError:
traceback.print_exc(file=sys.stdout)
stdout = sys.stdout.getvalue()
self.assertIn('<timeit-src>', stdout)
self.assertIn('1 / 0', stdout)
if __name__ == '__main__':
unittest.main()
|
129627
|
from django.db import models
from taggit.managers import TaggableManager
class BaseModel(models.Model):
name = models.CharField(max_length=50, unique=True)
tags = TaggableManager()
def __unicode__(self):
return self.name
class Meta(object):
abstract = True
class AlphaModel(BaseModel):
pass
class BetaModel(BaseModel):
pass
|
129656
|
from axelrod import Player
class Grumpy(Player):
"""A player that defects after a ceratin level of grumpiness. Grumpiness increases when the opponent defects and decreases when the opponent co-operates."""
name = 'Grumpy'
def __init__(self, starting_state = 'Nice', grumpy_threshold = 10, nice_threshold = -10):
"""Player starts of nice be default with set thresholds"""
super(Grumpy, self).__init__()
self.history = []
self.score = 0
self.state = starting_state
self.starting_state = starting_state
self.grumpy_threshold = grumpy_threshold
self.nice_threshold = nice_threshold
def strategy(self, opponent):
"""A player that gets grumpier the more the opposition defects, and nicer the more they cooperate.
Starts off Nice, but becomes grumpy once the grumpiness threshold is hit.
        Once grumpy, it does not turn nice at the same threshold; grumpiness must fall below a much lower threshold before the player becomes nice again.
"""
self.grumpiness = sum(play=='D' for play in opponent.history) - sum(play=='C' for play in opponent.history)
if self.state == 'Nice':
if self.grumpiness > self.grumpy_threshold:
self.state = 'Grumpy'
return 'D'
return 'C'
if self.state == 'Grumpy':
if self.grumpiness < self.nice_threshold:
self.state = 'Nice'
return 'C'
return 'D'
def reset(self):
"""Resets score, history and state for the next round of the tournement."""
self.history = []
self.state = self.starting_state
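# Illustrative sketch of the threshold behaviour described in the docstrings.
# The opponent below is a stand-in with only a `history` attribute, which is
# all strategy() reads; it is not part of the axelrod API shown here:
#
#   class _Opponent:
#       history = ['D', 'D', 'D']
#
#   p = Grumpy(grumpy_threshold=2, nice_threshold=-2)
#   p.strategy(_Opponent())   # grumpiness == 3 > 2, so state flips to 'Grumpy' and 'D' is returned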
|
129670
|
import EmailParser.pst_parser
"""
if __name__ == "__main__":
pass
else:
from EmailBoxClass import EmailBox
EmailBox.main = EmailParser.pst_parser.main
"""
|
129685
|
from arekit.common.news.objects_parser import SentenceObjectsParserPipelineItem
from arekit.contrib.source.rusentrel.sentence import RuSentRelSentence
class RuSentRelTextEntitiesParser(SentenceObjectsParserPipelineItem):
def __init__(self):
super(RuSentRelTextEntitiesParser, self).__init__(
iter_objs_func=self.__iter_subs_values_with_bounds)
@staticmethod
def __iter_subs_values_with_bounds(sentence):
assert(isinstance(sentence, RuSentRelSentence))
return sentence.iter_entity_with_local_bounds()
|
129691
|
import os
import unittest
from nose.plugins import Plugin
from nose.plugins.plugintest import PluginTester
from nose.plugins.manager import ZeroNinePlugin
here = os.path.abspath(os.path.dirname(__file__))
support = os.path.join(os.path.dirname(os.path.dirname(here)), 'support')
class EmptyPlugin(Plugin):
pass
class TestEmptyPlugin(PluginTester, unittest.TestCase):
activate = '--with-empty'
plugins = [ZeroNinePlugin(EmptyPlugin())]
suitepath = os.path.join(here, 'empty_plugin.rst')
def test_empty_zero_nine_does_not_crash(self):
        print(self.output)
assert "'EmptyPlugin' object has no attribute 'loadTestsFromPath'" \
not in self.output
|
129746
|
from __future__ import (
absolute_import,
unicode_literals,
)
import re
from typing import (
Any as AnyType,
Iterable,
List as ListType,
)
import warnings
import six
from conformity.fields.basic import (
Introspection,
UnicodeString,
)
from conformity.fields.utils import strip_none
from conformity.fields.net import IPAddress
from conformity.types import Error
class EmailAddress(UnicodeString):
"""
Conformity field that ensures that the value is a unicode string that is a valid email address according to
RFC 2822 and optionally accepts non-compliant fields listed in the `whitelist` argument. Substantially copied from
Django (v2.0.x): https://github.com/django/django/blob/stable/2.0.x/django/core/validators.py#L164.
"""
introspect_type = 'email_address'
ip_schema = IPAddress()
# unused, will be removed in version 2.0.0
message = None # type: ignore
code = None # type: ignore
user_regex = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)', # quoted-string
re.IGNORECASE
)
domain_regex = re.compile(
# max length for domain name labels is 63 characters per RFC 1034
r'((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(?<!-))\Z',
re.IGNORECASE
)
literal_regex = re.compile(
# literal form, ipv4 or ipv6 address (SMTP 4.1.3)
r'\[([A-f0-9:.]+)\]\Z',
re.IGNORECASE
)
domain_whitelist = frozenset({'localhost'})
def __init__(self, message=None, code=None, whitelist=None, **kwargs):
# type: (None, None, Iterable[six.text_type], **AnyType) -> None
"""
Construct a new email address field.
:param message: Deprecated, unused, and will be removed in version 2.0.0
:param code: Deprecated, unused, and will be removed in version 2.0.0
:param whitelist: If specified, an invalid domain part will be permitted if it is in this list
"""
if whitelist is not None and (
not hasattr(whitelist, '__iter__') or
not all(isinstance(c, six.text_type) for c in whitelist)
):
raise TypeError("'whitelist' must be an iterable of unicode strings")
if message is not None or code is not None:
warnings.warn(
'Arguments `message` and `code` are deprecated in EmailAddress and will be removed in Conformity 2.0.',
DeprecationWarning,
)
super(EmailAddress, self).__init__(**kwargs)
if whitelist is not None:
self.domain_whitelist = whitelist if isinstance(whitelist, frozenset) else frozenset(whitelist)
def errors(self, value): # type: (AnyType) -> ListType[Error]
# Get any basic type errors
result = super(EmailAddress, self).errors(value)
if result:
return result
if not value or '@' not in value:
return [Error('Not a valid email address (missing @ sign)')]
user_part, domain_part = value.rsplit('@', 1)
if not self.user_regex.match(user_part):
return [Error('Not a valid email address (invalid local user field)', pointer=user_part)]
if domain_part in self.domain_whitelist or self.is_domain_valid(domain_part):
return []
else:
try:
domain_part = domain_part.encode('idna').decode('ascii')
if self.is_domain_valid(domain_part):
return []
except UnicodeError:
pass
return [Error('Not a valid email address (invalid domain field)', pointer=domain_part)]
@classmethod
def is_domain_valid(cls, domain_part): # type: (six.text_type) -> bool
if cls.domain_regex.match(domain_part):
return True
literal_match = cls.literal_regex.match(domain_part)
if literal_match:
ip_address = literal_match.group(1)
if cls.ip_schema.errors(ip_address):
return False
else:
return True
return False
def introspect(self): # type: () -> Introspection
return strip_none({
'type': self.introspect_type,
'description': self.description,
'domain_whitelist': (
sorted(self.domain_whitelist)
if self.domain_whitelist and self.domain_whitelist is not self.__class__.domain_whitelist
else None
),
})
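# Usage sketch (addresses are hypothetical; errors() returns an empty list for
# valid input and a list of Error objects otherwise):
#
#   field = EmailAddress(whitelist={'internal'})
#   field.errors('user@example.com')   # -> []
#   field.errors('no-at-sign')         # -> [Error('Not a valid email address (missing @ sign)')]
#   field.errors('user@internal')      # -> [] because 'internal' is whitelisted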
|
129759
|
import sublime
import sublime_lib.flags as flags
from sublime_lib.vendor.python.enum import IntFlag
from functools import reduce
from unittest import TestCase
class TestFlags(TestCase):
def _test_enum(self, enum, prefix=''):
for item in enum:
self.assertEqual(item, getattr(sublime, prefix + item.name))
self.assertEqual(item, enum(item.name))
if issubclass(enum, IntFlag):
self.assertEqual(
enum(*[item.name for item in enum]),
reduce(lambda a, b: a | b, enum)
)
def test_flags(self):
self._test_enum(flags.DialogResult, 'DIALOG_')
self._test_enum(flags.PointClass, 'CLASS_')
self._test_enum(flags.PhantomLayout, 'LAYOUT_')
self._test_enum(flags.HoverLocation, 'HOVER_')
self._test_enum(flags.QueryContextOperator, 'OP_')
self._test_enum(flags.FindOption)
self._test_enum(flags.RegionOption)
self._test_enum(flags.PopupOption)
self._test_enum(flags.OpenFileOption)
self._test_enum(flags.QuickPanelOption)
self._test_enum(flags.CompletionOptions)
def test_from_strings(self):
self.assertEqual(
flags.RegionOption('DRAW_EMPTY', 'HIDE_ON_MINIMAP'),
flags.RegionOption.DRAW_EMPTY | flags.RegionOption.HIDE_ON_MINIMAP
)
def test_from_strings_empty(self):
self.assertEqual(
flags.RegionOption(),
flags.RegionOption(0)
)
def test_query_context_operators(self):
ops = flags.QueryContextOperator
tests = [
('EQUAL', 'x', 'x', 'y'),
('REGEX_MATCH', 'aaa', r'a+', r'a'),
('REGEX_CONTAINS', 'aaa', r'a', r'b'),
]
for op, key, success, failure in tests:
self.assertTrue(ops(op).apply(key, success))
self.assertFalse(ops(op).apply(key, failure))
self.assertTrue(ops('NOT_' + op).apply(key, failure))
self.assertFalse(ops('NOT_' + op).apply(key, success))
|
129877
|
from django.db import migrations
try:
from django.contrib.postgres.fields import CIEmailField
except ImportError:
CIEmailField = None
else:
from django.contrib.postgres.operations import CITextExtension
def _operations():
if CIEmailField:
yield CITextExtension()
yield migrations.AlterField(
model_name="emailuser",
name="email",
field=CIEmailField(
db_index=True, max_length=254, unique=True, verbose_name="email address"
),
)
else:
yield migrations.RunSQL(
sql=(
"CREATE UNIQUE INDEX mailauth_user_emailuser_email_upper_idx"
' ON mailauth_user_emailuser (UPPER("email"));',
),
reverse_sql=("DROP INDEX mailauth_user_emailuser_email_upper_idx;",),
)
class Migration(migrations.Migration):
dependencies = [
("mailauth_user", "0002_emailuser_session_salt"),
]
operations = list(_operations())
|
129908
|
from slims.output import file_value
from slims.slims import Slims
from slims.step import Step, file_output
def execute():
# Make sure the path to the file exists
return file_value('C:/Users/User/Downloads/file.txt')
slims = Slims("slims", url="http://127.0.0.1:9999/", token="<PASSWORD>", local_host="0.0.0.0", local_port=5000)
slims.add_flow(
flow_id="FLOW_0001",
name="Download an file from server",
usage="CONTENT_MANAGEMENT",
steps=[
Step(
name="This downloads a file.",
action=execute,
output=[
file_output()
])
])
|
129949
|
import dash
dash.register_page(
__name__,
title="Forward Outlook",
description="This is the forward outlook", # should accept callable too
path="/forward-outlook",
image="birds.jpeg",
)
def layout():
return "Forward outlook"
|
129994
|
from __future__ import print_function, division
import sys,os
qspin_path = os.path.join(os.getcwd(),"../")
sys.path.insert(0,qspin_path)
from quspin.basis import spinless_fermion_basis_1d
from quspin.basis import spinless_fermion_basis_general
import numpy as np
from itertools import product
def check_ME(b1,b2,opstr,indx,dtype,err_msg):
if b1.Ns != b2.Ns:
print(b1._basis)
print(b2._basis)
raise Exception("number of states do not match.")
ME1,row1,col1=b1.Op(opstr,indx,1.0,dtype)
ME2,row2,col2=b2.Op(opstr,indx,1.0,dtype)
if len(ME1) != len(ME2):
print(ME1)
print(row1)
print(col1)
print()
print(ME2)
print(row2)
print(col2)
raise Exception("number of matrix elements do not match.")
if len(ME1)>0 and len(ME2)>0:
try:
np.testing.assert_allclose(row1-row2,0,atol=1e-6,err_msg=err_msg)
np.testing.assert_allclose(col1-col2,0,atol=1e-6,err_msg=err_msg)
np.testing.assert_allclose(ME1-ME2,0,atol=1e-6,err_msg=err_msg)
except:
print(ME1)
print(row1)
print(col1)
print()
print(ME2)
print(row2)
print(col2)
raise Exception
def test_gen_basis_spinless_fermion(l_max,N=4):
L=6
kblocks = [None]
kblocks.extend(range(L))
pblocks = [None,0,1]
ops = ["n","z","+","-","I"]
Nfs = [None,N]
t = np.array([(i+1)%L for i in range(L)])
p = np.array([L-i-1 for i in range(L)])
for Nf,kblock,pblock in product(Nfs,kblocks,pblocks):
gen_blocks = {}
basis_blocks = {}
if kblock==0 or kblock==L//2:
if pblock is not None:
basis_blocks["pblock"] = (-1)**pblock
gen_blocks["pblock"] = (p,pblock)
else:
basis_blocks["pblock"] = None
gen_blocks["pblock"] = None
else:
basis_blocks["pblock"] = None
gen_blocks["pblock"] = None
if kblock is not None:
basis_blocks["kblock"] = kblock
gen_blocks["kblock"] = (t,kblock)
else:
basis_blocks["kblock"] = None
gen_blocks["kblock"] = None
basis_1d = spinless_fermion_basis_1d(L,Nf=Nf,**basis_blocks)
gen_basis = spinless_fermion_basis_general(L,Nf=Nf,**gen_blocks)
n = basis_1d._get_norms(np.float64)**2
n_gen = (gen_basis._n.astype(np.float64))*gen_basis._pers.prod()
if basis_1d.Ns != gen_basis.Ns:
print(L,basis_blocks)
print(basis_1d)
print(gen_basis)
raise ValueError("basis size mismatch")
np.testing.assert_allclose(basis_1d._basis-gen_basis._basis,0,atol=1e-6)
np.testing.assert_allclose(n-n_gen ,0,atol=1e-6)
for l in range(1,l_max+1):
for i0 in range(0,L-l+1,1):
indx = range(i0,i0+l,1)
for opstr in product(*[ops for i in range(l)]):
opstr = "".join(list(opstr))
printing = dict(basis_blocks)
printing["opstr"]=opstr
printing["indx"]=indx
printing["Nf"]=Nf
err_msg="testing: {opstr:} {indx:} Nf={Nf:} kblock={kblock:} pblock={pblock:}".format(**printing)
check_ME(basis_1d,gen_basis,opstr,indx,np.complex128,err_msg)
print("testing Nf=4")
test_gen_basis_spinless_fermion(3,N=4)
print("testing Nf=5")
test_gen_basis_spinless_fermion(3,N=5)
print("testing Nf=6")
test_gen_basis_spinless_fermion(3,N=6)
|
130010
|
import boto3
import json
import os
def parse_rule_violations(rule_violations):
rule_violations_text = ''
for rule in rule_violations:
bot_message = rule.get('Bot message')
del rule['Bot message']
        rule_violations_text = ''.join([
            rule_violations_text,
            json.dumps(rule).replace('"', '').replace('{', '').replace('}', '')
            .replace(',', '\n').replace("'", ''),
            '\nBot message: ',
            bot_message,
        ])
return rule_violations_text
# Post the event. Currently this goes to SNS but this can be generalized if needed
def sendEvent(output_message, SNS_TOPIC_ARN):
output_type = os.getenv('OUTPUT_TYPE', '')
print(f'{__file__} - output type: {output_type} - TopicArn: {SNS_TOPIC_ARN}')
sns = boto3.client('sns')
if output_type == 'JSON':
text_output = output_message
response = sns.publish(
TopicArn=SNS_TOPIC_ARN,
Message=json.dumps(text_output),
Subject='RemediationLog',
MessageStructure='string'
)
else:
bots_messages = parse_rule_violations(output_message.get('Rules violations found', ['N.A']))
del output_message['Rules violations found']
        text_output = ''.join([
            json.dumps(output_message).replace('"', '').replace('{', '').replace('}', '')
            .replace(',', '\n').replace("'", ''),
            '\nRule violations found:\n',
            bots_messages,
        ])
response = sns.publish(
TopicArn=SNS_TOPIC_ARN,
Message=text_output,
Subject='RemediationLog',
MessageStructure='string'
)
print(f'{__file__} - text_output: {text_output}')
status_code = response['ResponseMetadata']['HTTPStatusCode']
if status_code > 400:
print(f'{__file__} - SNS message failed to send!')
print(f'{__file__} - {str(response)}')
else:
print(f'{__file__} - SNS message posted successfully')
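# --- Hypothetical local demo (not part of the original module) ---
# A minimal sketch showing how parse_rule_violations flattens a list of rule
# dicts into the plain-text block that gets published to SNS. The rule keys
# and values below are illustrative only.
if __name__ == '__main__':
    sample_rules = [
        {
            'Rule name': 'S3 bucket must not be public',
            'Entity': 'my-example-bucket',
            'Bot message': 'Bucket ACL set to private',
        },
    ]
    print(parse_rule_violations(sample_rules))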
|
130065
|
from flextensor.scheduler import schedule
from flextensor.task import register_task
from flextensor.utils import RpcInfo
# from flextensor.ppa_model import measure_latency
from flextensor.intrinsic import register_intrin
def gen_micro_schedule(task, target, model_func=None):
register_task(task, override=True)
# register_intrin(intrin, override=True)
rpc_info = RpcInfo(None, None)
rpc_info.target = target
s, bufs, config = schedule(
task.key,
slevel=2, rlevel=2,
parallel=8,
op_trial=10,
model_measurer=model_func,
rpc_info=rpc_info,
warm_up_epoch=2, warm_up_number=8
)
return s, bufs
|
130073
|
import pytest
from pytest import approx
from barril import units
@pytest.fixture
def db():
db = units.UnitDatabase.GetSingleton()
yield db
def testTransmissibility(db):
converted = db.Convert("transmissibility", "cp.m3/day/bar", "cp.bbl/day/psi", 1.0)
assert approx(converted) == 0.433667315
def testIndex(db):
category = db.GetCategoryInfo("index")
db.CheckQuantityTypeUnit(category.quantity_type, "<ind>")
def testAdsorptionRate(db):
db.CheckQuantityTypeUnit("adsorption rate", "mg/kg/d")
assert approx(db.Convert("adsorption rate", "mg/kg/d", "kg/kg/d", 1.0)) == 1.0e-6
def testSolubilityProduct(db):
db.CheckQuantityTypeUnit("solubility product", "(mol/m3)^2")
assert approx(db.Convert("solubility product", "(mol/L)^2", "(mol/m3)^2", 1.0)) == 1.0e6
def testMassConsumptionEfficiency(db):
db.CheckQuantityTypeUnit("mass consumption efficiency", "mg/l/mg/l")
db.CheckQuantityTypeUnit("mass consumption efficiency", "kg/m3/kg/m3")
def testDensityGeneration(db):
db.CheckQuantityTypeUnit("density generation", "mg/l/d")
assert approx(db.Convert("density generation", "mg/l/d", "kg/m3/d", 1.0)) == 0.001
def testMolarDensity(db):
db.CheckQuantityTypeUnit("amount of a substance", "gmole/m3")
db.CheckQuantityTypeUnit("concentration of B", "gmole/m3")
def testPartsPerMillionByVolumeConcentration(db):
assert (
approx(
db.Convert(
"parts per million by volume per concentration", "ppmv/mg/l", "ppmv/kg/m3", 1.0
)
)
== 1000
)
def testKPaPerSecond(db):
assert approx(db.Convert("dynamic viscosity", "Pa.s", "kPa.s", 1000.0)) == 1
def testPartsPerMillionVolume(db):
quantity_type = db.GetCategoryInfo("relative proportion").quantity_type
db.CheckQuantityTypeUnit(quantity_type, "ppmv")
def testPerTimeSquared(db):
quantity_type = db.GetCategoryInfo("per time squared").quantity_type
db.CheckQuantityTypeUnit(quantity_type, "1/d^2")
def testNoUnit(db):
db.CheckQuantityTypeUnit("dimensionless", "-")
def testFluidConsistencyQuantityAndConversions(db):
# Checking the quantity type.
db.CheckQuantityType("fluid consistency")
# Checking the units on quantity type.
db.CheckQuantityTypeUnit("fluid consistency", "Pa.s^n")
db.CheckQuantityTypeUnit("fluid consistency", "lbf.s^n/ft2")
db.CheckQuantityTypeUnit("fluid consistency", "eq.cp")
# Let's do some conversions.
def DoConversion(from_unit, to_unit, value):
return db.Convert("fluid consistency", from_unit, to_unit, value)
assert approx(DoConversion("Pa.s^n", "lbf.s^n/ft2", 1.0)) == 0.02088543
assert approx(DoConversion("Pa.s^n", "lbf.s^n/ft2", 5.0)) == 0.10442717
assert approx(DoConversion("Pa.s^n", "lbf.s^n/ft2", 5.5)) == 0.11486989
assert approx(DoConversion("Pa.s^n", "lbf.s^n/ft2", 250.0)) == 5.221358447
assert approx(DoConversion("Pa.s^n", "lbf.s^n/100ft2", 1.0)) == 2.08854338
assert approx(DoConversion("Pa.s^n", "lbf.s^n/100ft2", 5.0)) == 10.44271689
assert approx(DoConversion("Pa.s^n", "lbf.s^n/100ft2", 5.5)) == 11.48698858
assert approx(DoConversion("Pa.s^n", "lbf.s^n/100ft2", 250.0)) == 522.1358447
assert approx(DoConversion("Pa.s^n", "eq.cp", 1.0)) == 994.5238095
assert approx(DoConversion("Pa.s^n", "eq.cp", 5.0)) == 4972.6190475
assert approx(DoConversion("Pa.s^n", "eq.cp", 5.5)) == 5469.8809522
assert approx(DoConversion("Pa.s^n", "eq.cp", 250.0)) == 248630.952375
assert approx(DoConversion("lbf.s^n/ft2", "Pa.s^n", 1.0)) == 47.88026
assert approx(DoConversion("lbf.s^n/ft2", "Pa.s^n", 5.0)) == 239.4013
assert approx(DoConversion("lbf.s^n/ft2", "Pa.s^n", 5.5)) == 263.34143
assert approx(DoConversion("lbf.s^n/ft2", "Pa.s^n", 250.0)) == 11970.065
assert approx(DoConversion("lbf.s^n/ft2", "lbf.s^n/100ft2", 1.0)) == 100
assert approx(DoConversion("lbf.s^n/ft2", "lbf.s^n/100ft2", 5.0)) == 500
assert approx(DoConversion("lbf.s^n/ft2", "lbf.s^n/100ft2", 5.5)) == 550
assert approx(DoConversion("lbf.s^n/ft2", "lbf.s^n/100ft2", 250.0)) == 25000
assert approx(DoConversion("lbf.s^n/ft2", "eq.cp", 1.0)) == 47618.058575050469
assert approx(DoConversion("eq.cp", "Pa.s^n", 1.0), abs=1e7) == 0.0010055
assert approx(DoConversion("eq.cp", "Pa.s^n", 5.0), abs=1e7) == 0.0050275
assert approx(DoConversion("eq.cp", "Pa.s^n", 5.5), abs=1e7) == 0.0055303
assert approx(DoConversion("eq.cp", "Pa.s^n", 250.0)) == 0.2513766
assert approx(DoConversion("eq.cp", "lbf.s^n/ft2", 1.0), abs=1e6) == 0.000021
assert approx(DoConversion("eq.cp", "lbf.s^n/ft2", 5.0), abs=1e6) == 0.000105
assert approx(DoConversion("eq.cp", "lbf.s^n/ft2", 5.5), abs=1e6) == 0.0001155
assert approx(DoConversion("eq.cp", "lbf.s^n/ft2", 250.0)) == 0.00525011
assert approx(DoConversion("eq.cp", "lbf.s^n/100ft2", 1.0), abs=1e7) == 0.0021
assert approx(DoConversion("eq.cp", "lbf.s^n/100ft2", 5.0)) == 0.01050021
assert approx(DoConversion("eq.cp", "lbf.s^n/100ft2", 5.5)) == 0.011550239
assert approx(DoConversion("eq.cp", "lbf.s^n/100ft2", 250.0)) == 0.5250109
def testBblPerMeters(db):
assert approx(db.Convert("volume per length", "bbl/ft", "bbl/m", 1)) == 1 / 0.3048
def testBblPerSecond(db):
assert approx(db.Convert("volume flow rate", "bbl/hr", "bbl/s", 1)) == 1 / 3600
def testConcentration(db):
assert approx(db.Convert("concentration", "g/L", "mg/L", 1)) == 1000.0
def testVolumeFractionPerTemperature(db):
expected_result = db.Convert("volumetric thermal expansion", "1/degC", "1/degF", 1)
obtained_result = db.Convert(
"volume fraction per temperature", "(m3/m3)/degC", "(m3/m3)/degF", 1
)
assert approx(obtained_result) == expected_result
expected_units = ["(m3/m3)/K", "(m3/m3)/degC", "(m3/m3)/degF"]
obtained_result = sorted(db.GetValidUnits("volume fraction per temperature"))
assert obtained_result == expected_units
def testVolumePerWtpercent(db):
ft3_wtpercent_to_m3_wtpercent = db.Convert(
"volume per wtpercent", "ft3/wtpercent", "m3/wtpercent", 25 / 1
)
m3_to_wtpercent_to_ft3_to_wtpercent = db.Convert(
"volume per wtpercent", "m3/wtpercent", "ft3/wtpercent", 0.70792125
)
assert approx(ft3_wtpercent_to_m3_wtpercent) == 0.70792125
assert approx(m3_to_wtpercent_to_ft3_to_wtpercent) == 25
def testMassPerMol(db):
gpermol_to_kgpermol = db.Convert("mass per mol", "g/mol", "kg/mol", 1000 / 1)
kgpermol_to_gpermol = db.Convert("mass per mol", "kg/mol", "g/mol", 1)
    assert approx(gpermol_to_kgpermol) == 1
    assert approx(kgpermol_to_gpermol) == 1000
assert approx(db.Convert("mass per mol", "g/mol", "lb/lbmole", 1)) == 1
def testInjectivityFactor(db):
quantity_type = db.GetCategoryInfo("injectivity factor").quantity_type
db.CheckQuantityType(quantity_type)
db.CheckQuantityTypeUnit(quantity_type, "m3/s.Pa")
expected_units = ["m3/s.Pa", "bbl/min.psi", "(m3/d)/(kgf/cm2)"]
assert db.GetValidUnits("injectivity factor") == expected_units
converted_value = db.Convert("injectivity factor", "bbl/min.psi", "m3/s.Pa", 1)
# (from bbl/min to m3/s) / (from psi to Pa)
assert approx(converted_value) == 0.002649 / 6894.757
converted_value = db.Convert("injectivity factor", "(m3/d)/(kgf/cm2)", "m3/s.Pa", 1)
assert approx(converted_value) == 1.1802270983e-10
def testTemperaturePerLengthdegCP30m():
u1 = units.Scalar("temperature per length", 1.0, "K/m")
u2 = units.Scalar("temperature per length", 30.0, "degC/30m")
assert approx(u1.GetValue("degC/30m")) == 30.0
assert approx(u2.GetValue("K/m")) == 1.0
def testMassPerTimePerArea(db):
secs_to_days = 60 * 60 * 24
assert approx(db.Convert("mass per time per area", "kg/m2.s", "kg/m2.d", 1)) == 1 * secs_to_days
def testAccelerationLinearMetersPerMin2(db):
assert approx(db.Convert("acceleration linear", "m/s2", "m/min2", 2)) == 7200
assert approx(db.Convert("acceleration linear", "m/min2", "m/s2", 10800)) == 3
def testAccelerationLinearFeetPerMin2(db):
assert approx(db.Convert("acceleration linear", "ft/s2", "ft/min2", 2)) == 7200
assert approx(db.Convert("acceleration linear", "ft/min2", "ft/s2", 10800)) == 3
def testMassConcentration(db):
assert approx(db.Convert("mass concentration", "Euc", "g/g", 2)) == 2
assert approx(db.Convert("mass concentration", "Euc", "g/100g", 2)) == 200
assert approx(db.Convert("mass concentration", "Euc", "lbm/lbm", 2)) == 2
def testSpecificVolume(db):
assert approx(db.Convert("specific volume", "m3/kg", "l/mg", 1.0)) == 0.001
def testViscosityPerPressure(db):
assert approx(db.Convert("viscosity per pressure", "cP/kPa", "cP/psi", 10.0)) == 68.9475729
assert approx(db.Convert("viscosity per pressure", "cP/kPa", "cP/(kgf/cm2)", 10.0)) == 980.665
    assert (
        approx(db.Convert("viscosity per pressure", "cP/(kgf/cm2)", "cP/psi", 0.102), abs=1e-7)
        == 0.0071713
    )
# maybe it's from this wrong conversion where IMEX limit of 0.102 comes from:
assert approx(db.Convert("viscosity per pressure", "cP/(kgf/cm2)", "cP/kPa", 10.0)) == 0.1019716
def testForcePerLength(db):
assert approx(db.Convert("force per length", "N/m", "kgf/m", 1.0)) == 0.101971621
def testMiligramPerGram(db):
assert approx(db.Convert("mass concentration", "mg/g", "g/g", 2.0)) == 0.002
def testGramsPerBarrel(db):
db.CheckQuantityTypeUnit("concentration", "g/bbl")
def testSiemens(db):
assert approx(db.Convert("conductivity", "S/m", "uS/m", 1.0)) == 1000000.0
def testStrokeFrequency(db):
db.CheckQuantityTypeUnit("stroke frequency", "spm")
assert approx(db.Convert("stroke frequency", "spm", "sps", 150.0)) == 2.5
def testPowerPerWeight(db):
db.CheckQuantityTypeUnit("power per mass", "W/kg")
db.CheckQuantityTypeUnit("power per weight", "W/kg")
assert approx(db.Convert("power per weight", "W/kg", "kW/kg", 1.0)) == 1.0 / 1000.0
assert approx(db.Convert("power per weight", "kW/kg", "W/kg", 1.0)) == 1000.0
def testSelfInductance(db):
assert approx(db.Convert("self inductance per length", "H/m", "H/km", 1.0), rel=0.01) == 1000
assert (
approx(db.Convert("self inductance per length", "H/m", "mH/km", 1.0), rel=0.01) == 1000000
)
assert (
approx(db.Convert("self inductance per length", "H/m", "uH/km", 1.0), rel=0.01)
== 1000000000
)
assert (
approx(db.Convert("self inductance per length", "H/m", "nH/km", 1.0), rel=0.01)
== 1000000000000
)
assert approx(db.Convert("self inductance per length", "H/m", "mH/m", 1.0), rel=0.01) == 1000
assert approx(db.Convert("self inductance per length", "H/m", "uH/m", 1.0), rel=0.01) == 1000000
assert (
approx(db.Convert("self inductance per length", "H/m", "nH/m", 1.0), rel=0.01) == 1000000000
)
def testConcentrationPerSquareTime(db):
db.CheckQuantityTypeUnit("concentration per square time", "mg/l/d2")
db.CheckQuantityTypeUnit("concentration per square time", "kg/m3/d2")
assert (
approx(db.Convert("concentration per square time", "mg/l/d2", "kg/m3/d2", 1.0))
== 1.0 / 1000.0
)
assert approx(db.Convert("concentration per square time", "kg/m3/d2", "mg/l/d2", 1.0)) == 1000.0
def testVolumeInCubicMicrometres(db):
obtained = db.Convert("volume", "m3", "um3", 1.0)
expected = 1e18
assert approx(obtained, rel=1e-12) == expected
def testVolumeFlowRateInCubicMicrometresPerSecond(db):
obtained = db.Convert("volume flow rate", "m3/s", "um3/s", 1.0)
expected = 1e18
assert approx(obtained, rel=1e-12) == expected
def testFlowCoefficient(db):
obtained = db.Convert("flow coefficient", "(galUS/min)/(psi^0.5)", "(m3/s)/(Pa^0.5)", 1.0)
expected = 7.59805421208337e-07
assert approx(obtained, rel=1e-12) == expected
obtained = db.Convert("flow coefficient", "(m3/h)/(bar^0.5)", "(m3/s)/(Pa^0.5)", 1.0)
expected = 8.784104611578831e-07
assert approx(obtained, rel=1e-12) == expected
def testHertzPerSecond():
from barril.units import Scalar
assert approx(Scalar(1, "rpm").GetValue("Hz")) == 1 / 60.0
assert approx(Scalar(1, "Hz").GetValue("rpm")) == 60.0
assert approx(Scalar(1, "Hz/s").GetValue("rpm/s")) == 60.0
assert approx(Scalar(1, "rpm/s").GetValue("Hz/s")) == 1 / 60.0
def testFluidGasConcentration():
from barril.units import Scalar
assert approx(Scalar(1, "tgu").GetValue("ppm")) == 333.33
assert approx(Scalar(1, "tgu").GetValue("%")) == 0.033333
def testPerMicrometre(db):
per_micrometre = units.Scalar("per length", 1.0, "1/um")
per_metre = per_micrometre.CreateCopy(unit="1/m")
per_inch = per_micrometre.CreateCopy(unit="1/in")
assert per_micrometre.GetValue("1/m") == 10 ** 6
assert approx(per_metre.GetValue()) == 10 ** 6
assert approx(per_inch.GetValue()) == 25400
assert per_metre == units.Scalar("per length", 10 ** 6, "1/m")
assert per_inch == units.Scalar("per length", 25400.0, "1/in")
|
130088
|
import pytest
@pytest.fixture(autouse=True)
def _autouse_resp_mocker(resp_mocker, version_api):
pass
|
130290
|
from linode_api4.errors import UnexpectedResponseError
from linode_api4.objects import Base, Property
class AuthorizedApp(Base):
api_endpoint = "/profile/apps/{id}"
properties = {
"id": Property(identifier=True),
"scopes": Property(),
"label": Property(),
"created": Property(is_datetime=True),
"expiry": Property(is_datetime=True),
"thumbnail_url": Property(),
"website": Property(),
}
class PersonalAccessToken(Base):
api_endpoint = "/profile/tokens/{id}"
properties = {
"id": Property(identifier=True),
"scopes": Property(),
"label": Property(mutable=True),
"created": Property(is_datetime=True),
"token": Property(),
"expiry": Property(is_datetime=True),
}
class WhitelistEntry(Base):
api_endpoint = "/profile/whitelist/{id}"
properties = {
'id': Property(identifier=True),
'address': Property(),
'netmask': Property(),
'note': Property(),
}
class Profile(Base):
api_endpoint = "/profile"
id_attribute = 'username'
properties = {
'username': Property(identifier=True),
'uid': Property(),
'email': Property(mutable=True),
'timezone': Property(mutable=True),
'email_notifications': Property(mutable=True),
'referrals': Property(),
'ip_whitelist_enabled': Property(mutable=True),
'lish_auth_method': Property(mutable=True),
'authorized_keys': Property(mutable=True),
'two_factor_auth': Property(),
'restricted': Property(),
}
def enable_tfa(self):
"""
        Enables TFA for the token's user. This requires a follow-up request
to confirm TFA. Returns the TFA secret that needs to be confirmed.
"""
result = self._client.post('/profile/tfa-enable')
return result['secret']
def confirm_tfa(self, code):
"""
Confirms TFA for an account. Needs a TFA code generated by enable_tfa
"""
self._client.post('/profile/tfa-enable-confirm', data={
"tfa_code": code
})
return True
def disable_tfa(self):
"""
Turns off TFA for this user's account.
"""
self._client.post('/profile/tfa-disable')
return True
@property
def grants(self):
"""
Returns grants for the current user
"""
from linode_api4.objects.account import UserGrants # pylint: disable-all
resp = self._client.get('/profile/grants') # use special endpoint for restricted users
grants = None
if resp is not None:
# if resp is None, we're unrestricted and do not have grants
grants = UserGrants(self._client, self.username, resp)
return grants
@property
def whitelist(self):
"""
Returns the user's whitelist entries, if whitelist is enabled
"""
return self._client._get_and_filter(WhitelistEntry)
def add_whitelist_entry(self, address, netmask, note=None):
"""
Adds a new entry to this user's IP whitelist, if enabled
"""
result = self._client.post("{}/whitelist".format(Profile.api_endpoint),
data={
"address": address,
"netmask": netmask,
"note": note,
})
        if 'id' not in result:
raise UnexpectedResponseError("Unexpected response creating whitelist entry!")
return WhitelistEntry(result['id'], self._client, json=result)
class SSHKey(Base):
"""
An SSH Public Key uploaded to your profile for use in Linode Instance deployments.
"""
api_endpoint = '/profile/sshkeys/{id}'
properties = {
"id": Property(identifier=True),
"label": Property(mutable=True),
"ssh_key": Property(),
"created": Property(is_datetime=True),
}
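# --- Hypothetical usage sketch (not part of the original module) ---
# Illustrates the two-step TFA flow documented on Profile: enable_tfa() returns
# a secret to load into an authenticator app, and confirm_tfa() completes the
# setup with a code generated from that secret. The token value is a
# placeholder; running this would call the live Linode API.
if __name__ == "__main__":
    from linode_api4 import LinodeClient

    client = LinodeClient("my-personal-access-token")  # placeholder token
    profile = client.profile()
    secret = profile.enable_tfa()  # add this secret to an authenticator app
    code = input("Enter the TOTP code generated from the secret: ")
    profile.confirm_tfa(code)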
|
130329
|
import gaphas
import pytest
from gaphor.core.modeling import Diagram, Presentation, StyleSheet
class Example(gaphas.Element, Presentation):
def __init__(self, diagram, id):
super().__init__(connections=diagram.connections, diagram=diagram, id=id)
def unlink(self):
self.test_unlinked = True
super().unlink()
class ExampleLine(gaphas.Line, Presentation):
def __init__(self, diagram, id):
super().__init__(connections=diagram.connections, diagram=diagram, id=id)
def unlink(self):
self.test_unlinked = True
super().unlink()
@pytest.fixture
def diagram(element_factory):
return element_factory.create(Diagram)
def test_diagram_can_be_used_as_gtkview_model():
diagram = Diagram("id", None)
assert isinstance(diagram, gaphas.view.model.Model)
def test_canvas_is_saved():
diagram = Diagram("id", None)
saved_keys = []
diagram.save(lambda name, val: saved_keys.append(name))
assert "canvas" not in saved_keys
def test_canvas_item_is_created(element_factory):
diagram = element_factory.create(Diagram)
example = diagram.create(Example)
assert example in diagram.get_all_items()
assert example.diagram is diagram
def test_canvas_is_unlinked(element_factory):
diagram = element_factory.create(Diagram)
example = diagram.create(Example)
diagram.unlink()
assert example.test_unlinked
def test_can_only_add_diagram_items(element_factory):
diagram = element_factory.create(Diagram)
with pytest.raises(TypeError):
diagram.create(Diagram)
def test_diagram_stylesheet(element_factory):
diagram = element_factory.create(Diagram)
styleSheet = element_factory.create(StyleSheet)
assert diagram.styleSheet is styleSheet
class ViewMock:
def __init__(self):
self.removed_items = set()
def request_update(self, items, removed_items) -> None:
self.removed_items.update(removed_items)
def test_remove_presentation_triggers_view(element_factory):
diagram = element_factory.create(Diagram)
view = ViewMock()
diagram.register_view(view)
example = diagram.create(Example)
example.unlink()
assert example.diagram is None
assert example not in diagram.ownedPresentation
assert example in view.removed_items
def test_order_presentations_lines_are_last(diagram):
example_line = diagram.create(ExampleLine)
example = diagram.create(Example)
assert list(diagram.get_all_items()) == [example, example_line]
def test_order_presentations_line_is_grouped(diagram):
example_line = diagram.create(ExampleLine)
example_1 = diagram.create(Example)
example_2 = diagram.create(Example)
example_line.parent = example_1
assert list(diagram.get_all_items()) == [example_1, example_2, example_line]
def test_order_grouped_presentations(diagram):
example_1 = diagram.create(Example)
example_2 = diagram.create(Example)
example_1.parent = example_2
assert list(diagram.get_all_items()) == [example_2, example_1]
|
130411
|
from django.conf.urls import url
from .views import (
LocationSearchView,
)
urlpatterns = [
url(r'^create-location/$', LocationSearchView.as_view(), name='location_create'),
]
|
130418
|
import pylab
import numpy
class GeneralRandom:
"""This class enables us to generate random numbers with an arbitrary
distribution."""
def __init__(self, x = pylab.arange(-1.0, 1.0, .01), p = None, Nrl = 1000):
"""Initialize the lookup table (with default values if necessary)
Inputs:
x = random number values
p = probability density profile at that point
Nrl = number of reverse look up values between 0 and 1"""
        if p is None:
p = pylab.exp(-10*x**2.0)
self.set_pdf(x, p, Nrl)
def set_pdf(self, x, p, Nrl = 1000):
"""Generate the lookup tables.
x is the value of the random variate
pdf is its probability density
cdf is the cumulative pdf
inversecdf is the inverse look up table
"""
self.x = x
self.pdf = p/p.sum() #normalize it
self.cdf = self.pdf.cumsum()
self.inversecdfbins = Nrl
self.Nrl = Nrl
y = pylab.arange(Nrl)/float(Nrl)
delta = 1.0/Nrl
self.inversecdf = pylab.zeros(Nrl)
self.inversecdf[0] = self.x[0]
cdf_idx = 0
        for n in range(1, self.inversecdfbins):
while self.cdf[cdf_idx] < y[n] and cdf_idx < Nrl:
cdf_idx += 1
self.inversecdf[n] = self.x[cdf_idx-1] + (self.x[cdf_idx] - self.x[cdf_idx-1]) * (y[n] - self.cdf[cdf_idx-1])/(self.cdf[cdf_idx] - self.cdf[cdf_idx-1])
if cdf_idx >= Nrl:
break
self.delta_inversecdf = pylab.concatenate((pylab.diff(self.inversecdf), [0]))
def random(self, N = 1000):
"""Give us N random numbers with the requested distribution"""
idx_f = numpy.random.uniform(size = N, high = self.Nrl-1)
idx = pylab.array([idx_f],'i')
y = self.inversecdf[idx] + (idx_f - idx)*self.delta_inversecdf[idx]
return y
def plot_pdf(self):
pylab.plot(self.x, self.pdf)
def self_test(self, N = 1000):
pylab.figure()
#The cdf
pylab.subplot(2,2,1)
pylab.plot(self.x, self.cdf)
#The inverse cdf
pylab.subplot(2,2,2)
y = pylab.arange(self.Nrl)/float(self.Nrl)
pylab.plot(y, self.inversecdf)
#The actual generated numbers
pylab.subplot(2,2,3)
y = self.random(N)
        p1, edges = pylab.histogram(y, bins=50,
                                    range=(self.x.min(), self.x.max()),
                                    density=True)
x1 = 0.5*(edges[0:-1] + edges[1:])
pylab.plot(x1, p1/p1.max())
pylab.plot(self.x, self.pdf/self.pdf.max())
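# --- Hypothetical usage sketch (not part of the original module) ---
# Draws samples from a user-defined density via the inverse-CDF lookup table
# built by set_pdf. The triangular density below is an illustrative choice.
if __name__ == '__main__':
    x = pylab.arange(-1.0, 1.0, 0.01)
    p = 1.0 - numpy.abs(x)  # triangular density on [-1, 1]
    gr = GeneralRandom(x, p, Nrl=2000)
    samples = gr.random(N=5000)
    print("sample mean:", samples.mean(), "sample std:", samples.std())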
|
130453
|
from typing import List
import numpy as np
class BenchmarkResult:
def __init__(self, loop_names: List[str], n_repeats: int, metric_names: List[str]):
"""
:param loop_names: List of loop names
:param n_repeats: Number of random restarts in benchmarking
:param metric_names: List of metric names
"""
self.loop_names = loop_names
self.n_repeats = n_repeats
self.metric_names = metric_names
self._results = dict()
for loop_name in loop_names:
self._results[loop_name] = dict()
for metric_name in metric_names:
self._results[loop_name][metric_name] = []
for i in range(n_repeats):
self._results[loop_name][metric_name].append([])
def add_results(self, loop_name: str, i_repeat: int, metric_name: str, metric_values: np.ndarray) -> None:
"""
Add results for a specific loop, metric and repeat combination
:param loop_name: Name of loop
:param i_repeat: Index of repeat
:param metric_name: Name of metric
:param metric_values: Metric values to add
"""
self._results[loop_name][metric_name][i_repeat] = metric_values.flatten()
def extract_metric_as_array(self, loop_name: str, metric_name: str) -> np.ndarray:
"""
Returns results over all repeats and iterations for a specific metric and loop name pair
:param loop_name: Name of loop to return results for
:param metric_name: Name of metric to extract
:return: 2-d numpy array of shape (n_repeats x n_iterations)
"""
return np.array(self._results[loop_name][metric_name])
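# --- Hypothetical usage sketch (not part of the original module) ---
# Shows how results are stored per (loop, repeat, metric) and read back as the
# (n_repeats x n_iterations) array documented in extract_metric_as_array.
# The loop and metric names are illustrative only.
if __name__ == '__main__':
    result = BenchmarkResult(loop_names=['random', 'bayes_opt'], n_repeats=2,
                             metric_names=['min_value'])
    for i_repeat in range(2):
        result.add_results('random', i_repeat, 'min_value', np.linspace(1.0, 0.1, 5))
    print(result.extract_metric_as_array('random', 'min_value').shape)  # (2, 5)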
|
130531
|
from disco.core import result_iterator
from disco.error import DataError
from disco.test import TestCase, TestJob, FailedReply
class RedundantJob(TestJob):
@staticmethod
def map(e, params):
yield int(e), ''
@staticmethod
def reduce(iter, params):
yield sum(k for k, v in iter), ''
class RedundantTestCase(TestCase):
def serve(self, path):
if 'fail' in path:
raise FailedReply()
return '{0}\n'.format(int(path) * 10)
def runTest(self):
input = ['1', ['2_fail', '2_still_fail', '200'], '3', ['4_fail', '400']]
self.job = RedundantJob().run(input=self.test_server.urls(input))
self.assertResults(self.job, [(6040, '')])
class RedundantOutputTestCase(TestCase):
# This is a tricky test case now that comm.py tries really
# hard to access the url, which in this case doesn't exist
# (http://nonode). The test could take almost 10 minutes.
# We should have a way to lower the number of retries
# globally.
"""
def test_unavailable(self):
from disco.schemes import scheme_raw
results = list(result_iterator([['http://nonode', 'raw://hello']],
reader=scheme_raw.input_stream))
self.assertEquals(results, ['hello'])
"""
def test_corrupt(self):
def corrupt_reader(fd, size, url, params):
yield 'hello'
if 'corrupt' in url:
raise DataError("Corrupt!", url)
yield 'there'
self.assertAllEqual(result_iterator([['raw://corrupt'] * 9 +
['raw://decent']],
reader=corrupt_reader),
['hello', 'there'])
|
130603
|
from guizero import App, TextBox, PushButton, Text
def show():
output.value = textbox.value
app = App()
textbox = TextBox(app, multiline=True, height=10, width=50, scrollbar=True)
textbox.value = "hello\ngoodbye\nno way\nthis is a very long stream of text, very long indeed, the best long line of text, its super bigly and very long, I dont think it could possibly be any better particularly as it was created by someone who is super good at creating long lines of text."
button = PushButton(app, text="Print", command=show)
output = Text(app)
app.display()
|
130610
|
import logging
import random
import time
from ..candidate_generation import candidates_per_query, syntactically_relevant_indexes
from ..selection_algorithm import DEFAULT_PARAMETER_VALUES, SelectionAlgorithm
from ..utils import get_utilized_indexes, mb_to_b
# budget_MB: The algorithm can utilize the specified storage budget in MB.
# max_index_width: The number of columns an index can contain at maximum.
# try_variations_seconds: Time spent in TryVariations phase. See the original paper
# for further details
# try_variations_max_removals: Maximum number of index candidates that are removed per
# TryVariations step.
# The algorithm stops if the budget & the time for the TryVariations phase are exceeded.
DEFAULT_PARAMETERS = {
"budget_MB": DEFAULT_PARAMETER_VALUES["budget_MB"],
"max_index_width": DEFAULT_PARAMETER_VALUES["max_index_width"],
"try_variations_seconds": 10,
"try_variations_max_removals": 4,
}
# This algorithm resembles the index selection algorithm published in 2000 by Valentin
# et al. Details can be found in the original paper:
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>:
# DB2 Advisor: An Optimizer Smart Enough to Recommend Its Own Indexes. ICDE 2000: 101-110
#
# Please note, that this implementation does not reflect the behavior and performance
# of the original algorithm, which might be continuously enhanced and optimized.
class DB2AdvisAlgorithm(SelectionAlgorithm):
def __init__(self, database_connector, parameters=None):
if parameters is None:
parameters = {}
SelectionAlgorithm.__init__(
self, database_connector, parameters, DEFAULT_PARAMETERS
)
self.disk_constraint = mb_to_b(self.parameters["budget_MB"])
self.try_variations_seconds = self.parameters["try_variations_seconds"]
self.try_variations_max_removals = self.parameters["try_variations_max_removals"]
def _calculate_best_indexes(self, workload):
logging.info("Calculating best indexes DB2Advis")
# The chosen generator is similar to the original "BFI" and
# uses all syntactically relevant indexes.
candidates = candidates_per_query(
workload,
self.parameters["max_index_width"],
candidate_generator=syntactically_relevant_indexes,
)
utilized_indexes, query_details = get_utilized_indexes(
workload, candidates, self.cost_evaluation, True
)
index_benefits = self._calculate_index_benefits(utilized_indexes, query_details)
index_benefits_subsumed = self._combine_subsumed(index_benefits)
selected_index_benefits = []
disk_usage = 0
for index_benefit in index_benefits_subsumed:
if disk_usage + index_benefit.size() <= self.disk_constraint:
selected_index_benefits.append(index_benefit)
disk_usage += index_benefit.size()
if self.try_variations_seconds > 0:
selected_index_benefits = self._try_variations(
selected_index_benefits, index_benefits_subsumed, workload
)
return [index_benefit.index for index_benefit in selected_index_benefits]
def _calculate_index_benefits(self, candidates, query_results):
indexes_benefit = []
for index_candidate in candidates:
benefit = 0
for query, value in query_results.items():
if index_candidate not in value["utilized_indexes"]:
continue
# TODO adjust when having weights for queries
benefit += value["cost_without_indexes"] - value["cost_with_indexes"]
indexes_benefit.append(IndexBenefit(index_candidate, benefit))
return sorted(indexes_benefit, reverse=True)
# From the paper: "Combine any index subsumed
# by an index with a higher ratio with that index."
# The input must be a sorted list of IndexBenefit objects.
# E.g., the output of _calculate_index_benefits()
def _combine_subsumed(self, index_benefits):
# There is no point in subsuming with less than two elements
if len(index_benefits) < 2:
return index_benefits
assert index_benefits == sorted(
index_benefits,
reverse=True,
key=lambda index_benefit: index_benefit.benefit_size_ratio(),
), "the input of _combine_subsumed must be sorted"
index_benefits_to_remove = set()
for high_ratio_pos, index_benefit_high_ratio in enumerate(index_benefits):
if index_benefit_high_ratio in index_benefits_to_remove:
continue
# Test all following elements (with lower ratios) in the list
iteration_pos = high_ratio_pos + 1
for index_benefit_lower_ratio in index_benefits[iteration_pos:]:
if index_benefit_lower_ratio in index_benefits_to_remove:
continue
if index_benefit_high_ratio.index.subsumes(
index_benefit_lower_ratio.index
):
index_benefit_high_ratio.benefit += index_benefit_lower_ratio.benefit
index_benefits_to_remove.add(index_benefit_lower_ratio)
result_set = set(index_benefits) - index_benefits_to_remove
# Sorting of a set results in a list
return sorted(result_set, reverse=True)
def _try_variations(self, selected_index_benefits, index_benefits, workload):
logging.debug(f"Try variation for {self.try_variations_seconds} seconds")
start_time = time.time()
not_used_index_benefits = set(index_benefits) - set(selected_index_benefits)
min_length = min(len(selected_index_benefits), len(not_used_index_benefits))
if self.try_variations_max_removals > min_length:
self.try_variations_max_removals = min_length
if self.try_variations_max_removals == 0:
return selected_index_benefits
current_cost = self._evaluate_workload(selected_index_benefits, workload)
logging.debug(f"Initial cost \t{current_cost}")
selected_index_benefits_set = set(selected_index_benefits)
while start_time + self.try_variations_seconds > time.time():
number_of_exchanges = (
random.randrange(1, self.try_variations_max_removals)
if self.try_variations_max_removals > 1
else 1
)
            indexes_to_remove = frozenset(
                random.sample(list(selected_index_benefits_set), k=number_of_exchanges)
            )
            new_variation = set(selected_index_benefits_set - indexes_to_remove)
            new_variation_size = sum(
                [index_benefit.size() for index_benefit in new_variation]
            )
            indexes_to_add = random.sample(list(not_used_index_benefits), k=number_of_exchanges)
            assert len(indexes_to_add) == len(
                indexes_to_remove
            ), "_try_variations must remove the same number of indexes that are added."
            for index_benefit in indexes_to_add:
                if index_benefit.size() + new_variation_size > self.disk_constraint:
                    continue
                new_variation.add(index_benefit)
                new_variation_size += index_benefit.size()
            cost_of_variation = self._evaluate_workload(new_variation, workload)
            if cost_of_variation < current_cost:
                logging.debug(f"Lower cost found \t{current_cost}")
                current_cost = cost_of_variation
                selected_index_benefits_set = new_variation
return selected_index_benefits_set
def _evaluate_workload(self, index_benefits, workload):
index_candidates = [index_benefit.index for index_benefit in index_benefits]
return self.cost_evaluation.calculate_cost(workload, index_candidates)
class IndexBenefit:
def __init__(self, index, benefit):
self.index = index
self.benefit = benefit
def __eq__(self, other):
if not isinstance(other, IndexBenefit):
return False
return other.index == self.index and self.benefit == other.benefit
def __lt__(self, other):
self_ratio = self.benefit_size_ratio()
other_ratio = other.benefit_size_ratio()
# For reproducible results, we also compare the index objects if the ratios
# are equal
if self_ratio == other_ratio:
return self.index < other.index
return self_ratio < other_ratio
def __hash__(self):
return hash((self.index, self.benefit))
def __repr__(self):
return f"IndexBenefit({self.index}, {self.benefit})"
def size(self):
return self.index.estimated_size
def benefit_size_ratio(self):
return self.benefit / self.size()
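# --- Hypothetical illustration (not part of the original module) ---
# Sketches the budget-constrained greedy fill described above: candidates are
# sorted by benefit/size ratio and picked while they still fit the storage
# budget. _FakeIndex stands in for the framework's Index class; run this via
# the package (python -m ...) because the module uses relative imports.
if __name__ == "__main__":
    from collections import namedtuple

    _FakeIndex = namedtuple("_FakeIndex", ["name", "estimated_size"])
    candidates = sorted(
        [
            IndexBenefit(_FakeIndex("idx_a", 100), benefit=500),
            IndexBenefit(_FakeIndex("idx_b", 400), benefit=900),
            IndexBenefit(_FakeIndex("idx_c", 300), benefit=300),
        ],
        reverse=True,
    )
    budget, used, chosen = 500, 0, []
    for candidate in candidates:
        if used + candidate.size() <= budget:
            chosen.append(candidate)
            used += candidate.size()
    print(chosen)  # the highest-ratio candidates that fit the budget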
|
130641
|
from __future__ import print_function
from sys import stdout, stderr
from pepper.framework.abstract import AbstractComponent
from pepper.framework.util import Scheduler
from pepper.framework.component import SpeechRecognitionComponent
from pepper import config
import threading
import urllib
from time import time
class StatisticsComponent(AbstractComponent):
"""
Display Realtime Application Performance Statistics
Parameters
----------
backend: AbstractBackend
Application Backend
"""
PERFORMANCE_ERROR_THRESHOLD = 0.8
LIVE_SPEECH = ""
LIVE_SPEECH_TIMEOUT = 3
LIVE_SPEECH_TIME = 0
def __init__(self, backend):
super(StatisticsComponent, self).__init__(backend)
# Require Speech Recognition Component and Get Information from it
speech_recognition = self.require(StatisticsComponent, SpeechRecognitionComponent) # type: SpeechRecognitionComponent
vad, asr = speech_recognition.vad, speech_recognition.asr
def worker():
# Create Voice Activation Bar
activation = int(vad.activation * 10)
activation_print = "|" * activation + "." * (10 - activation)
voice_print = ("<{:10s}>" if vad._voice else "[{:10s}]").format(activation_print)
empty_voice_print = "[ ]"
# Get Microphone Related Information
mic_running = self.backend.microphone.running
mic_rate = self.backend.microphone.rate
mic_rate_true = self.backend.microphone.true_rate
# Get Camera Related Information
cam_rate = self.backend.camera.rate
cam_rate_true = self.backend.camera.true_rate
# If Camera and/or Microphone are not running as fast as expected -> show stderr message instead of stdout
error = (cam_rate_true < cam_rate * self.PERFORMANCE_ERROR_THRESHOLD or
mic_rate_true < float(mic_rate) * self.PERFORMANCE_ERROR_THRESHOLD)
# Show Speech to Text Transcript 'live' as it happens
if asr.live:
self.LIVE_SPEECH = asr.live
self.LIVE_SPEECH_TIME = time()
elif time() - self.LIVE_SPEECH_TIME > self.LIVE_SPEECH_TIMEOUT:
self.LIVE_SPEECH = ""
# Display Statistics
print("\rThreads {:2d} | Cam {:4.1f} Hz | Mic {:4.1f} kHz | STT {:12s} >>> {}".format(
threading.active_count(),
cam_rate_true,
mic_rate_true / 1000.0,
voice_print if mic_running else empty_voice_print,
self.LIVE_SPEECH),
end="", file=(stderr if error else stdout))
# Run 10 times a second
# TODO: Bit Much?
schedule = Scheduler(worker, 0.1)
schedule.start()
|