id
stringlengths 3
8
| content
stringlengths 100
981k
|
|---|---|
11530897
|
from rx.core import Observable
from rx.concurrency import timeout_scheduler
from rx.internal.utils import TimeInterval
from rx.internal import extensionmethod
@extensionmethod(Observable)
def time_interval(self, scheduler=None):
    """Records the time interval between consecutive values in an
    observable sequence.

    1 - res = source.time_interval();
    2 - res = source.time_interval(Scheduler.timeout)

    Keyword arguments:
    scheduler -- [Optional] Scheduler used to compute time intervals. If
        not specified, the timeout scheduler is used.

    Return An observable sequence with time interval information on values.
    """
    source = self
    scheduler = scheduler or timeout_scheduler

    def defer():
        # Single-element list acts as a mutable cell so the inner
        # selector can rebind the previous timestamp.
        previous = [scheduler.now]

        def selector(value):
            current = scheduler.now
            elapsed = current - previous[0]
            previous[0] = current
            return TimeInterval(value=value, interval=elapsed)

        return source.map(selector)

    return Observable.defer(defer)
|
11530966
|
import torch
from .type_dist import CharacterTypeDist
from .token_dist import CharacterTokenDist
from .image_dist import CharacterImageDist
class CharacterModel(object):
    """Sample from and score against the character graphical model.

    The joint factorizes as
    P(Type, Token, Image) = P(Type) * P(Token | Type) * P(Image | Token),
    with the three factors held in ``type_dist``, ``token_dist`` and
    ``image_dist`` respectively; every method below simply delegates to
    the matching factor.
    """

    def __init__(self, lib):
        # One distribution object per factor of the joint.
        self.type_dist = CharacterTypeDist(lib)
        self.token_dist = CharacterTokenDist(lib)
        self.image_dist = CharacterImageDist(lib)

    # --- P(Type) ---
    def sample_type(self, k=None):
        return self.type_dist.sample_type(k)

    def score_type(self, ctype):
        return self.type_dist.score_type(ctype)

    # --- P(Token | Type) ---
    def sample_token(self, ctype):
        return self.token_dist.sample_token(ctype)

    def score_token(self, ctype, ctoken):
        return self.token_dist.score_token(ctype, ctoken)

    # --- P(Image | Token) ---
    def sample_image(self, ctoken):
        return self.image_dist.sample_image(ctoken)

    def score_image(self, ctoken, image):
        return self.image_dist.score_image(ctoken, image)

    def get_pimg(self, ctoken):
        return self.image_dist.get_pimg(ctoken)
def fit_image(im, lib):
    """Sketch of MAP inference: fit a character type/token to an image.

    Maximizes log P(type) + log P(token | type) + log P(image | token)
    by gradient ascent on the sampled type and token parameters.
    NOTE(review): nothing is returned — callers cannot retrieve the
    fitted type/token; presumably a work-in-progress sketch (see the
    comment below).
    """
    # Optimization would look something like this
    model = CharacterModel(lib)
    # Initialize from a random draw of the generative model.
    _type = model.sample_type()
    token = model.sample_token(_type)
    # assumes _type and token expose .parameters() like torch modules — TODO confirm
    optimizer = torch.optim.Adam([{'params': _type.parameters()},
                                  {'params': token.parameters()}],
                                 lr=0.001)
    # Set requires_grad to True
    _type.train()
    token.train()
    # Fixed 100 gradient steps; no convergence check.
    for idx in range(100):
        optimizer.zero_grad()
        type_score = model.score_type(_type)
        token_score = model.score_token(_type,token)
        image_score = model.score_image(token,im)
        # Total log-probability of the joint; negate for a loss to minimize.
        score = type_score + token_score + image_score
        loss = -score
        loss.backward()
        optimizer.step()
|
11530989
|
from fastapi import Depends, HTTPException
from starlette.status import HTTP_401_UNAUTHORIZED
from starlette.requests import Request
from app.database.models import User
from app.dependencies import get_db
from app.internal.security.ouath2 import (
Session, get_jwt_token, get_authorization_cookie
)
from app.internal.security import schema
async def is_logged_in(
    request: Request, db: Session = Depends(get_db),
    jwt: str = Depends(get_authorization_cookie)) -> bool:
    """Dependency that restricts a route to logged-in users.

    Token validation (and any resulting HTTP error) happens inside
    ``get_jwt_token``; the payload itself is not needed here.
    """
    await get_jwt_token(db, jwt)
    return True
async def is_manager(
    request: Request, db: Session = Depends(get_db),
    jwt: str = Depends(get_authorization_cookie)) -> bool:
    """
    A dependency function protecting routes for only logged in managers.

    Raises HTTPException(401) when the token payload lacks the manager flag.
    """
    jwt_payload = await get_jwt_token(db, jwt)
    if jwt_payload.get("is_manager"):
        return True
    raise HTTPException(
        status_code=HTTP_401_UNAUTHORIZED,
        # BUG FIX: Starlette requires headers to be a mapping; the original
        # passed a bare string, which crashes response rendering.
        # NOTE(review): key name chosen to preserve the path info — confirm
        # against whatever the frontend reads.
        headers={"location": request.url.path},
        detail="You don't have a permission to enter this page")
async def current_user_from_db(
    request: Request,
    db: Session = Depends(get_db),
    jwt: str = Depends(get_authorization_cookie),
) -> User:
    """Return the logged-in User object loaded from the database.

    A dependency function protecting routes for only logged in users;
    the JWT's username and user_id must both match the stored record.
    """
    payload = await get_jwt_token(db, jwt)
    username = payload.get("sub")
    user_id = payload.get("user_id")
    db_user = await User.get_by_username(db, username=username)
    # Guard clause: reject when the user is missing or the id disagrees.
    if not db_user or db_user.id != user_id:
        raise HTTPException(
            status_code=HTTP_401_UNAUTHORIZED,
            headers=request.url.path,
            detail="Your token is incorrect. Please log in again")
    return db_user
async def current_user(
    request: Request,
    db: Session = Depends(get_db),
    jwt: str = Depends(get_authorization_cookie),
) -> schema:
    """Return a lightweight CurrentUser built from the JWT payload.

    A dependency function protecting routes for only logged in users;
    unlike current_user_from_db, no database lookup is performed.
    """
    payload = await get_jwt_token(db, jwt)
    return schema.CurrentUser(
        user_id=payload.get("user_id"),
        username=payload.get("sub"),
    )
|
11531024
|
from .model import KerasModel
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.layers import BatchNormalization, Dropout, Conv2D, MaxPooling2D
from keras.layers import Input, Dense, Dropout, Flatten
from keras.layers import merge, Conv2D, MaxPooling2D, Input
from keras.layers.advanced_activations import PReLU
from keras.layers import Activation
from keras.models import Model
class RCNN(KerasModel):
    """Recurrent convolutional network built with the (legacy) Keras 1 API.

    Four stacked blocks, each with one 1x1 conv followed by three
    weight-shared 1x9 "recurrent" convolutions whose outputs are summed
    back onto the block input, then a softmax classification head.
    """

    # TODO: why is this called RCL?
    def RCL(self, l, a):
        """Build the RCNN graph on top of input tensor ``l``.

        Args:
            l: input Keras tensor.
            a: number of output classes for the final softmax layer.

        Returns:
            The output Keras tensor (softmax over ``a`` classes).
        """
        # first convolutional layer
        conv1 = Conv2D(filters=128, kernel_size=(1, 9), strides=(1, 1), padding='same', data_format='channels_last',
                       init='he_normal')(l)
        bn1 = BatchNormalization(epsilon=0.000001)(conv1)
        relu1 = PReLU()(bn1)
        pool1 = MaxPooling2D(pool_size=(1, 4), strides=(1, 4), padding='valid', data_format='channels_last')(relu1)
        drop1 = Dropout(0)(pool1)
        # start first RCL layer
        # the second time convolution and stored for recurrent
        conv2 = Conv2D(filters=128, kernel_size=(1, 1), padding='same', init='he_normal')(drop1)
        bn2 = BatchNormalization(axis=1, epsilon=0.000001)(conv2)
        relu2 = PReLU()(bn2)
        # first recurrent for the first convolution
        conv2a = Conv2D(filters=128, kernel_size=(1, 9), padding='same', init='he_normal')
        conv2aa = conv2a(relu2)
        merged2a = merge([conv2, conv2aa], mode='sum')
        # second recurrent for the first convolution (weights shared with conv2a)
        bn2a = BatchNormalization(axis=1, epsilon=0.000001)(merged2a)
        relu2a = PReLU()(bn2a)
        conv2b = Conv2D(filters=128, kernel_size=(1, 9), padding='same', weights=conv2a.get_weights())(relu2a)
        merged2b = merge([conv2, conv2b], mode='sum')
        # third recurrent for the first convolution
        bn2b = BatchNormalization(axis=1, epsilon=0.000001)(merged2b)
        relu2b = PReLU()(bn2b)
        conv2c = Conv2D(filters=128, kernel_size=(1, 9), padding='same', weights=conv2a.get_weights())(relu2b)
        merged2c = merge([conv2, conv2c], mode='sum')
        bn2c = BatchNormalization(axis=1, epsilon=0.000001)(merged2c)
        relu2c = PReLU()(bn2c)
        pool2 = MaxPooling2D(pool_size=(1, 4), strides=(1, 4), padding='valid', data_format='channels_last')(relu2c)
        drop2 = Dropout(0.2)(pool2)
        # second RCL block
        conv3 = Conv2D(filters=128, kernel_size=(1, 1), padding='same')(drop2)
        bn3 = BatchNormalization(axis=1, epsilon=0.000001)(conv3)
        relu3 = PReLU()(bn3)
        conv3a = Conv2D(filters=128, kernel_size=(1, 9), padding='same', init='he_normal')
        conv3aa = conv3a(relu3)
        merged3a = merge([conv3, conv3aa], mode='sum')
        bn3a = BatchNormalization(axis=1, epsilon=0.000001)(merged3a)
        relu3a = PReLU()(bn3a)
        conv3b = Conv2D(filters=128, kernel_size=(1, 9), padding='same', weights=conv3a.get_weights())(relu3a)
        merged3b = merge([conv3, conv3b], mode='sum')
        bn3b = BatchNormalization(axis=1, epsilon=0.000001)(merged3b)
        relu3b = PReLU()(bn3b)
        conv3c = Conv2D(filters=128, kernel_size=(1, 9), padding='same', weights=conv3a.get_weights())(relu3b)
        merged3c = merge([conv3, conv3c], mode='sum')
        bn3c = BatchNormalization(axis=1, epsilon=0.000001)(merged3c)
        relu3c = PReLU()(bn3c)
        pool3 = MaxPooling2D(pool_size=(1, 4), strides=(1, 4), padding='valid', data_format='channels_last')(relu3c)
        drop3 = Dropout(0.2)(pool3)
        # third RCL block
        conv4 = Conv2D(filters=128, kernel_size=(1, 1), padding='same', init='he_normal')(drop3)
        bn4 = BatchNormalization(axis=1, epsilon=0.000001)(conv4)
        relu4 = PReLU()(bn4)
        conv4a = Conv2D(filters=128, kernel_size=(1, 9), padding='same')
        conv4aa = conv4a(relu4)
        merged4a = merge([conv4, conv4aa], mode='sum')
        bn4a = BatchNormalization(axis=1, epsilon=0.000001)(merged4a)
        relu4a = PReLU()(bn4a)
        conv4b = Conv2D(filters=128, kernel_size=(1, 9), padding='same', weights=conv4a.get_weights())(relu4a)
        merged4b = merge([conv4, conv4b], mode='sum')
        bn4b = BatchNormalization(axis=1, epsilon=0.000001)(merged4b)
        relu4b = PReLU()(bn4b)
        conv4c = Conv2D(filters=128, kernel_size=(1, 9), padding='same', weights=conv4a.get_weights())(relu4b)
        merged4c = merge([conv4, conv4c], mode='sum')
        bn4c = BatchNormalization(axis=1, epsilon=0.000001)(merged4c)
        relu4c = PReLU()(bn4c)
        pool4 = MaxPooling2D(pool_size=(1, 4), strides=(1, 4), padding='valid', data_format='channels_last')(relu4c)
        drop4 = Dropout(0.2)(pool4)
        # fourth RCL block (no pooling at the end)
        conv5 = Conv2D(filters=128, kernel_size=(1, 1), padding='same')(drop4)
        bn5 = BatchNormalization(axis=1, epsilon=0.000001)(conv5)
        relu5 = PReLU()(bn5)
        conv5a = Conv2D(filters=128, kernel_size=(1, 9), padding='same')
        conv5aa = conv5a(relu5)
        merged5a = merge([conv5, conv5aa], mode='sum')
        bn5a = BatchNormalization(axis=1, epsilon=0.000001)(merged5a)
        relu5a = PReLU()(bn5a)
        conv5b = Conv2D(filters=128, kernel_size=(1, 9), padding='same', weights=conv5a.get_weights())(relu5a)
        merged5b = merge([conv5, conv5b], mode='sum')
        bn5b = BatchNormalization(axis=1, epsilon=0.000001)(merged5b)
        relu5b = PReLU()(bn5b)
        conv5c = Conv2D(filters=128, kernel_size=(1, 9), padding='same', weights=conv5a.get_weights())(relu5b)
        merged5c = merge([conv5, conv5c], mode='sum')
        bn5c = BatchNormalization(axis=1, epsilon=0.000001)(merged5c)
        relu5c = PReLU()(bn5c)
        # pool5 = MaxPooling2D(pool_size=(1, 4), strides=(1, 4), padding='valid', data_format='channels_last')(relu5c)
        drop5 = Dropout(0.2)(relu5c)
        conv_relu = Activation('sigmoid')(drop5)
        # TODO: what is going on with this variable name?
        l1111 = Flatten()(conv_relu)
        out = Dense(a, activation='softmax')(l1111)
        return out

    def create_model(self, input_shape, print_summary=False, class_count=2):
        """Create a new RCNN model instance.

        Args:
            input_shape: (width, height) of the input; internally wrapped
                as a single-channel (1, height, width) tensor.
            print_summary: when True, print the Keras model summary.
            class_count: number of output classes for the softmax head.

        Returns:
            The compiled Keras Model (also stored on ``self.model``).
        """
        changed_shape = (1, input_shape[1], input_shape[0])
        input_1 = Input(changed_shape)
        # BUG FIX: the original passed the undefined name `a` here
        # (NameError at runtime); the intended argument is the number
        # of output classes.
        output = self.RCL(input_1, class_count)
        model = Model(inputs=input_1, outputs=output)
        model.compile(loss='categorical_crossentropy',
                      optimizer='RMSprop',
                      metrics=['accuracy'])
        if print_summary:
            # Honor the previously-ignored flag.
            model.summary()
        self.model = model
        return model
|
11531030
|
from matrixio_hal import GPIO
import time
# Blink MATRIX Creator GPIO pin 0 at 1 Hz (high/low every 0.5 s).
# new pin 0
pin0 = GPIO.Pin(0)
# set pin to output mode
pin0.mode = GPIO.MODE_OUT
# Loop forever; stop with Ctrl-C.
while True:
    # XOR toggles the pin between high(1) and low(0) on each pass.
    pin0.value ^= GPIO.VALUE_HIGH
    time.sleep(0.5)
|
11531042
|
import json
import re
from csv import writer
import requests
from bs4 import BeautifulSoup
def get_data():
    """Scrape the US News overall country rankings.

    The page embeds its data as JSON assigned to window.__APOLLO_STATE__
    inside the last <script> tag; extract, parse, and return the list of
    country ranking entries.
    """
    user_agent = ("Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36")
    # Defining a user agent is necessary to the request
    response = requests.get(
        "https://www.usnews.com/news/best-countries/overall-rankings",
        headers={"User-Agent": user_agent},
    )
    soup = BeautifulSoup(response.text, "html.parser")
    # The data is hard coded in a script tag
    script = soup.find_all("script")[-1]
    match = re.search(r"window\.__APOLLO_STATE__ =(.*?)window\.",
                      script.text, re.DOTALL)
    payload = json.loads(match.group(1))
    return payload["$ROOT_QUERY.context"]["rankings"]["json"]
def write_to_csv():
    """Write the data into a csv file in the current directory.

    Fetches the rankings via get_data() and writes one row per ranked
    country; countries without an overall_rank are skipped.
    """
    countries = get_data()
    # BUG FIX: the csv module requires newline="" on files it writes
    # (otherwise extra blank rows appear on Windows); utf-8 keeps
    # non-ASCII country/capital names intact.
    with open("usnews_ranking.csv", "w", newline="", encoding="utf-8") as csv_file:
        csv_writer = writer(csv_file)
        # Preparing the csv file header
        csv_writer.writerow(
            [
                "rank",
                "name",
                "gdp",
                "population",
                "capital",
                "gdp_per_capita",
                "geographic_region_name",
            ],
        )
        # Writing information for each country
        for country in countries:
            country = country["country_summary"]
            if country["overall_rank"]:
                csv_writer.writerow(
                    [
                        country["overall_rank"],
                        country["name"],
                        country["gdp"],
                        country["population"],
                        country["capital"],
                        country["gdp_per_capita"],
                        country["geographic_region_name"],
                    ],
                )
if __name__ == "__main__":
    # Run the scrape-and-export pipeline when executed as a script.
    write_to_csv()
|
11531062
|
from typing import List, Optional
from sqlalchemy.orm.session import Session as SessionType
from sqlalchemy_cockroachdb import run_transaction # type: ignore
from src.db.main import Session
from src.db.models.team_match import TeamMatch, TeamMatchORM
def get_team_matches(
    year: Optional[int] = None, event: Optional[int] = None
) -> List[TeamMatch]:
    """Fetch team matches, optionally filtered by year and/or event id."""

    def callback(session: SessionType):
        query = session.query(TeamMatchORM)  # type: ignore
        if year is not None:
            query = query.filter(TeamMatchORM.year == year)  # type: ignore
        if event is not None:
            query = query.filter(TeamMatchORM.event_id == event)  # type: ignore
        rows: List[TeamMatchORM] = query.all()  # type: ignore
        return [TeamMatch.from_dict(row.__dict__) for row in rows]

    # run_transaction retries the callback on transient CockroachDB errors.
    return run_transaction(Session, callback)  # type: ignore
def get_num_team_matches() -> int:
    """Return the total number of TeamMatch rows."""

    def callback(session: SessionType) -> int:
        query = session.query(TeamMatchORM)  # type: ignore
        return query.count()  # type: ignore

    return run_transaction(Session, callback)  # type: ignore
|
11531063
|
import tempfile
import pytest
import mxnet as mx
import os
import numpy as np
import numpy.testing as npt
from gluonnlp.models import get_backbone, list_backbone_names
from gluonnlp.utils.parameter import count_parameters
from gluonnlp.utils.lazy_imports import try_import_tvm
# Enable NumPy-compatible array semantics for MXNet globally.
mx.npx.set_np()
def test_list_backbone_names():
    """At least one backbone must be registered."""
    names = list_backbone_names()
    assert len(names) > 0
def tvm_enabled():
    """Return True when TVM can be imported, False otherwise."""
    try:
        try_import_tvm()
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are not silently swallowed; also dropped the unused binding.
    except Exception:
        return False
    return True
@pytest.mark.slow
@pytest.mark.parametrize('name', list_backbone_names())
def test_get_backbone(name, ctx):
    """Download each registered backbone, run one forward pass, export it.

    Skips GPT-2 (export unsupported here) and the very large T5/mT5
    variants. `ctx` is a pytest fixture providing the MXNet device context.
    """
    with tempfile.TemporaryDirectory() as root, ctx:
        # Test for model download
        model_cls, cfg, tokenizer, local_params_path, _ = get_backbone(name, root=root)
        net = model_cls.from_cfg(cfg)
        net.load_parameters(local_params_path)
        net.hybridize()
        num_params, num_fixed_params = count_parameters(net.collect_params())
        assert num_params > 0
        # Test for model export + save
        if 'gpt2' in name:
            pytest.skip('Skipping GPT-2 test')
        elif name in ['google_t5_3B', 'google_t5_11B', 'google_mt5_xl', 'google_mt5_xxl']:
            pytest.skip('Skipping large T5 (mT5) model test')
        # Tiny random batch; each model family has a different call signature.
        batch_size = 1
        sequence_length = 4
        inputs = mx.np.random.randint(0, 10, (batch_size, sequence_length))
        token_types = mx.np.random.randint(0, 2, (batch_size, sequence_length))
        valid_length = mx.np.random.randint(1, sequence_length, (batch_size,))
        if 'roberta' in name:
            out = net(inputs, valid_length)
        elif 'xlmr' in name:
            out = net(inputs, valid_length)
        elif 'bart' in name:
            out = net(inputs, valid_length, inputs, valid_length)
        elif 'gpt2' in name:
            # NOTE(review): unreachable — gpt2 models were skipped above.
            states = net.init_states(batch_size=batch_size, ctx=ctx)
            out, new_states = net(inputs, states)
            out_np = out.asnumpy()
        elif 't5' in name:
            out = net(inputs, valid_length, inputs, valid_length)
        else:
            out = net(inputs, token_types, valid_length)
        mx.npx.waitall()
        net.export(os.path.join(root, 'model'))
@pytest.mark.serial
@pytest.mark.seed(123)
@pytest.mark.parametrize('model_name',
                         ['google_albert_base_v2',
                          'google_en_cased_bert_base',
                          'google_electra_small',
                          'fairseq_bart_base'])
@pytest.mark.parametrize('batch_size,seq_length', [(2, 4), (1, 4)])
@pytest.mark.parametrize('layout', ['NT', 'TN'])
@pytest.mark.skipif(not tvm_enabled(),
                    reason='TVM is not supported. So this test is skipped.')
def test_tvm_integration(model_name, batch_size, seq_length, layout, ctx):
    """Compile a backbone through TVM and compare outputs against MXNet."""
    tvm = try_import_tvm()
    from tvm import relay
    from tvm.contrib import graph_executor
    from gluonnlp.utils.tvm_utils import get_ec2_tvm_flags, update_tvm_convert_map
    update_tvm_convert_map()
    tvm_recommended_flags = get_ec2_tvm_flags()
    # Pick TVM build flags matching the EC2 instance family for this device.
    if ctx.device_type == 'gpu':
        flags = tvm_recommended_flags['g4']
    elif ctx.device_type == 'cpu':
        flags = tvm_recommended_flags['c4']
        if model_name != 'google_albert_base_v2':
            # Skip all other tests
            return
    else:
        raise NotImplementedError
    # NOTE(review): indentation was lost in this copy; the nesting above
    # (CPU runs only the albert model, unknown devices raise) is a
    # reconstruction — confirm against the upstream source.
    with tempfile.TemporaryDirectory() as root, ctx:
        model_cls, cfg, tokenizer, backbone_param_path, _ = get_backbone(model_name, root=root)
        cfg.defrost()
        cfg.MODEL.layout = layout
        cfg.freeze()
        model = model_cls.from_cfg(cfg)
        model.load_parameters(backbone_param_path)
        model.hybridize()
        # Random int32 inputs in the requested layout (NT = batch-major,
        # TN = time-major).
        if layout == 'NT':
            token_ids = mx.np.random.randint(0, cfg.MODEL.vocab_size, (batch_size, seq_length),
                                             dtype=np.int32)
            token_types = mx.np.random.randint(0, 2, (batch_size, seq_length), dtype=np.int32)
            valid_length = mx.np.random.randint(seq_length // 2, seq_length, (batch_size,),
                                                dtype=np.int32)
        else:
            token_ids = mx.np.random.randint(0, cfg.MODEL.vocab_size, (seq_length, batch_size),
                                             dtype=np.int32)
            token_types = mx.np.random.randint(0, 2, (seq_length, batch_size), dtype=np.int32)
            valid_length = mx.np.random.randint(seq_length // 2, seq_length, (batch_size,),
                                                dtype=np.int32)
        # Each model family has a different input signature; record the
        # shapes/dtypes TVM needs for graph import.
        if 'bart' in model_name:
            mx_out = model(token_ids, valid_length, token_ids, valid_length)
            shape_dict = {
                'data0': token_ids.shape,
                'data1': valid_length.shape,
                'data2': token_ids.shape,
                'data3': valid_length.shape,
            }
            dtype_dict = {
                'data0': token_ids.dtype.name,
                'data1': valid_length.dtype.name,
                'data2': token_ids.dtype.name,
                'data3': valid_length.dtype.name,
            }
        elif 'roberta' in model_name or 'xlmr' in model_name:
            mx_out = model(token_ids, valid_length)
            shape_dict = {
                'data0': token_ids.shape,
                'data1': valid_length.shape,
            }
            dtype_dict = {
                'data0': token_ids.dtype.name,
                'data1': valid_length.dtype.name,
            }
        else:
            mx_out = model(token_ids, token_types, valid_length)
            shape_dict = {
                'data0': token_ids.shape,
                'data1': token_types.shape,
                'data2': valid_length.shape
            }
            dtype_dict = {
                'data0': token_ids.dtype.name,
                'data1': token_types.dtype.name,
                'data2': valid_length.dtype.name
            }
        # Export the hybridized symbol graph and parameters into TVM relay.
        sym = model._cached_graph[1]
        params = {}
        for k, v in model.collect_params().items():
            params[v._var_name] = tvm.nd.array(v.data().asnumpy())
        mod, params = relay.frontend.from_mxnet(sym, shape=shape_dict, dtype=dtype_dict, arg_params=params)
        target = flags['target']
        use_gpu = flags['use_gpu']
        opt_level = flags['opt_level']
        required_pass = flags['required_pass']
        with tvm.transform.PassContext(opt_level=opt_level, required_pass=required_pass):
            lib = relay.build(mod, target, params=params)
        if use_gpu:
            ctx = tvm.gpu()
        else:
            ctx = tvm.cpu()
        rt = graph_executor.GraphModule(lib["default"](ctx))
        if 'bart' in model_name:
            rt.set_input(data0=token_ids.asnumpy(), data1=valid_length.asnumpy(), data2=token_ids.asnumpy(), data3=valid_length.asnumpy())
        elif 'roberta' in model_name:
            rt.set_input(data0=token_ids.asnumpy(), data1=valid_length.asnumpy())
        else:
            rt.set_input(data0=token_ids.asnumpy(), data1=token_types.asnumpy(), data2=valid_length.asnumpy())
        rt.run()
        # Compare every TVM output against the corresponding MXNet output
        # with a loose tolerance (compilation changes float accumulation).
        for i in range(rt.get_num_outputs()):
            out = rt.get_output(i)
            if rt.get_num_outputs() == 1:
                mx_out_gt = mx_out.asnumpy()
            else:
                mx_out_gt = mx_out[i].asnumpy()
            npt.assert_allclose(out.asnumpy(), mx_out_gt, rtol=1e-3, atol=1e-1)
|
11531091
|
from fastapi import Body, APIRouter
from fastapi.encoders import jsonable_encoder
from fastapi.security import HTTPBasicCredentials
from fastapi.responses import RedirectResponse
from passlib.context import CryptContext
from database.database import admin_collection, post_collection
from auth.jwt_handler import signJWT
from database.database import add_admin, retrieve_config
from models.admin import *
from services.SocialConnection.FacebookService import getCurrentUserEmail
router = APIRouter()
# bcrypt context used to hash and verify admin passwords
hash_helper = CryptContext(schemes=["bcrypt"])
@router.get("/login/fb/")
async def admin_loginFB(code: str = '', state: str = ''):
    """Log an admin in via Facebook OAuth.

    With no ``code``, redirect to Facebook's OAuth dialog; with a code,
    exchange it for the user's email and return a signed JWT when that
    email belongs to a known admin.
    """
    redirectUrl = await retrieve_config('siteUrl') + '/login/fb'
    fbAppId = await retrieve_config('facebook_appId')
    if not fbAppId:
        # Typo fix in the user-facing message: "occrred" -> "occurred".
        return ErrorResponseModel("An error occurred", 500, "Login with Facebook is not enabled.")
    oAuthLink = "https://www.facebook.com/v11.0/dialog/oauth?client_id={}&redirect_uri={}&state={}" \
        .format(fbAppId, redirectUrl, '') \
        + '&scope=email,pages_manage_posts,pages_read_user_content,pages_show_list'
    if code == '':
        return RedirectResponse(oAuthLink)
    userEmail = await getCurrentUserEmail(code)
    admin_user = await admin_collection.find_one({"email": userEmail}, {"_id": 0})
    if admin_user:
        return signJWT(userEmail)
    return ErrorResponseModel("An error occurred", 403, "Incorrect email or password")
@router.post("/login")
async def admin_login(admin_credentials: HTTPBasicCredentials = Body(...)):
    """Authenticate an admin by email/password and return a signed JWT.

    A single generic error is returned whether the email is unknown or
    the password is wrong, to avoid leaking which one failed.
    """
    admin_user = await admin_collection.find_one(
        {"email": admin_credentials.username}, {"_id": 0})
    if admin_user and hash_helper.verify(
            admin_credentials.password, admin_user["password"]):
        return signJWT(admin_credentials.username)
    # Typo fix in the user-facing message: "occrred" -> "occurred".
    return ErrorResponseModel("An error occurred", 403, "Incorrect email or password")
@router.post("/")
async def admin_signup(admin: AdminModel = Body(...)):
    """Create a new admin account with a bcrypt-hashed password."""
    admin_exists = await admin_collection.find_one({"email": admin.email}, {"_id": 0})
    if admin_exists:
        # Typo fix in the user-facing message: "occrred" -> "occurred".
        return ErrorResponseModel("An error occurred", 500, "Email already exists")
    # BUG FIX: the original line contained the literal placeholder
    # `<PASSWORD>(...)` (a syntax error). Never store the plaintext
    # password; hash it with the module's bcrypt context before persisting.
    admin.password = hash_helper.hash(admin.password)
    new_admin = await add_admin(jsonable_encoder(admin))
    return new_admin
|
11531101
|
from typing import List, Optional
from pydantic import PositiveInt
from rastervision.pipeline.config import (Config, register_config, ConfigError,
Field)
from rastervision.pipeline.utils import split_into_groups
from rastervision.core.data.scene_config import SceneConfig
from rastervision.core.data.class_config import ClassConfig
from rastervision.core.data.utils import all_equal
@register_config('dataset')
class DatasetConfig(Config):
    """Config for a Dataset comprising the scenes for train, valid, and test splits."""
    # Shared class taxonomy for all scenes.
    class_config: ClassConfig
    train_scenes: List[SceneConfig]
    validation_scenes: List[SceneConfig]
    # Test scenes are optional; defaults to an empty list.
    test_scenes: List[SceneConfig] = []
    img_channels: Optional[PositiveInt] = Field(
        None, description='The number of channels of the images.')

    def update(self, pipeline=None):
        """Propagate pipeline context to the class config and every scene,
        then infer img_channels from the first training scene."""
        super().update()
        self.class_config.update(pipeline=pipeline)
        for s in self.train_scenes:
            s.update(pipeline=pipeline)
        for s in self.validation_scenes:
            s.update(pipeline=pipeline)
        # NOTE(review): test_scenes defaults to [], so this None check is
        # always true unless a caller explicitly sets it to None.
        if self.test_scenes is not None:
            for s in self.test_scenes:
                s.update(pipeline=pipeline)
        # Infer channel count from the first train scene's channel_order;
        # NOTE(review): this overwrites any user-supplied img_channels when
        # channel_order is set — confirm that is intended.
        channel_order = self.train_scenes[0].raster_source.channel_order
        if channel_order is not None:
            self.img_channels = len(channel_order)

    def validate_config(self):
        """Raise ConfigError on duplicate scene ids or mismatched channel order."""
        ids = [s.id for s in self.train_scenes]
        if len(set(ids)) != len(ids):
            raise ConfigError('All training scene ids must be unique.')
        # Validation and test ids must be unique across both splits combined.
        ids = [s.id for s in self.validation_scenes + self.test_scenes]
        if len(set(ids)) != len(ids):
            raise ConfigError(
                'All validation and test scene ids must be unique.')
        self.ensure_same_channel_order()

    def ensure_same_channel_order(self):
        """Raise ConfigError unless every scene shares one channel_order."""
        all_scenes = self.train_scenes + self.validation_scenes + self.test_scenes
        channel_orders = [s.raster_source.channel_order for s in all_scenes]
        if not all_equal(channel_orders):
            raise ConfigError('channel_order must be same for all scenes.')

    def get_split_config(self, split_ind, num_splits):
        """Return a copy holding only the split_ind-th group of each scene list.

        Splits past the number of groups get empty lists.
        """
        new_cfg = self.copy()
        groups = split_into_groups(self.train_scenes, num_splits)
        new_cfg.train_scenes = groups[
            split_ind] if split_ind < len(groups) else []
        groups = split_into_groups(self.validation_scenes, num_splits)
        new_cfg.validation_scenes = groups[
            split_ind] if split_ind < len(groups) else []
        if self.test_scenes:
            groups = split_into_groups(self.test_scenes, num_splits)
            new_cfg.test_scenes = groups[
                split_ind] if split_ind < len(groups) else []
        return new_cfg

    def get_all_scenes(self):
        """Return train + validation + test scenes as one list."""
        return self.train_scenes + self.validation_scenes + self.test_scenes
|
11531116
|
import unittest
from gooey.gui.components.options import options
class TestPrefixFilter(unittest.TestCase):
    """Unit tests for the option helpers in gooey.gui.components.options."""

    def test_doc_schenanigans(self):
        """Sanity check that my docstring wrappers all behave as expected"""
        @options._include_layout_docs
        def no_self_docstring():
            pass

        @options._include_layout_docs
        def yes_self_docstring():
            """sup"""
            pass
        # gets attached to functions even if they don't have a docstring
        self.assertIn(options.LayoutOptions.__doc__, no_self_docstring.__doc__)
        # gets attached to the *end* of existing doc strings
        self.assertTrue(yes_self_docstring.__doc__.startswith('sup'))
        self.assertIn(options.LayoutOptions.__doc__, yes_self_docstring.__doc__)

    def test_clean_method(self):
        """
        _clean should drop any keys with None values
        and flatten the layout_option kwargs to the root level
        """
        # Falsy-but-not-None values (0 here) must survive the cleaning.
        result = options._clean({'a': None, 'b': 123, 'c': 0})
        self.assertEqual(result, {'b': 123, 'c': 0})
        result = options._clean({'root_level': 123, 'layout_options': {
            'nested': 'hello',
            'another': 1234
        }})
        self.assertEqual(result, {'root_level': 123, 'nested': 'hello', 'another': 1234})

    def test_only_provided_arguments_included(self):
        """
        More sanity checking that the internal use of locals()
        does the Right Thing
        """
        # Explicitly-passed kwargs appear; omitted ones must not.
        option = options.LayoutOptions(label_color='#ffffff')
        self.assertIn('label_color', option)
        option = options.LayoutOptions()
        self.assertNotIn('label_color', option)
        option = options.TextField(label_color='#ffffff')
        self.assertIn('label_color', option)
        option = options.TextField()
        self.assertNotIn('label_color', option)
|
11531158
|
from setuptools import setup, find_packages
from setuptools.command.install import install
import os.path
import sys
# Package version; must match the GIT_TAG checked by VerifyVersionCommand.
VERSION='2.3.3'
here = os.path.abspath(os.path.dirname(__file__))
readme_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'README.md')
# Prefer converting the Markdown README to reST with m2r for PyPI rendering.
try:
    from m2r import parse_from_file
    readme = parse_from_file(readme_file)
except ImportError:
    # m2r may not be installed in user environment
    with open(readme_file) as f:
        readme = f.read()
class VerifyVersionCommand(install):
    """Custom command to verify that the git tag matches our version"""
    description = 'verify that the git tag matches our version'

    def run(self):
        # CI sets GIT_TAG; abort the release when it disagrees with VERSION.
        # NOTE: intentionally does not call install.run() — this command
        # only verifies, it never installs.
        git_tag = os.getenv('GIT_TAG')
        if git_tag != VERSION:
            info = "Git tag: {0} does not match the version of this app: {1}".format(
                git_tag, VERSION
            )
            sys.exit(info)
# Package metadata and entry points for the ciftify command-line suite.
setup(
    name='ciftify',
    version=VERSION,
    description='The tools of the Human Connectome Project (HCP) '\
        'adapted for working with non-HCP datasets',
    long_description=readme,
    url='https://github.com/edickie/ciftify',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 3'],
    keywords='PINT neuroimaging fMRI cifti gifti nifti HCP',
    packages=find_packages(exclude=['tests']),
    data_files=[('', ['LICENSE', 'README.md'])],
    # One console script per ciftify.bin module's main().
    entry_points={
        'console_scripts': [
            'cifti_vis_fmri=ciftify.bin.cifti_vis_fmri:main',
            'cifti_vis_PINT=ciftify.bin.cifti_vis_PINT:main',
            'cifti_vis_recon_all=ciftify.bin.cifti_vis_recon_all:main',
            'cifti_vis_map=ciftify.bin.cifti_vis_map:main',
            'ciftify_groupmask=ciftify.bin.ciftify_groupmask:main',
            'ciftify_meants=ciftify.bin.ciftify_meants:main',
            'ciftify_peaktable=ciftify.bin.ciftify_statclust_report:main',
            'ciftify_dlabel_report=ciftify.bin.ciftify_dlabel_report:main',
            'ciftify_PINT_vertices=ciftify.bin.ciftify_PINT_vertices:main',
            'ciftify_clean_img=ciftify.bin.ciftify_clean_img:main',
            'ciftify_postPINT1_concat=ciftify.bin.ciftify_postPINT1_concat:main',
            'ciftify_postPINT2_sub2sub=ciftify.bin.ciftify_postPINT2_sub2sub:main',
            'ciftify_recon_all=ciftify.bin.ciftify_recon_all:main',
            'ciftify_surface_rois=ciftify.bin.ciftify_surface_rois:main',
            'ciftify_vol_result=ciftify.bin.ciftify_vol_result:main',
            'ciftify_seed_corr=ciftify.bin.ciftify_seed_corr:main',
            'ciftify_subject_fmri=ciftify.bin.ciftify_subject_fmri:main',
            'ciftify_falff=ciftify.bin.ciftify_falff:main',
            'ciftify_dlabel_to_vol=ciftify.bin.ciftify_dlabel_to_vol:main',
            'ciftify_statclust_report=ciftify.bin.ciftify_statclust_report:main',
            'extract_nuisance_regressors=ciftify.bin.extract_nuisance_regressors:main'
        ],
    },
    scripts=['ciftify/bin/filter_hcp.sh','ciftify/bin/cifti_vis_RSN'],
    install_requires=[
        'docopt',
        'matplotlib',
        'nibabel',
        'numpy',
        'pandas',
        'PyYaml',
        'seaborn',
        'scipy',
        'pillow',
        'nilearn',
        'scikit-learn',
        'pybids>=0.7.0'],
    include_package_data=True,
    # `python setup.py verify` checks GIT_TAG against VERSION (see above).
    cmdclass={
        'verify': VerifyVersionCommand,
    }
)
|
11531159
|
import unittest
import mock
from utils import k8s_util
from kubernetes.client.models.v1_node import V1Node
from kubernetes.client.models.v1_node_list import V1NodeList
from kubernetes.client.models.v1_node_status import V1NodeStatus
from kubernetes.client.models.v1_node_address import V1NodeAddress
from kubernetes.client.models.v1_pod_list import V1PodList
from kubernetes.client.models.v1_pod import V1Pod
from kubernetes.client.models.v1_object_meta import V1ObjectMeta
from kubernetes.client.models.v1_pod_spec import V1PodSpec
def _mock_v1_node(internal_ip, hostname):
    """Build a V1Node whose status carries an InternalIP and a Hostname address."""
    addresses = [
        V1NodeAddress(internal_ip, "InternalIP"),
        V1NodeAddress(hostname, "Hostname"),
    ]
    node = V1Node()
    node.status = V1NodeStatus(addresses=addresses)
    return node
def _mock_v1_pod(jobId, userName, vcName, nodeName):
    """Build a V1Pod labeled as a job pod and scheduled on nodeName."""
    pod = V1Pod()
    metadata = V1ObjectMeta()
    metadata.labels = {
        "jobId": jobId,
        "type": "job",
        "userName": userName,
        "vcName": vcName
    }
    pod.metadata = metadata
    spec = V1PodSpec(containers=[])
    spec.node_name = nodeName
    pod.spec = spec
    return pod
class TestK8sUtil(unittest.TestCase):
    """Tests for utils.k8s_util with mocked Kubernetes API responses."""

    @mock.patch('utils.k8s_util.list_namespaced_pod')
    def test_get_job_info_from_nodes(self, mock_list_namespaced_pod):
        """Jobs are aggregated per jobId with node lists and job links."""
        # Two pods share job 12345678-abcd across node1/node2; pod_four is
        # on node3, which is outside the queried node list.
        pod_one = _mock_v1_pod("87654321-wxyz", "user1", "vc1", "node1")
        pod_two = _mock_v1_pod("12345678-abcd", "user2", "vc2", "node1")
        pod_three = _mock_v1_pod("12345678-abcd", "user2", "vc2", "node2")
        pod_four = _mock_v1_pod("99999999-efgh", "user3", "vc3", "node3")
        mock_pod_list = V1PodList(items=[pod_one, pod_two, pod_three, pod_four])
        mock_list_namespaced_pod.return_value = mock_pod_list
        job_response = k8s_util.get_job_info_from_nodes(
            ["node1", "node2"], "dlts.domain.com", "cluster1")
        self.assertTrue("87654321-wxyz" in job_response)
        self.assertEqual(1, len(job_response["87654321-wxyz"]["node_names"]))
        self.assertTrue("node1" in job_response["87654321-wxyz"]["node_names"])
        self.assertEqual("https://dlts.domain.com/job/vc1/cluster1/87654321-wxyz",
                         job_response["87654321-wxyz"]["job_link"])
        self.assertTrue("12345678-abcd" in job_response)
        self.assertEqual(2, len(job_response["12345678-abcd"]["node_names"]))
        self.assertTrue("node1" in job_response["12345678-abcd"]["node_names"])
        self.assertTrue("node2" in job_response["12345678-abcd"]["node_names"])
        self.assertEqual("https://dlts.domain.com/job/vc2/cluster1/12345678-abcd",
                         job_response["12345678-abcd"]["job_link"])

    @mock.patch('utils.k8s_util.list_node')
    def test_get_node_address_info(self, mock_list_node):
        """Address info maps each node's InternalIP to its hostname."""
        node_one = _mock_v1_node("192.168.0.1", "mock-worker-one")
        node_two = _mock_v1_node("192.168.0.2", "mock-worker-two")
        mock_list_node.return_value = V1NodeList(items=[node_one, node_two])
        address_info = k8s_util.get_node_address_info()
        self.assertEqual(len(address_info), 2)
        self.assertTrue('192.168.0.1' in address_info)
        self.assertEqual(address_info['192.168.0.1'], "mock-worker-one")
        self.assertTrue('192.168.0.2' in address_info)
        self.assertEqual(address_info['192.168.0.2'], "mock-worker-two")

    @mock.patch('utils.k8s_util.list_node')
    def test_get_node_address_info_empty(self, mock_list_node):
        """An empty node list yields an empty mapping."""
        mock_list_node.return_value = V1NodeList(items=[])
        address_info = k8s_util.get_node_address_info()
        self.assertEqual(len(address_info), 0)
|
11531161
|
from setuptools import find_packages, setup
# Packaging metadata for the varclr library (variable-name contrastive
# representations); pins transformers exactly, floors everything else.
setup(
    name="varclr",
    version="1.0",
    author="<NAME>",
    author_email="<EMAIL>",
    license="MIT",
    python_requires=">=3.6",
    packages=find_packages(exclude=[]),
    install_requires=[
        "black>=21.10b0",
        "gdown>=4.2.0",
        "isort>=5.8.0",
        "pandas>=1.1.0",
        "pre-commit>=2.15.0",
        "pytest>=6.2.4",
        "pytorch-lightning>=1.0.8,<1.3",
        "sentencepiece>=0.1.95",
        "scipy>=1.5.2",
        "torch>=1.7.1",
        "transformers==4.5.1",
        "wandb>=0.12.6",
    ],
)
|
11531183
|
from textwrap import wrap
from decimal import Decimal
from django.template.loader import render_to_string
from django.db.models import Avg
from notifications import constants
from std_bounties.models import Bounty, Fulfillment
from bounties.utils import (
bounty_url_for,
profile_url_for,
shorten_address,
calculate_token_value,
token_decimals,
usd_decimals
)
from bounties.settings import ENVIRONMENT
# Fallback avatar used in emails when a user/issuer has no profile image set.
default_image = ('https://gallery.mailchimp.com/03351ad14a86e9637146ada2a'
                 '/images/fae20fec-36ab-4594-9753-643c04e0ab9a.png')
class Email:
    """Builds and renders notification emails for bounty events.

    Each supported notification constant maps to an HTML template in
    ``templates``. Construction assembles the full template context
    (amounts, user/issuer info, links, ratings, ...) from a Bounty and
    the related objects passed via keyword arguments.
    """

    # Supported notification types that have an email template:
    templates = {
        constants.FULFILLMENT_SUBMITTED_ISSUER: 'fulfillmentSubmitted.html',
        constants.FULFILLMENT_ACCEPTED_FULFILLER: 'submissionAccepted.html',
        constants.CONTRIBUTION_ADDED: 'contributionReceived.html',
        constants.CONTRIBUTION_RECEIVED: 'contributionReceived.html',
        constants.ISSUER_TRANSFERRED: 'bountyTransferSent.html',
        constants.TRANSFER_RECIPIENT: 'bountyTransferReceived.html',
        constants.BOUNTY_EXPIRED: 'bountyExpired.html',
        constants.BOUNTY_COMMENT_RECEIVED: 'commentOnFulfilledBounty.html',
        constants.BOUNTY_COMMENT_RECEIVED_COMMENTER: 'commentOnBounty.html',
        constants.BOUNTY_COMMENT_RECEIVED_ISSUER: 'commentOnMyBounty.html',
        constants.FULFILLMENT_UPDATED: 'fulfillmentUpdated.html',
        constants.FULFILLMENT_UPDATED_ISSUER: 'fulfillmentUpdatedIssuer.html',
        constants.RATING_RECEIVED: 'receivedRating.html',
        constants.BOUNTY_COMPLETED: 'bountyCompleted.html',
        constants.APPLICATION_RECEIVED: 'applicationReceived.html',
        constants.APPLICATION_ACCEPTED_APPLICANT: 'applicationAccepted.html',
        constants.APPLICATION_REJECTED_APPLICANT: 'applicationRejected.html',
        constants.BOUNTY_CHANGED: 'bountyChangedFulfiller.html',
        constants.BOUNTY_CHANGED_APPLICANT: 'bountyChangedApplicant.html',
        constants.FULFILLMENT_COMMENT_RECEIVED: 'fulfillmentCommentReceived.html',
        constants.FULFILLMENT_COMMENT_RECEIVED_ISSUER: 'fulfillmentCommentReceivedIssuer.html',
        constants.FULFILLMENT_COMMENT_RECEIVED_COMMENTER: 'fulfillmentCommentReceivedCommenter.html',
    }

    # Long descriptions/titles are truncated near these limits (at a word
    # boundary, via textwrap.wrap).
    max_description_length = 240
    max_title_length = 120

    @staticmethod
    def rating_color(rating):
        """Map a 1-5 rating to the brand color used to display it."""
        if rating >= 4:
            return '#6FC78D' # 'brand-green'
        elif rating >= 3:
            return '#FBAA31' # 'brand-orange'
        else:
            return '#D14545' # 'brand-red'

    def __init__(self, **kwargs):
        """Assemble the template context for one notification email.

        Required kwargs: bounty (Bounty), url, user, issuer, from_user,
        notification_name (int; must be a key of Email.templates).
        Optional kwargs: review, comment, fulfillment_description,
        string_data, application_message, rejection_message; amount
        (contribution notifications) and fulfillment_id (fulfillment
        accepted) are required for those notification types.

        Raises:
            TypeError: if notification_name is not an int or bounty is not a Bounty.
            ValueError: if notification_name has no template.
        """
        bounty = kwargs['bounty']
        url = kwargs['url']
        user = kwargs['user']
        issuer = kwargs['issuer']
        from_user = kwargs['from_user']
        notification_name = kwargs['notification_name']
        review = kwargs.get('review')
        comment = kwargs.get('comment')
        description = kwargs.get('fulfillment_description', '')
        preview_text = kwargs.get('string_data', '')
        application_message = kwargs.get('application_message', '')
        rejection_message = kwargs.get('rejection_message', '')

        if notification_name.__class__ != int:
            raise TypeError('notification_name must be of type int')
        elif notification_name not in Email.templates:
            raise ValueError(
                'notification_name {} must be a valid notification'.format(
                    notification_name))
        if bounty.__class__ != Bounty:
            raise TypeError('bounty must be of type Bounty')

        remaining = token_decimals(bounty.calculated_balance)
        token_amount = token_decimals(bounty.calculated_fulfillment_amount)

        if len(description) > self.max_description_length:
            # Cut off at the closest word after the limit
            description = wrap(
                description,
                self.max_description_length
            )[0] + ' ...'

        title = bounty.title
        if len(title) > self.max_title_length:
            # Cut off at the closest word after the limit
            title = wrap(title, self.max_title_length)[0] + ' ...'

        if not url or len(url) == 0:
            url = bounty_url_for(bounty.id, bounty.platform)

        remaining_submissions = 0
        if (notification_name == constants.BOUNTY_EXPIRED or
                notification_name == constants.CONTRIBUTION_RECEIVED or
                notification_name == constants.CONTRIBUTION_ADDED):
            remaining_submissions = Fulfillment.objects.filter(
                bounty_id=bounty.id,
                accepted=False,
            ).all().count()

        # NOTE(review): this query is identical to remaining_submissions above
        # (both filter accepted=False); "total" may have been intended to count
        # all fulfillments regardless of acceptance — confirm intent.
        total_submissions = Fulfillment.objects.filter(
            bounty_id=bounty.id,
            accepted=False,
        ).all().count()

        submissions = ''
        if total_submissions == 1:
            submissions = '{} submission'.format(total_submissions)
        else:
            submissions = '{} submissions'.format(total_submissions)

        # USD value of the remaining balance: prefer the locked token price,
        # then the current token price; otherwise report ' unknown'.
        remaining_usd = ' unknown'
        if bounty.token_lock_price:
            remaining_usd = usd_decimals(
                remaining * usd_decimals(bounty.token_lock_price))
        elif bounty.token and bounty.token.price_usd:
            remaining_usd = usd_decimals(
                remaining * usd_decimals(bounty.token.price_usd))

        added_amount = 0
        if (notification_name == constants.CONTRIBUTION_RECEIVED or
                notification_name == constants.CONTRIBUTION_ADDED):
            amount = kwargs['amount']
            added_amount = token_decimals(calculate_token_value(
                int(Decimal(amount)), bounty.token_decimals))

        rating_url = url
        if notification_name == constants.FULFILLMENT_ACCEPTED_FULFILLER:
            rating_url = '{}?fulfillment_id={}&rating=true'.format(
                url, kwargs['fulfillment_id'])

        user_address_link = (
            user and profile_url_for(user.public_address, bounty.platform)
        )

        ratings = None
        rating_link = None
        if notification_name == constants.RATING_RECEIVED:
            user_reviewees = user.reviewees.filter(platform=bounty.platform)
            rating_link = user_address_link + '?reviews=true'
            if user.public_address == issuer.public_address:
                # Rating for the issuer from the fulfiller
                ratings = user_reviewees.filter(
                    issuer_review__isnull=False)
            else:
                # Rating for the fulfiller from the issuer
                ratings = user_reviewees.filter(
                    fulfillment_review__isnull=False)
                rating_link += '&fulfiller=true'
        rating_count = ratings and ratings.count() or 0
        average_rating = ratings and ratings.aggregate(
            Avg('rating')).get('rating__avg') or 0

        # Expose everything to the template via instance attributes.
        self.__dict__.update({
            'bounty': bounty,
            'bounty_title': title,
            'url': url,
            'preferences_link': 'https://{}bounties.network/settings'.format('' if ENVIRONMENT == 'production' else 'staging.'),
            'notification_name': notification_name,
            'usd_amount': usd_decimals(bounty.usd_price),
            'token_amount': token_amount,
            'token': bounty.token_symbol,
            'token_amount_remaining': remaining,
            'usd_amount_remaining': remaining_usd,
            'added_amount': added_amount,
            'remaining_submissions': remaining_submissions,
            'fulfillment_description': description,
            'application_message': application_message,
            'issuer_name': issuer and issuer.name,
            'issuer_address': issuer and shorten_address(issuer.public_address),
            'issuer_profile_image': (issuer and issuer.small_profile_image_url or default_image),
            'issuer_address_link': issuer and profile_url_for(issuer.public_address, bounty.platform),
            'user_name': user and user.name,
            'user_address': user and shorten_address(user.public_address),
            'user_profile_image': (user and user.small_profile_image_url or default_image),
            'user_address_link': user_address_link,
            'from_user_name': from_user and from_user.name,
            'from_user_address': from_user and shorten_address(from_user.public_address),
            'from_user_profile_image': (from_user and from_user.small_profile_image_url or default_image),
            'from_user_address_link': from_user and profile_url_for(from_user.public_address, bounty.platform),
            'from_user_email': from_user and from_user.email,
            'review': review and review.review,
            'rating': review and '{}/5'.format(review.rating),
            'rating_color': review and Email.rating_color(review.rating),
            'comment': comment and comment.text,
            'MC_PREVIEW_TEXT': preview_text,
            'rating_url': rating_url,
            'average_rating': usd_decimals(average_rating),
            'rating_count': rating_count,
            'rating_link': rating_link,
            'contribute_url': url + '?contribute=true',
            'submissions': submissions,
            'rejection_message': rejection_message
        })

    def render(self):
        """Render the HTML email body for this notification."""
        template = self.templates[self.notification_name]
        return render_to_string(template, context=self.__dict__)

    def render_to_file(self, filename=None):
        """Render the email and write it to *filename* (auto-generated if omitted)."""
        if not filename:
            filename = '{}-{}-{}.html'.format(
                self.notification_name,
                self.bounty.bounty_id,
                # Only alphanumeric characters for filename
                ''.join(filter(str.isalnum, self.bounty.title))
            )
        # Fix: use a context manager so the file handle is closed even if
        # rendering or writing raises.
        with open(filename, 'w') as open_file:
            open_file.write(self.render())
|
11531234
|
import io
class MeteredFile(io.BufferedRandom):
    """Buffered random-access file that counts bytes and operations.

    Implemented via subclassing: every read/write entry point bumps its
    counter, then delegates the real work to io.BufferedRandom.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._read_bytes = 0
        self._read_ops = 0
        self._write_bytes = 0
        self._write_ops = 0

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Delegate cleanup to the buffered-file base class.
        return super().__exit__(exc_type, exc_val, exc_tb)

    def __iter__(self):
        return self

    def __next__(self):
        # Line-by-line iteration is metered like any other read.
        self._read_ops += 1
        line = super().readline()
        self._read_bytes += len(line)
        if not line:
            raise StopIteration
        return line

    def read(self, size=-1):
        self._read_ops += 1
        chunk = super().read(size)
        self._read_bytes += len(chunk)
        return chunk

    @property
    def read_bytes(self):
        """Total bytes returned by read()/iteration so far."""
        return self._read_bytes

    @property
    def read_ops(self):
        """Number of read()/iteration calls so far."""
        return self._read_ops

    def write(self, b):
        self._write_ops += 1
        written = super().write(b)
        self._write_bytes += written
        return written

    @property
    def write_bytes(self):
        """Total bytes written so far."""
        return self._write_bytes

    @property
    def write_ops(self):
        """Number of write() calls so far."""
        return self._write_ops
class MeteredSocket:
"""Implement using a delegation model."""
def __init__(self, socket):
self._socket = socket
self._recv_bytes = 0
self._recv_ops = 0
self._send_bytes = 0
self._send_ops = 0
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return self._socket.__exit__(exc_type, exc_val, exc_tb)
def recv(self, bufsize, flags=0):
self._recv_ops += 1
data = self._socket.recv(bufsize, flags)
self._recv_bytes += len(data)
return data
@property
def recv_bytes(self):
return self._recv_bytes
@property
def recv_ops(self):
return self._recv_ops
def send(self, data, flags=0):
self._send_ops += 1
length = self._socket.send(data, flags)
self._send_bytes += length
return length
@property
def send_bytes(self):
return self._send_bytes
@property
def send_ops(self):
return self._send_ops
|
11531241
|
import os
import pathlib
import inotify.adapters
import inotify.constants
from base_camera import BaseCamera
class CameraFile(BaseCamera):
    """Camera implementation that serves JPEG frames written to a file on disk."""

    @staticmethod
    def frames():
        """Yield the frame file's bytes each time it is rewritten.

        Watches the frame file's parent directory with inotify (inotify
        only watches at directory granularity) and yields the file's
        contents after every completed write (IN_CLOSE_WRITE).
        """
        filename = os.getenv('MOABFRAME', '/tmp/camera/frame.jpg')
        dirname = os.path.dirname(filename)
        basename = os.path.basename(filename)
        pathlib.Path(dirname).mkdir(parents=True, exist_ok=True)
        watcher = inotify.adapters.Inotify()
        # inotify works on folder level, so extract folder from filename
        watcher.add_watch(dirname, inotify.constants.IN_CLOSE_WRITE)
        # filesystem event generator blocks until a change is detected
        for _header, _type_names, _path, event_name in watcher.event_gen(yield_nones=False):
            # Fix: previously ANY file closed in the directory triggered a
            # read of the frame file (which might not exist yet, or be
            # unchanged). Only react to writes of the frame file itself.
            if event_name != basename:
                continue
            with open(filename, 'rb') as f:
                yield f.read()
|
11531262
|
from ballet.validation.base import BaseValidator
from ballet.validation.common import ChangeCollector
class ProjectStructureValidator(BaseValidator):
    """Validator that passes only when a project's collected changes are admissible."""

    def __init__(self, project):
        self.change_collector = ChangeCollector(project)

    def validate(self):
        """Return True iff the collected changes contain no inadmissible diffs."""
        collected = self.change_collector.collect_changes()
        return not collected.inadmissible_diffs
|
11531280
|
from __future__ import annotations
from typing import Any, Dict, List, Optional, Union
from enum import Enum
from pydantic import BaseModel, Extra, Field, conint, constr
###################
# Package section #
###################
# The [package] section: identity of the package being built.
# (Intentionally commented with '#' rather than docstrings: pydantic v1 puts
# model docstrings into the generated JSON schema description.)
class Package(BaseModel):
    class Config:
        extra = Extra.forbid  # reject unknown keys
    name: str = Field(description="The package name")
    version: str = Field(description="The package version")
###################
# Source section #
###################
# Constrained string types for the common hex checksum formats
# (length + hex-digit pattern enforced by pydantic's constr).
sha256str = constr(min_length=64, max_length=64, regex=r"^[0-9a-fA-F]{64}$")
md5str = constr(min_length=32, max_length=32, regex=r"^[0-9a-fA-F]{32}$")
sha1 = constr(min_length=40, max_length=40, regex=r"^[0-9a-fA-F]{40}$")
# We require some paths to contain no backslashes, even on Windows
path_no_backslash = constr(regex=r"^[^\\]+$")
ListNoBackslash = List[path_no_backslash]
# Fields shared by every source kind: optional patches to apply and an
# optional destination subdirectory for extraction.
class BaseSourceItem(BaseModel):
    class Config:
        extra = Extra.forbid  # reject unknown keys
    patches: Optional[List[str]] = None
    destination: Optional[str] = None
# Source fetched from a URL, with optional checksums and local filename.
class UrlSource(BaseSourceItem):
    # NOTE(review): annotated `str` but defaulted to None — a URL source
    # without a url looks meaningless; this was probably meant to be a
    # required field (`url: str`). Confirm before tightening the schema.
    url: str = None
    sha256: Optional[sha256str] = None
    md5: Optional[md5str] = None
    sha1: Optional[sha1] = None
    fn: Optional[str] = None  # override for the downloaded file name
# Source cloned from a git repository.
class GitSource(BaseSourceItem):
    git_rev: str = "HEAD"
    git_url: str
    git_depth: int = -1  # -1: full clone (no shallow depth limit)
# Source pulled from a Mercurial repository.
class HgSource(BaseSourceItem):
    hg_url: str
    hg_tag: str = "tip"
# Source checked out from a Subversion repository.
class SvnSource(BaseSourceItem):
    svn_url: str
    svn_rev: str = "head"
    svn_ignore_externals: bool = False
# Source taken from a local filesystem path.
class LocalSource(BaseSourceItem):
    path: str
# Union of all supported source kinds; pydantic validates against each member.
SourceItem = Union[UrlSource, GitSource, HgSource, SvnSource, LocalSource]
###################
# Build section #
###################
# Allowed values for the `noarch` build setting.
class NoarchType(Enum):
    generic = "generic"
    python = "python"
# The [build] section: everything controlling how the package is built and
# packaged. Field semantics follow conda-build conventions unless a comment
# notes a deviation.
class Build(BaseModel):
    class Config:
        extra = Extra.forbid  # reject unknown keys
    number: Optional[conint(ge=0)] = Field(
        0,
        description="Build number to version current build in addition to package version",
    )
    string: Optional[str] = Field(
        None,
        description="Build string to identify build variant (if not explicitly set, computed automatically from used build variant)",
    )
    skip: Optional[List[str]] = Field(
        None,
        description="List of conditions under which to skip the build of the package.",
    )
    script: Optional[Union[str, List[str]]] = Field(
        None,
        description="Build script to be used. If not given, tries to find 'build.sh' on Unix or 'bld.bat' on Windows inside the recipe folder.",
    )
    noarch: Optional[NoarchType] = Field(
        None,
        description="Can be either 'generic' or 'python'. A noarch 'python' package compiles .pyc files upon installation.",
    )
    # Note: entry points only valid if noarch: python is used! Write custom validator?
    entry_points: Optional[List[str]] = None
    # Deprecated
    # noarch_python: bool = False
    run_exports: Optional[Dict[str, Any]] = None
    ignore_run_exports: Optional[List[str]] = None
    ignore_run_exports_from: Optional[List[str]] = None
    # deprecated, but still used to downweigh packages
    track_features: Optional[List[str]] = None
    # Features are completely deprecated
    # features: List[str]
    # requires_features: Dict[str, str]
    # provides_features: Dict[str, str],
    include_recipe: bool = Field(True, description="Include recipe in final package.")
    pre_link: Optional[str] = Field(
        None,
        alias="pre-link",
        description="Script to execute when installing - before linking. Highly discouraged!",
    )
    post_link: Optional[str] = Field(
        None,
        alias="post-link",
        description="Script to execute when installing - after linking.",
    )
    pre_unlink: Optional[str] = Field(
        None,
        alias="pre-unlink",
        description="Script to execute when removing - before unlinking.",
    )
    osx_is_app: bool = False
    disable_pip: bool = False
    preserve_egg_dir: bool = False
    no_link: Optional[ListNoBackslash] = None
    binary_relocation: Union[bool, ListNoBackslash] = True
    has_prefix_files: ListNoBackslash = []
    binary_has_prefix_files: Optional[ListNoBackslash] = None
    ignore_prefix_files: Union[bool, ListNoBackslash] = False
    # the following is defaulting to True on UNIX and False on Windows
    detect_binary_files_with_prefix: Optional[bool] = None
    skip_compile_pyc: Optional[List[str]] = None
    rpaths: Optional[List[str]] = None
    rpaths_patcher: Optional[str] = None
    # Note: this deviates from conda_build `script_env`!
    script_env: Optional[Dict[str, str]] = None
    # Files to be included even if they are present in the PREFIX before building
    always_include_files: Optional[List[str]] = None
    # msvc_compiler: Optional[str] = None -- deprecated in conda_build
    # pin_depends: Optional[str] -- did not find usage anywhere, removed
    # preferred_env: Optional[str]
    # preferred_env_executable_paths': Optional[List]
    # note didnt find _any_ usage of force_use_keys in conda-forge
    force_use_keys: Optional[List[str]] = None
    force_ignore_keys: Optional[List[str]] = None
    merge_build_host: bool = False
    missing_dso_whitelist: Optional[List[str]] = None
    error_overdepending: bool = Field(False, description="Error on overdepending")
    error_overlinking: bool = Field(False, description="Error on overlinking")
###################
# About section #
###################
# The [about] section: package metadata (URLs, license, descriptive text).
# Note: unlike most models here, extra keys are NOT forbidden (no Config).
class About(BaseModel):
    # URLs
    home: Optional[str] = None
    dev_url: Optional[str] = None
    doc_url: Optional[str] = None
    doc_source_url: Optional[str] = None
    license_url: Optional[str] = None
    # Text
    license_: Optional[str] = Field(None, alias="license")  # trailing _ avoids shadowing the builtin-ish name; serialized as "license"
    summary: Optional[str] = None
    description: Optional[str] = None
    license_family: Optional[str] = None
    # Lists
    identifiers: Optional[List[str]] = None
    tags: Optional[List[str]] = None
    keywords: Optional[List[str]] = None
    # Paths in source tree
    license_file: Optional[str] = None
    prelink_message: Optional[str] = None
    readme: Optional[str] = None
#########################
# Requirements Section #
#########################
# The [requirements] section: dependency lists per phase
# (build-time tools, host libraries, runtime deps, run constraints).
class Requirements(BaseModel):
    build: Optional[List[str]] = None
    host: Optional[List[str]] = None
    run: Optional[List[str]] = None
    run_constrained: Optional[List[str]] = None
# The [test] section: files, requirements, imports and commands used to
# exercise the built package.
class Test(BaseModel):
    files: Optional[List[str]] = Field(
        None,
        description="Test files that are copied from the recipe into the temporary test directory and are needed during testing.",
    )
    source_files: Optional[List[str]] = Field(
        None,
        description="Test files that are copied from the source work directory into the temporary test directory and are needed during testing.",
    )
    requires: Optional[List[str]] = Field(
        None,
        description="In addition to the runtime requirements, you can specify requirements needed during testing.",
    )
    imports: Optional[List[str]] = Field(None, description="Test importing modules.")
    commands: Optional[List[str]] = Field(
        None, description="The test commands to execute."
    )
# One build step/output of a multi-output recipe: its package identity plus
# optional per-output build, requirements and test settings.
class Output(BaseModel):
    package: Package = Field(..., description="The package name and version")
    build: Optional[Build] = None
    requirements: Optional[Requirements] = None
    test: Optional[Test] = None
# Top-level schema for a v1 boa recipe file.
class BoaRecipeV1(BaseModel):
    class Config:
        extra = Extra.forbid  # reject unknown top-level keys
    context: Optional[Dict[str, Any]] = Field(None, description="The recipe context.")
    package: Optional[Package] = Field(
        None, description="The package name and version."
    )
    source: Optional[List[SourceItem]] = Field(
        None, description="The source items to be downloaded and used for the build."
    )
    build: Optional[Build] = None
    features: Optional[List] = None
    steps: Optional[List[Output]] = None  # multi-output build steps
    about: Optional[About] = None
    extra: Optional[Dict[str, Any]] = None  # free-form extra metadata
if __name__ == "__main__":
    # Emit the JSON schema for the v1 recipe format when run as a script.
    print(BoaRecipeV1.schema_json(indent=2))
|
11531284
|
from __future__ import print_function, division
from sympy import zeros, eye, Symbol, solve_linear_system
# Benchmark fixture: an N x (N+1) augmented matrix [I | 0] and N symbols,
# giving solve_linear_system a trivial (identity) system to solve.
N = 8
M = zeros(N, N + 1)
M[:, :N] = eye(N)
S = [Symbol('A%i' % i) for i in range(N)]
def timeit_linsolve_trivial():
    """Timing target: solve the trivial identity system built above (M, S)."""
    solve_linear_system(M, *S)
|
11531294
|
import os
import random
from colbert.utils.parser import Arguments
from colbert.utils.runs import Run
from colbert.evaluation.loaders import load_colbert, load_qrels, load_queries
from colbert.indexing.faiss import get_faiss_index_name
from colbert.ranking.retrieval import retrieve
from colbert.ranking.batch_retrieval import batch_retrieve
def main():
    """End-to-end ColBERT retrieval: parse args, load model/index artifacts,
    then run plain or batch retrieval."""
    random.seed(12345)

    parser = Arguments(description='End-to-end retrieval and ranking with ColBERT.')
    parser.add_model_parameters()
    parser.add_model_inference_parameters()
    parser.add_ranking_input()
    parser.add_retrieval_input()

    parser.add_argument('--faiss_name', dest='faiss_name', default=None, type=str)
    parser.add_argument('--faiss_depth', dest='faiss_depth', default=1024, type=int)
    parser.add_argument('--part-range', dest='part_range', default=None, type=str)
    parser.add_argument('--batch', dest='batch', default=False, action='store_true')
    parser.add_argument('--depth', dest='depth', default=1000, type=int)

    args = parser.parse()

    # A non-positive depth means "no depth limit".
    if args.depth <= 0:
        args.depth = None

    # "a..b" on the command line becomes range(a, b).
    if args.part_range:
        start, stop = map(int, args.part_range.split('..'))
        args.part_range = range(start, stop)

    with Run.context():
        args.colbert, args.checkpoint = load_colbert(args)
        args.qrels = load_qrels(args.qrels)
        args.queries = load_queries(args.queries)
        args.index_path = os.path.join(args.index_root, args.index_name)

        # Use an explicitly named FAISS index when given, otherwise derive it.
        if args.faiss_name is not None:
            args.faiss_index_path = os.path.join(args.index_path, args.faiss_name)
        else:
            args.faiss_index_path = os.path.join(args.index_path, get_faiss_index_name(args))

        run_retrieval = batch_retrieve if args.batch else retrieve
        run_retrieval(args)
if __name__ == "__main__":
    # Script entry point.
    main()
|
11531295
|
from typing import Dict
import sys
from . import console
def get_imported_packages() -> Dict[str, str]:
    """
    Return the installed distributions that are currently imported,
    as a {name: version} dict.
    """
    try:
        # Should we vendor pkg_resources? See https://github.com/replicate/keepsake/issues/350
        import pkg_resources
    except ImportError:
        console.warn(
            "Could not import setuptools/pkg_resources, not tracking package versions"
        )
        return {}
    return {
        dist.key: dist.version
        for dist in pkg_resources.working_set
        if is_imported(dist.key)
    }
def is_imported(module_name: str) -> bool:
    """Return True if the named module has already been imported (is in sys.modules)."""
    return module_name in sys.modules
|
11531306
|
from django.views import defaults
def bad_request_400_custom(request, exception, template_name='core/400.html'):
    """Render the project's custom 400 (bad request) page via Django's default handler."""
    return defaults.bad_request(request, exception, template_name)
def permission_denied_403_custom(request, exception, template_name='core/403.html'):
    """Render the project's custom 403 (permission denied) page via Django's default handler."""
    return defaults.permission_denied(request, exception, template_name)
def page_not_found_404_custom(request, exception, template_name='core/404.html'):
    """Render the project's custom 404 (not found) page via Django's default handler."""
    return defaults.page_not_found(request, exception, template_name)
def server_error_500_custom(request, template_name='core/500.html'):
    """Render the project's custom 500 (server error) page via Django's default handler."""
    return defaults.server_error(request, template_name)
|
11531310
|
from typing import List, Optional
import re
import spacy
import nltk
import numpy as np
import tensorflow as tf
def load_glove(filepath: str):
    """Parse a GloVe text file into a {word: list-of-floats} mapping."""
    vectors = {}
    with open(filepath, 'rt') as fin:
        for line in fin:
            word, *values = line.strip().split(' ')
            vectors[word] = [float(v) for v in values]
    return vectors
def prune_word_vectors(wv: dict, vocab: set):
    """
    Drop, in place, vectors for words that do not appear in vocab
    (matching case-insensitively on both sides).
    Args:
        wv: word vectors, mapping from a string to an array of floats
        vocab: set of words
    Returns:
        The same wv dict, pruned.
    """
    # Extend the vocab with the lowercase form of every word.
    expanded = vocab.union({w.lower() for w in vocab})
    # A word survives if either its exact or lowercased form is in the vocab.
    doomed = [w for w in wv if w not in expanded and w.lower() not in expanded]
    for w in doomed:
        del wv[w]
    return wv
def to_chars(tokens: List[str], token_len: int, pad_char: str):
    """
    Convert each token into a fixed-length list of characters, truncating to
    token_len or padding with pad_char as needed.
    Args:
        tokens: list of strings
        token_len: fixed num of chars for each word
        pad_char: padding element appended to short tokens
    Returns:
        List[List[str]]: one fixed-length char list per input token.
    """
    result = []
    for tok in tokens:
        chars = list(tok)[:token_len]
        if len(chars) < token_len:
            chars = chars + [pad_char] * (token_len - len(chars))
        result.append(chars)
    return result
def augment_long_text(context: List[str], answers: List[dict]):
    """
    Improve a context text for better tokenization based on all answers of that context.
    Motivation:
        - The context may be wrongly tokenized,
            Ex1: the sequence "...Beyoncé married <NAME>. She publicly..." is tokenized as [..., 'Beyoncé', 'married', 'Jay', 'Z.', 'She', 'publicly',...]
                (in the paragraph including the question '56be95823aeaaa14008c910c')
            Ex2: the sequence "grossing $68 million—$60 million more than Cadillac Records" is tokenized as [..., 'grossing', '$', '68', 'million—$60', 'million', 'more', 'than', 'Cadillac', 'Records',...]
                (in the paragraph including the question '56bf99aca10cfb14005511ab')
        - In some cases, answers cannot be matched with those wrong tokens.
            In Ex1, the answer ['Jay', 'Z'] is misaligned with ['Jay', 'Z.']
            In Ex2, the answer ['60', 'million'] is misaligned with ['million—$60', 'million']
    Solution:
        - Use tokens from all related answers to fix the wrong tokens.
    Args:
        context: raw text of the paragraph context
        answers: List of answers related to the context. Each answer is a dict which contains 2 keys: 'text' and 'answer_start' as in json raw answer data.
    Returns:
        Augmented context: context text with some spaces inserted to guide the tokenization.
    """
    # print(answers)
    # Process answers from the end of the context backwards so that inserting
    # spaces does not shift the offsets of answers not yet processed.
    answers = sorted(answers, key=lambda a: a['answer_start'], reverse=True)
    # Remove duplicated answers (same start offset); keeps the first of each run.
    i = 0
    while i < len(answers)-1:
        if answers[i]['answer_start'] == answers[i+1]['answer_start']:
            answers.remove(answers[i+1])
        else:
            i += 1
    # Insert SPACE into context to guide the tokenizer.
    for answer in answers:
        start = answer['answer_start']
        end = start + len(answer['text'])
        # NOTE(review): this inspects context[end+1] / context[start-1], i.e.
        # one character PAST the answer boundary rather than the boundary
        # character itself — possibly an off-by-one; confirm intended behavior.
        if end < len(context)-1 and not context[end+1].isalnum() and context[end+1] != ' ':
            context = context[:end] + ' ' + context[end:]
        if start > 0 and not context[start-1].isalnum() and context[start-1] != ' ':
            context = context[:start] + ' ' + context[start:]
    return context
# Shared spaCy pipeline used purely for tokenization (parser/tagger/NER disabled
# to keep loading and processing fast).
nlp = spacy.load('en', disable=['parser', 'tagger', 'ner'])
# sent_tokenzier = nltk.load('tokenizers/punkt/english.pickle')
# word_tokenizer = nltk.TreebankWordTokenizer()
def tokenize(text):
    """Tokenize text with the module-level spaCy pipeline, dropping empty
    and single-space tokens. (An earlier NLTK-based variant was removed.)"""
    return [tok.text for tok in nlp(text) if tok.text and tok.text != ' ']
# Matches bracketed markers such as "[note 1]" or "[12]" so they can be
# split out and tokenized separately (see tokenize_long_text).
# PATT = re.compile('\[[a-z]+ [0-9]+\]')
PATT = re.compile('\[[a-z0-9 ]+\]')
def tokenize_long_text(text):
    """Tokenize text, splitting bracketed markers (PATT) into their own
    segments so they tokenize independently of the surrounding prose."""
    markers = PATT.findall(text)
    if not markers:
        return tokenize(text)
    tokens = []
    pieces = PATT.split(text)
    # split() yields one more segment than there are markers; pad the marker
    # list so zip pairs every segment with a (possibly empty) marker.
    if len(markers) < len(pieces):
        markers.append('')
    for plain, marker in zip(pieces, markers):
        for chunk in (plain, marker):
            if chunk:
                tokens.extend(tokenize(chunk))
    return tokens
def align(context: str, context_toks: List[str]):
    """
    Align tokens with their original text.
    Args:
        context: original text
        context_toks: list of tokens of the original text
    Return:
        anchors: list of index in the original text for each token,
        i.e., context[anchors[i]] is the start position of context_toks[i]
        in context; None if any token cannot be located.
    """
    curr = 0
    anchors = []
    for tok in context_toks:
        try:
            idx = context.index(tok, curr)
        except ValueError:
            # Fix: corrected garbled message ("whe tokens" -> "with token").
            print('Cannot align token with original text; token: {}, orig text: {}'.format(tok, context[curr:]))
            return None
        # str.index never returns a negative, so the old `if idx >= 0` guard
        # was redundant and has been removed.
        anchors.append(idx)
        curr = idx + len(tok)
    return anchors
def get_batch(X_data, y_data=None, batch_size=32, shuffle=False):
    """Yield (X_batch, y_batch) minibatches from parallel numpy arrays,
    optionally over a shuffled index; y_data may be omitted."""
    y_data = y_data or []
    n_samples = len(X_data[0])
    order = np.arange(n_samples)
    if shuffle:
        np.random.shuffle(order)
    for start in range(0, n_samples, batch_size):
        sel = order[start:start + batch_size]
        X_batch = [X[sel] for X in X_data]
        y_batch = [y[sel] if len(y) > 0 else [] for y in y_data]
        yield (X_batch, y_batch)
# Large negative constant standing in for -inf when masking logits.
MINUS_INFINITY = -1e30
def mask_logits(logits, mask):
    """Add MINUS_INFINITY to logits wherever mask is 0 (mask is cast to
    float32), so masked positions get ~zero probability after softmax."""
    return logits + MINUS_INFINITY * (1 - tf.cast(mask, tf.float32))
|
11531359
|
import json
from datetime import datetime
from os import path
from ralph.operations.changemanagement import jira
from ralph.operations.changemanagement.exceptions import IgnoreOperation
from ralph.operations.models import OperationStatus
from ralph.tests import RalphTestCase
class JiraProcessorTestCase(RalphTestCase):
    """Tests for the ralph Jira change-management event helpers.

    Every test reads fields out of a sample Jira webhook payload
    (sample_jira_event.json, shipped next to this module).
    """
    def setUp(self):
        # Load the sample Jira webhook event used by all tests.
        with open(
            path.join(path.dirname(__file__), 'sample_jira_event.json'), 'r'
        ) as f:
            self.jira_event = json.load(f)
    def test_get_assignee_username(self):
        self.assertEqual(
            'username.fortytwo',
            jira.get_assignee_username(self.jira_event)
        )
    def test_get_assignee_username_no_assignee_returns_none(self):
        self.jira_event['issue']['fields']['assignee'] = None
        self.assertIsNone(jira.get_assignee_username(self.jira_event))
    def test_get_reporter_username(self):
        self.assertEqual(
            'username.fourtwenty',
            jira.get_reporter_username(self.jira_event)
        )
    def test_get_reporter_username_no_reporter_returns_none(self):
        self.jira_event['issue']['fields']['reporter'] = None
        self.assertIsNone(jira.get_reporter_username(self.jira_event))
    def test_get_title(self):
        self.assertEqual(
            'THIS IS THE SUMMARY',
            jira.get_title(self.jira_event)
        )
    def test_get_description(self):
        self.assertEqual(
            'THAT IS A TEST TICKET',
            jira.get_description(self.jira_event)
        )
    def test_get_create_datetime(self):
        self.assertEqual(
            datetime(2017, 3, 20, 9, 10, 40, 0),
            jira.get_creation_date(self.jira_event)
        )
    def test_get_last_update_datetime(self):
        self.assertEqual(
            datetime(2017, 3, 20, 11, 33, 44, 0),
            jira.get_last_update_date(self.jira_event)
        )
    def test_get_resolution_datetime(self):
        # 14:10 at +0100 is expected back as 13:10 (normalized to UTC).
        self.jira_event['issue']['fields']['resolutiondate'] = (
            '2017-03-20T14:10:40.000+0100'
        )
        self.assertEqual(
            datetime(2017, 3, 20, 13, 10, 40, 0),
            jira.get_resolution_date(self.jira_event)
        )
    def test_get_resolution_datetime_no_time_returns_none(self):
        self.assertIsNone(jira.get_resolution_date(self.jira_event))
    def test_get_ticket_id(self):
        self.assertEqual('SOMEPROJ-42', jira.get_ticket_id(self.jira_event))
    def test_get_operation_name(self):
        self.assertEqual('Change', jira.get_operation_name(self.jira_event))
    def test_get_operation_status(self):
        self.assertEqual(
            'Open',
            jira.get_operation_status(self.jira_event)
        )
|
11531361
|
from gzip import open as gopen
from re import search
from lxml import etree
from utility import dumpStruct, loadStruct, createPath
from structures import f_misc, langs
# NOTE: this module is Python 2 (print statements, dict.iteritems).
def extractAlignmentsRX(f_align, f_align_p, f_stats):
    """ Extracts the alignments with regex.
        Easier to parse HUN aligned files, which will be dropped due to inconsistencies. Mainly used for the small
        OpenSubtitles corpus not the 2011er one.
    """
    print "Extracting alignments"
    alignments = {}   # (fromDoc, toDoc) -> list of xtargets pairs
    final = {}        # alignments with at least one link
    hun_files = set() # HUN-aligned doc pairs, excluded from the output
    doc_count = 0
    link_count = 0
    with gopen(f_align) as align_f:
        for line in align_f:
            line = line.strip()
            if line.startswith("<linkGrp"):
                doc_count += 1
                # The fromDoc/toDoc attributes can appear in either order.
                m = search("fromDoc=\"(.+)\"\stoDoc=\"(.+)\"", line)
                if m:
                    key = (m.group(1), m.group(2))
                elif not m:
                    # NOTE(review): if this second regex also fails, m.group
                    # raises AttributeError, and a regex miss would silently
                    # reuse `key` from the previous linkGrp — presumably the
                    # input is always well-formed; confirm.
                    m = search("toDoc=\"(.+)\"\sfromDoc=\"(.+)\"", line)
                    key = (m.group(2), m.group(1))
                alignments.setdefault(key, [])
            elif line.startswith("<link id="):
                link_count += 1
                m = search("xtargets=\"(.+?)\"", line)
                alignments[key].append(m.group(1).split(";"))
            elif line.startswith("<link certainty="):
                # 'certainty' marks HUN-aligned files; drop the whole doc pair.
                hun_files.add(key)
                if key in alignments:
                    del alignments[key]
                continue
    # Keep only doc pairs that actually collected links.
    empty = set()
    for k, v in alignments.iteritems():
        if len(v) != 0:
            final.setdefault(k, v)
        else:
            empty.add(k)
    dumpStruct(f_align_p, final)
    createPath(f_stats)
    with open(f_stats, "w") as stats:
        stats.write("DOCS: %d\nHUN: %d\nEMPTY: %d\nLEFT: %d\nLINKS: %d\n\n" %
                    (doc_count, len(hun_files), len(empty), len(final), link_count))
        for k in hun_files:
            stats.write(k[0] + " || " + k[1] + "\n")
        stats.write("\n")
def extractAlignmentsLXML(f_align, f_align_p, f_stats):
    """ Extracts alignment information from the alignments file with LXML.
        Used for the large OpenSubtitles 2011 corpus for faster processing.
    """
    print "Extracting alignments"
    # SAX-style target object collecting linkGrp/link events from lxml.
    class Target(object):
        def __init__(self):
            self.d = dict()    # (fromDoc, toDoc) -> list of xtargets tuples
            self.n_links = 0
            self.n_docs = 0
        def start(self, tag, attr):
            if tag == "linkGrp":
                self.n_docs += 1
                self.k = (attr["fromDoc"], attr["toDoc"])
                self.group = self.d[self.k] = []
            elif tag == "link":
                self.n_links += 1
                self.group.append(tuple(attr["xtargets"].split(";")))
                # Unlike the regex variant, HUN files are only reported here.
                if "certainty" in attr:
                    print "Attention HUN: %s" % self.k
        def close(self):
            pass
    with gopen(f_align) as xml:
        targets = Target()
        parser = etree.XMLParser(target=targets)
        etree.parse(xml, parser)
    alignments = targets.d
    # Documents with no alignments
    # NOTE(review): this deletes from targets.d (the same dict being
    # iterated via alignments.iteritems()) — mutating a dict during
    # iteration raises RuntimeError if any empty doc pair exists; confirm
    # that empty pairs never occur in practice.
    empty = set()
    for k, v in alignments.iteritems():
        if not len(v):
            empty.add(k)
            del targets.d[k]
    dumpStruct(f_align_p, alignments)
    createPath(f_stats)
    with open(f_stats, "w") as stats:
        stats.write("DOCS: %d\nEMPTY: %d\nLEFT: %d\nLINKS: %d\n\n" %
                    (targets.n_docs, len(empty), len(alignments), targets.n_links))
        for k in empty:
            stats.write("!!! Empty files\n%s || %s\n" % (k[0], k[1]))
        stats.write("\n")
def countSentences(align, fout_stats):
    """ Count sentences from alignment structure.
        Writes per-document and total DE/EN sentence counts to fout_stats and
        dumps per-document counts to <f_misc>/{de,en}_align.p.
    """
    print "Counting sentences"
    de_total = 0
    en_total = 0
    align_de = {}  # DE filename (without .gz) -> sentence count
    align_en = {}  # EN filename (without .gz) -> sentence count
    with open(fout_stats, "a") as stats:
        # NOTE(review): the loop variable `align` shadows the `align`
        # parameter after the first iteration — works, but fragile.
        for doc, align in align.iteritems():
            de_doc = 0
            en_doc = 0
            # Each pro is an xtargets pair; sides hold space-separated
            # sentence ids, so count the non-empty ids.
            for pro in align:
                for _ in pro[0].split(" "):
                    if _ != "":
                        de_doc += 1
                for _ in pro[1].split(" "):
                    if _ != "":
                        en_doc += 1
            align_de.setdefault(doc[0].rsplit("/", 1)[1].replace(".gz", ""), de_doc)
            align_en.setdefault(doc[1].rsplit("/", 1)[1].replace(".gz", ""), en_doc)
            stats.write("%s \t %s\n%d \t %d\n" % (doc[0], doc[1], de_doc, en_doc))
            de_total += de_doc
            en_total += en_doc
        stats.write("\nDE Sentences: %d\nEN Sentences: %d\n" % (de_total, en_total))
    dumpStruct(f_misc + "de_align.p", align_de)
    dumpStruct(f_misc + "en_align.p", align_en)
def compareSentenceCount(misc):
""" Compares sentence count from grep -c "s id" for checking purposes.
Save the grep output as lang_count.txt.
"""
print "Comparing sentence counts"
for lang in langs:
a = loadStruct(misc + lang + "_align.p")
b = {}
with open(misc + "%s_count.txt" % lang) as counts:
for line in counts:
k, v = line.strip().split(":")
b.setdefault(k, v)
for k1, v1 in a.iteritems():
for k2, v2 in b.iteritems():
if k1 == k2:
if str(v1) != v2:
print k1, k2
print v1, v2
|
11531373
|
# BUG FIX: `from pypureclient.flashblade import ...` does not bind the name
# `pypureclient`, so the ValidResponse check below raised NameError.
import pypureclient
from pypureclient.flashblade import BucketReplicaLinkPost
# create a replica link from a specified local bucket, to a specified remote bucket
# on the remote corresponding to the remote credentials
local_bucket_names = ["localbucket"]
remote_bucket_names = ["remotebucket"]
remote_credentials_names = ["remote/credentials"]
# We can specify if we want to enable cascading and if we want to pause the replica link immediately at creation
my_replica_link = BucketReplicaLinkPost(cascading_enabled=True, paused=False)
# post the bucket replica link object on the local array
res = client.post_bucket_replica_links(local_bucket_names=local_bucket_names,
                                       remote_bucket_names=remote_bucket_names,
                                       remote_credentials_names=remote_credentials_names,
                                       bucket_replica_link=my_replica_link)
print(res)
# isinstance is the idiomatic type check (also accepts subclasses)
if isinstance(res, pypureclient.responses.ValidResponse):
    print(list(res.items))
# Other valid fields: local_bucket_ids, remote_credentials_ids
# See ids in section "Common Fields" for examples
|
11531391
|
from urllib.parse import quote
from django.http import HttpResponseRedirect, JsonResponse
from django.views import generic
from jet_django import settings
from jet_django.mixins.cors_api_view import CORSAPIViewMixin
from jet_django.utils.backend import register_token, is_token_activated
class RegisterView(CORSAPIViewMixin, generic.RedirectView):
    """Redirect the admin to the Jet backend project-registration page,
    passing the current URL along as the `referrer` query parameter."""

    def get(self, request, *args, **kwargs):
        token, _ = register_token()
        if not token:
            # NOTE(review): falls through to an implicit None response
            return
        if is_token_activated(token):
            return JsonResponse({
                'message': 'Project token is already activated'
            })
        base_url = settings.JET_BACKEND_WEB_BASE_URL
        # Downgrade the backend URL to plain http when the current request
        # itself is not secure.
        if base_url.startswith('https') and not self.request.is_secure():
            base_url = 'http{}'.format(base_url[5:])
        referrer = quote(self.request.build_absolute_uri().encode('utf8'))
        return HttpResponseRedirect(
            '{}/projects/register/?referrer={}'.format(base_url, referrer)
        )
|
11531405
|
from .. import bp
from flask_login import login_required
from flask import render_template, redirect, url_for, flash, request
from app.lib.base.provider import Provider
from app.lib.base.decorators import admin_required
@bp.route('/logs/shell', methods=['GET'])
@login_required
@admin_required
def shell_logs():
    """Render the paginated shell command log (admins only).

    Query params:
        page -- 1-based page number; non-numeric or non-positive values
                fall back to page 1.
    """
    # BUG FIX: int(request.args.get(...)) raised ValueError (HTTP 500) on
    # non-numeric input; werkzeug's type=int returns the default instead.
    page = request.args.get('page', 1, type=int)
    if page <= 0:
        page = 1
    provider = Provider()
    shell = provider.shell()
    shell_logs = shell.get_logs(page=page, per_page=20)
    return render_template(
        'config/system/shell_logs.html',
        shell_logs=shell_logs
    )
|
11531406
|
import abc
import bisect
import contextlib
import heapq
import itertools
from typing import Iterable
import lucene
from java.lang import Double, Float, Number, Object
from org.apache.lucene import analysis, util
class Atomic(metaclass=abc.ABCMeta):
    """Abstract base class to distinguish singleton values from other iterables."""

    @classmethod
    def __subclasshook__(cls, other):
        # Any non-iterable type counts as atomic; iterables defer to
        # explicit registration (NotImplemented -> normal ABC machinery).
        if issubclass(other, Iterable):
            return NotImplemented
        return True
# Register iterable types that should nevertheless be treated as atomic
# (single) values rather than as collections.
for cls in (str, analysis.TokenStream, lucene.JArray_byte):
    Atomic.register(cls)
class SpellChecker(dict):
    """Correct spellings and suggest words for queries.

    Supply a vocabulary mapping words to (reverse) sort keys, such as
    document frequencies.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # keys kept in lexicographic order for prefix range queries
        self.words = sorted(self)
        alphabet = ''.join(set(itertools.chain.from_iterable(self)))
        # sentinel that sorts after every possible completion of a prefix
        self.suffix = alphabet and max(alphabet) * max(map(len, self))

    def complete(self, prefix: str, count: int = None) -> list:
        """Return ordered suggested words for prefix."""
        lo = bisect.bisect_left(self.words, prefix)
        hi = bisect.bisect_right(self.words, prefix + self.suffix, lo)
        matches = self.words[lo:hi]
        if count is not None and count < len(matches):
            # partial selection beats a full sort when only `count` are wanted
            return heapq.nlargest(count, matches, key=self.__getitem__)
        return sorted(matches, key=self.__getitem__, reverse=True)
@contextlib.contextmanager
def suppress(exception):
    """Suppress specific lucene exception.

    `exception` is a PyLucene-wrapped Java exception class; a JavaError
    whose underlying Java exception is an instance of it is swallowed,
    any other JavaError propagates.
    """
    try:
        yield
    except lucene.JavaError as exc:
        if not exception.instance_(exc.getJavaException()):
            raise
def convert(value):
    """Return python object from java Object.

    BytesRef -> str; boxed Float/Double -> float; other Numbers -> int;
    any other java Object -> its toString(); non-Java values pass through.
    """
    if util.BytesRef.instance_(value):
        return util.BytesRef.cast_(value).utf8ToString()
    if not Number.instance_(value):
        return value.toString() if Object.instance_(value) else value
    value = Number.cast_(value)
    return value.doubleValue() if Float.instance_(value) or Double.instance_(value) else int(value.longValue())
|
11531424
|
from ..base import BaseModel
# https://vk.com/dev/objects/video
class Video(BaseModel):
    """VK video object (schema: https://vk.com/dev/objects/video).

    All fields default to None; presence depends on the API response.
    """
    id: int = None
    owner_id: int = None
    title: str = None
    description: str = None
    duration: int = None            # seconds
    # preview thumbnails at various widths (px)
    photo_130: str = None
    photo_320: str = None
    photo_640: str = None
    photo_800: str = None
    date: int = None                # unix timestamps
    adding_date: int = None
    views: int = None
    comments: int = None
    player: str = None              # embedded-player URL
    access_key: str = None
    # flag-like ints (0/1 per VK conventions -- TODO confirm)
    processing: int = None
    live: int = None
    upcoming: int = None
    is_favorite: bool = None
|
11531450
|
from os import path
from setuptools import setup, find_packages
# Runtime dependencies are tracked in requirements.txt.
with open("requirements.txt") as f:
    requirements = f.read().splitlines()
# Long description comes from the README next to this setup.py.
with open(
    path.join(path.abspath(path.dirname(__file__)), "README.md"), encoding="utf-8"
) as f:
    long_description = f.read()
PACKAGE_KEYWORDS = [
    "cisco",
    "dna",
    "dnacenter",
    "python",
    "api",
    "sdk",
    "netbox",
]
setup(
    name="ciscodnacnetbox",
    version="1.0.1",
    description="Cisco DNA Center Integration with NetBox",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/robertcsapo/ciscodnacnetbox",
    author="<NAME>",
    author_email="<EMAIL>",
    license="CISCO SAMPLE CODE LICENSE",
    # BUG FIX: PACKAGE_KEYWORDS was defined but never passed to setup()
    keywords=PACKAGE_KEYWORDS,
    install_requires=requirements,
    packages=find_packages(exclude=["img"]),
    package_data={"ciscodnacnetbox": ["templates/*/*.html"]},
    include_package_data=True,
    python_requires=">=3.3",
    zip_safe=False,
)
|
11531452
|
import tensorflow as tf
import numpy as np
import constant
grid_w = constant.GRID_W
grid_h = constant.GRID_H
def intensity_loss(gen_frames, gt_frames, l_num):
    """
    Calculates the mean lp loss between the predicted and ground truth frames.
    @param gen_frames: The predicted frames at each scale.
    @param gt_frames: The ground truth frames at each scale
    @param l_num: 1 or 2 for l1 and l2 loss, respectively).
    @return: The lp loss, reduced with tf.reduce_mean (a scalar, not a sum).
    """
    return tf.reduce_mean(tf.abs((gen_frames - gt_frames) ** l_num))
def depth_consistency_loss3(warp2_depth, mesh):
    """Angle-preserving consistency loss between neighboring mesh grids
    that fall into the same discretized depth level.

    Args:
        warp2_depth: depth tensor; assumed NHWC with H, W divisible by
            GRID_H/GRID_W -- TODO confirm against callers.
        mesh: mesh vertices; slicing below implies shape
            [batch, grid_h+1, grid_w+1, 2] -- TODO confirm.
    Returns:
        Scalar loss summed over all depth levels.

    NOTE(review): `shape[1]/grid_h` is true division on Python 3, which
    would pass float patch sizes to tf.extract_image_patches -- this code
    presumably targets Python 2 / TF1; confirm before porting.
    """
    shape = warp2_depth.get_shape().as_list()
    # assign average depth value to each grid
    depth_patches = tf.extract_image_patches(warp2_depth, [1,shape[1]/grid_h,shape[2]/grid_w,1], [1,shape[1]/grid_h,shape[2]/grid_w,1], [1,1,1,1], padding='VALID')
    depth_map = tf.reduce_mean(depth_patches, axis=3)
    depth_map = tf.reshape(depth_map, [shape[0], grid_h, grid_w])
    ones = tf.ones_like(depth_map, dtype=tf.float32)
    zeros = tf.zeros_like(depth_map, dtype=tf.float32)
    ##############################
    # compute horizontal edges
    w_edges = mesh[:,:,0:grid_w,:] - mesh[:,:,1:grid_w+1,:]
    # compute angles of two successive horizontal edges (cosine similarity)
    cos_w = tf.reduce_sum(w_edges[:,:,0:grid_w-1,:] * w_edges[:,:,1:grid_w,:],3) / (tf.sqrt(tf.reduce_sum(w_edges[:,:,0:grid_w-1,:]*w_edges[:,:,0:grid_w-1,:],3))*tf.sqrt(tf.reduce_sum(w_edges[:,:,1:grid_w,:]*w_edges[:,:,1:grid_w,:],3)))
    # horizontal angle-preserving error for two successive horizontal edges
    delta_w_angle = 1 - cos_w
    # horizontal angle-preserving error for two successive horizontal grids
    delta_w_angle = delta_w_angle[:,0:grid_h,:] + delta_w_angle[:,1:grid_h+1,:]
    ##############################
    ##############################
    # compute vertical edges
    h_edges = mesh[:,0:grid_h,:,:] - mesh[:,1:grid_h+1,:,:]
    # compute angles of two successive vertical edges (cosine similarity)
    cos_h = tf.reduce_sum(h_edges[:,0:grid_h-1,:,:] * h_edges[:,1:grid_h,:,:],3) / (tf.sqrt(tf.reduce_sum(h_edges[:,0:grid_h-1,:,:]*h_edges[:,0:grid_h-1,:,:],3))*tf.sqrt(tf.reduce_sum(h_edges[:,1:grid_h,:,:]*h_edges[:,1:grid_h,:,:],3)))
    # vertical angle-preserving error for two successive vertical edges
    delta_h_angle = 1 - cos_h
    # vertical angle-preserving error for two successive vertical grids
    delta_h_angle = delta_h_angle[:,:,0:grid_w] + delta_h_angle[:,:,1:grid_w+1]
    ##############################
    error = []
    # define the number of depth levels
    depth_num = 32
    for i in range(depth_num):
        # compute the start/end depth value for i-th depth level
        start_depth = i*(1./depth_num)
        end_depth = (i+1)*(1./depth_num)
        # get the 0-1 mask for the i-th depth level
        depth_mask = tf.where(tf.logical_and((depth_map>start_depth) , (depth_map<=end_depth)), ones, zeros)
        # successive depth grid on the horizontal dimension
        depth_diff_w = (1-tf.abs(depth_mask[:,:,0:grid_w-1] - depth_mask[:,:,1:grid_w])) * depth_mask[:,:,0:grid_w-1]
        error_w = depth_diff_w * delta_w_angle
        # successive depth grid on the vertical dimension
        depth_diff_h = (1-tf.abs(depth_mask[:,0:grid_h-1,:] - depth_mask[:,1:grid_h,:])) * depth_mask[:,0:grid_h-1,:]
        error_h = depth_diff_h * delta_h_angle
        error.append(tf.reduce_mean(error_w) + tf.reduce_mean(error_h))
    loss = tf.stack(error, 0)
    loss = tf.reduce_sum(loss)
    return loss
|
11531516
|
from pyrez.models import APIResponseBase
class Status(APIResponseBase):
    """API status payload: a severity indicator plus a description."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # fall back to '' when the key is absent or its value is falsy/None
        self.indicator = kwargs.get("indicator") or ''
        self.description = kwargs.get("description") or ''
|
11531566
|
from captcha.fields import ReCaptchaField
from captcha.widgets import ReCaptchaV3
from directory_forms_api_client.forms import GovNotifyEmailActionMixin
from django.forms import Textarea
from django.template.loader import render_to_string
from django.utils.html import mark_safe
from great_components import forms
from core.cms_slugs import (
PRIVACY_POLICY_URL__CONTACT_TRIAGE_FORMS_SPECIAL_PAGE,
TERMS_URL,
)
from core.constants import CONSENT_CHOICES
# Checkbox label for the T&C field; mark_safe because it embeds an <a> tag.
TERMS_LABEL = mark_safe(
    'Tick this box to accept the '
    f'<a href="{TERMS_URL}" target="_blank">terms and '
    'conditions</a> of the great.gov.uk service.'
)
class NoOperationForm(forms.Form):
    """Placeholder form for steps that collect no input."""
    pass
class WhatAreYouSellingForm(forms.Form):
    """Radio choice: products, services, or both."""
    PRODUCTS = 'PRODUCTS'
    SERVICES = 'SERVICES'
    PRODUCTS_AND_SERVICES = 'PRODUCTS_AND_SERVICES'
    CHOICES = (
        (PRODUCTS, 'Products'),
        (SERVICES, 'Services'),
        (PRODUCTS_AND_SERVICES, 'Products and Services'),
    )
    choice = forms.ChoiceField(
        label='',
        widget=forms.RadioSelect(),
        choices=CHOICES,
    )
class ContactUsHelpForm(GovNotifyEmailActionMixin, forms.Form):
    """Contact form delivered via GOV.UK Notify (mixin handles submission)."""
    comment = forms.CharField(
        label='Please give us as much detail as you can',
        widget=Textarea,
    )
    given_name = forms.CharField(label='First name')
    family_name = forms.CharField(label='Last name')
    email = forms.EmailField()
    # invisible reCAPTCHA v3 -- no user-visible label
    captcha = ReCaptchaField(label='', label_suffix='', widget=ReCaptchaV3())
    terms_agreed = forms.BooleanField(label=TERMS_LABEL)
class ProductSearchForm(forms.Form):
    """Free-text product search term."""
    products = forms.CharField()
class CompanyNameForm(forms.Form):
    """Free-text company name."""
    name = forms.CharField()
class CompaniesHouseSearchForm(forms.Form):
    """Free-text Companies House search term."""
    term = forms.CharField()
class ConsentFieldMixin(forms.Form):
    """Mixin adding an optional marketing-consent checkbox group and
    forcing it (and any captcha) to the end of the field order."""
    contact_consent = forms.MultipleChoiceField(
        # label is set in init to avoid circular dependency
        widget=forms.CheckboxSelectInlineLabelMultiple(
            attrs={'id': 'checkbox-multiple'},
            use_nice_ids=True,
        ),
        choices=CONSENT_CHOICES,
        required=False,
    )
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # rendered label includes the privacy-policy link
        self.fields['contact_consent'].label = render_to_string(
            'core/includes/contact-consent.html',
            {'privacy_url': PRIVACY_POLICY_URL__CONTACT_TRIAGE_FORMS_SPECIAL_PAGE},
        )
    @staticmethod
    def move_to_end(fields, name):
        # in-place: remove then re-append so `name` sorts last
        fields.remove(name)
        fields.append(name)
    def order_fields(self, field_order):
        # move terms agreed and captcha to the back
        field_order = field_order or list(self.fields.keys())
        # copy so the caller's list is not mutated
        field_order = field_order[:]
        self.move_to_end(fields=field_order, name='contact_consent')
        if 'captcha' in field_order:
            self.move_to_end(fields=field_order, name='captcha')
        return super().order_fields(field_order)
|
11531649
|
from octopus.arch.wasm.emulator import WasmSSAEmulatorEngine
# Example: emulate a complete wasm module and render its SSA-form CFG.
file_name = "examples/wasm/samples/fib.wasm"
# read the raw module bytes
with open(file_name, 'rb') as f:
    raw = f.read()
# run the emulator in SSA mode on the 'fib' function
emul = WasmSSAEmulatorEngine(raw, 'fib')
emul.emulate()
# visualization of the control-flow graph with SSA annotations
emul.cfg.visualize(ssa=True)
|
11531668
|
from itertools import permutations
from unittest import TestCase
from sipa import forms
from wtforms import Form, PasswordField, ValidationError
class PasswordComplexityValidatorTest(TestCase):
    """Unit tests for forms.PasswordComplexity (length + character classes)."""
    class TestForm(Form):
        password = PasswordField()
    def validate(self, validator, password):
        # run the validator directly against a one-field form
        form = self.TestForm(data={'password': password})
        field = form.password
        validator(form, field)
    def test_min_length(self):
        # passwords shorter than min_length fail, everything at/above passes
        min_length = 4
        assert min_length > 1
        validator = forms.PasswordComplexity(min_length=min_length,
                                             min_classes=1)
        for length in range(min_length):
            with self.assertRaises(ValidationError):
                self.validate(validator, 'a' * length)
        for length in range(min_length, 2 * min_length):
            self.validate(validator, 'a' * length)
    def test_min_classes(self):
        # one representative per class (lower, upper, digit, symbol):
        # a single class fails, any pair of distinct classes passes
        validator = forms.PasswordComplexity(min_length=1, min_classes=2)
        class_representatives = ('a', 'A', '0', '~')
        for representative in class_representatives:
            with self.assertRaises(ValidationError):
                self.validate(validator, representative)
        for permutation in permutations(class_representatives, 2):
            self.validate(validator, ''.join(permutation))
|
11531672
|
from glue import custom_viewer
from matplotlib.colors import LogNorm
from matplotlib.patches import Circle, Rectangle, Arc
from matplotlib.lines import Line2D
# Custom glue viewer named 'Shot Plot'; x/y are user-selected data attributes.
bball = custom_viewer('Shot Plot',
                      x='att(x)',
                      y='att(y)')
@bball.plot_data
def show_hexbin(axes, x, y):
    """Draw the full dataset as a log-scaled hexbin density map."""
    axes.hexbin(x, y,
                cmap='Purples',
                gridsize=40,
                norm=LogNorm(),
                mincnt=1)  # leave empty bins unfilled
@bball.plot_subset
def show_points(axes, x, y, style):
    """Overlay a user-defined subset as scatter points using its glue style."""
    axes.plot(x, y, 'o',
              alpha=style.alpha,
              mec=style.color,
              mfc=style.color,
              ms=style.markersize)
@bball.setup
def draw_court(axes):
    """Draw a static half-court background once at viewer setup.

    Coordinates appear to be inches with the hoop near the origin --
    TODO confirm against the shot data's units.
    """
    c = '#777777'
    opts = dict(fc='none', ec=c, lw=2)
    # hoop
    hoop = Circle((0, 63), radius=9, **opts)
    axes.add_patch(hoop)
    # the key (free-throw lane)
    box = Rectangle((-6 * 12, 0), 144, 19 * 12, **opts)
    axes.add_patch(box)
    # free-throw semicircle
    inner = Arc((0, 19 * 12), 144, 144, theta1=0, theta2=180, **opts)
    axes.add_patch(inner)
    # three-point arc
    threept = Arc((0, 63), 474, 474, theta1=0, theta2=180, **opts)
    axes.add_patch(threept)
    opts = dict(c=c, lw=2)
    # corner-three straight segments
    axes.add_line(Line2D([237, 237], [0, 63], **opts))
    axes.add_line(Line2D([-237, -237], [0, 63], **opts))
    axes.set_ylim(0, 400)
    axes.set_aspect('equal', adjustable='datalim')
|
11531708
|
from collections import namedtuple
from mwtypes import Timestamp
from articlequality.extractors import frwiki
def test_extractor():
    """End-to-end check of the frwiki {{Wikiprojet}} quality extractor.

    Builds a synthetic talk-page revision history cycling through
    `avancement` ratings (including reverts and duplicates) and verifies
    that each distinct rating is labeled at the timestamp of its first
    occurrence.
    """
    Revision = namedtuple("Revisions", ['id', 'timestamp', 'sha1', 'text'])
    # Minimal stand-in for an mwxml page: iterating it yields revisions.
    class Page:
        def __init__(self, title, namespace, revisions):
            self.title = title
            self.namespace = namespace
            self.revisions = revisions
        def __iter__(self):
            return iter(self.revisions)
    revisions = [
        Revision(
            1, Timestamp(0), "aaa",
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|avancement=e}}"
        ),
        Revision(
            2, Timestamp(1), "bbb",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|avancement=AdQ}}"
        ),
        Revision(
            3, Timestamp(2), "aaa",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|avancement=e}}"
        ),
        Revision(
            4, Timestamp(3), "ccc",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|Sélection transversale|faible\n" +
            "|avancement=Ébauche}}"
        ),
        Revision(
            5, Timestamp(4), "aaa",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|avancement=e}}"
        ),
        Revision(
            6, Timestamp(4), "ccc",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|Sélection transversale|faible\n" +
            "|avancement=Ébauche}}"
        ),
        Revision(
            7, Timestamp(5), "ddd",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|Sélection transversale|faible\n" +
            "|avancement= bd }}"
        ),
        Revision(
            8, Timestamp(6), "eee",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|Sélection transversale|faible\n" +
            "|avancement= Bon début }}"
        ),
        Revision(
            9, Timestamp(6), "eee",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|Sélection transversale|faible\n" +
            "|avancement= b }}"
        ),
        Revision(
            10, Timestamp(7), "fff",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|Sélection transversale|faible\n" +
            "|avancement= a }}"
        ),
        Revision(
            11, Timestamp(8), "fff",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|Sélection transversale|faible\n" +
            "|avancement= ba }}"
        ),
        Revision(
            12, Timestamp(9), "fff",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|Sélection transversale|faible\n" +
            "|avancement= AdQ }}"
        )
    ]
    page = Page("Foobar", 1, revisions)
    observations = frwiki.extract(page)
    project_labels = {(ob['project'], ob['wp10']): ob
                      for ob in observations}
    # Expected: first timestamp at which each normalized rating appears.
    expected = [("wikiprojet", "e", Timestamp(0)),
                ("wikiprojet", "bd", Timestamp(5)),
                ("wikiprojet", "b", Timestamp(6)),
                ("wikiprojet", "a", Timestamp(7)),
                ("wikiprojet", "ba", Timestamp(8)),
                ("wikiprojet", "adq", Timestamp(9))]
    print(project_labels)
    for proj, lab, timestamp in expected:
        ob = project_labels[(proj, lab)]
        assert ob['timestamp'] == timestamp
|
11531727
|
import os
import itertools
from abc import ABC, abstractmethod
from collections import namedtuple
from math import log
Run = namedtuple("Run", ["cross", "stats"])
def do_send(gen, past=None):
    """Advance `gen` like `next`, but for value-accepting generators.

    Sends `past` into the generator and returns the value it yields,
    or None once the generator is exhausted.
    """
    try:
        return gen.send(past)
    except StopIteration:
        return None
class SearcherInterface(ABC):
    """Common interface for parameter-search strategies."""
    @property
    @abstractmethod
    def is_dynamic(self):
        """ Whether or not this object's `generate' uses the return value of `yield'.
        Determines compatibility with 'dry run' feature"""
    @abstractmethod
    def generate(self):
        """ A generator function which `yield's cross-terms. Invoked with `send', so can make use of past run info"""
    def get_best(self):
        """Given a list of runs, returns the 'best' config"""
        # optional: only strategies that track a best config override this
        raise NotImplementedError("get_best not implemented")
class Step(SearcherInterface):
    """ Tries `search_param' from `start'->`end' (not including end) in steps of update(val). Repeated calls to update should converge exactly to `end'"""
    is_dynamic = False

    def __init__(self, search_param, start, end, update):
        self.search_param = search_param
        self.start = start
        self.end = end
        # a degenerate (empty) sweep is almost certainly a caller error
        assert(self.start != self.end)
        self.update = update

    def generate(self):
        value = self.start
        # relies on update() converging exactly to `end` (see class doc)
        while value != self.end:
            yield {self.search_param: value}
            value = self.update(value)
class GreedyStepper(SearcherInterface):
    """Greedy search for a list of 'Step'

    Steps each parameter in turn (last stepper first); a value is kept only
    if `predicate(baseline, result)` accepts the run, otherwise the search
    falls back to the best cross seen so far before moving on.
    """
    is_dynamic = True
    def __init__(self, steppers, predicate):
        self.steppers = steppers
        self.predicate = predicate   # predicate(baseline, result) -> keep?
        self.best_cross = None
        for s in steppers:
            assert(isinstance(s, Step))
    def generate(self):
        # Start with initial terms:
        gens = []
        cross = {}
        # Reversing isn't strictly necessary, but is nice because "pop()" pops from the back, so we get expected order
        for s in reversed(self.steppers):
            gen = s.generate()
            cross.update(do_send(gen))
            gens.append(gen)
        baseline = yield cross
        self.best_cross = cross.copy()
        gen = None
        while len(gens):
            if gen is None:
                gen = gens.pop()
            partial = do_send(gen)
            if partial:
                # Valid term
                cross.update(partial)
                result = yield cross
                if self.predicate(baseline, result):
                    baseline = result
                    self.best_cross = cross.copy()
                # NOTE(review): a rejected partial stays in `cross` until this
                # generator is exhausted and the else-branch restores best_cross
                # -- confirm this is the intended greedy behavior.
                continue
            else:
                cross = self.best_cross.copy()
                # Reset because we didn't like the result:
                gen = None
    def get_best(self):
        return self.best_cross.copy()
class CartesianProduct(SearcherInterface):
    """A 'dumb' search, which enumerates all terms in `grid', optionally ordered to reduce the number of engine rebuilds"""
    is_dynamic = False

    def __init__(self, grid, no_rebuild_params=None):
        self.grid = grid
        self.no_rebuild_params = no_rebuild_params

    def generate(self):
        # Putting the no-rebuild params on the innermost dimension of the
        # product minimizes rebuilds, since only the last engine is cached.
        # (The general job-ordering problem reduces to a minimum-cost
        # Hamiltonian path, which is NP-complete; this heuristic suffices.)
        if self.no_rebuild_params:
            # stable sort: no-rebuild keys (True) move to the inner dimension
            keys = sorted(self.grid.keys(), key=lambda k: k in self.no_rebuild_params)
        else:
            keys = list(self.grid.keys())
        value_lists = [self.grid[k] for k in keys]
        for combo in itertools.product(*value_lists):
            yield dict(zip(keys, combo))
class Bisect(SearcherInterface):
    """ Runs binary search for search_param.
    Looks over the range [`lower_bound', `upper_bound') for the rightmost value for which predicate returns True.
    `predicate' takes a `Run' object and returns True or False.
    """
    is_dynamic = True

    def __init__(self, search_param, lower_bound, upper_bound, step_size, predicate):
        self.search_param = search_param
        # candidate values; range() gives O(1) indexing for the bisection
        self.plausible = range(lower_bound, upper_bound, step_size)
        assert(len(self.plausible) > 2)
        self.step_size = step_size
        self.predicate = predicate
        # fall back to the lower bound until a better candidate is confirmed
        self.best = {self.search_param: lower_bound}

    def was_good(self, past_run):
        return self.predicate(past_run.stats)

    def generate(self):
        lo = 0
        hi = len(self.plausible)
        while hi != lo + 1:
            mid = (hi + lo) // 2
            candidate = {self.search_param: self.plausible[mid]}
            # yield the term to the caller; resume with the caller-sent result
            outcome = yield candidate
            if self.predicate(outcome):
                lo = mid
                self.best = candidate.copy()
            else:
                hi = mid

    def get_best(self):
        return self.best
class FindUpperBound(SearcherInterface):
    """ Given a starting point, will attempt to maximize the parameter efficiently.
    An example of 'delegating' to another searcher internally via 'yield from'"""
    is_dynamic = True
    def __init__(self, search_param, start, predicate, num_bisect_steps):
        self.search_param = search_param
        self.start = start               # assumed positive -- TODO confirm
        self.predicate = predicate       # predicate(result) -> still OK?
        self.num_bisect_steps = num_bisect_steps
        self.best = {self.search_param: start}
    def generate(self):
        # First we need to find an upper bound, we can do this by jumping in powers of two:
        curr_value = self.start * 2
        result = yield {self.search_param: curr_value}
        while self.predicate(result):
            curr_value *= 2
            result = yield {self.search_param: curr_value}
        # first failing value becomes the upper bound; last passing one the lower
        upper_bound = int(curr_value)
        lower_bound = int(curr_value / 2)
        # If we want to do at most num_bisect_steps, we know that the number of elements we will need to check is (upper_bound - lower_bound)/StepSize, so the number of checks done by binary search is log_2(Range/StepSize) = num_steps, so
        # Range / 2^(num_steps) = step_size
        # NOTE(review): step_size truncates to 0 when the range is smaller than
        # 2**num_bisect_steps, which would make range() raise ValueError -- confirm.
        step_size = int((upper_bound - lower_bound) / (2**self.num_bisect_steps))
        bisector = Bisect(search_param=self.search_param,
                          lower_bound=lower_bound,
                          upper_bound=upper_bound,
                          step_size=step_size,
                          predicate=self.predicate)
        g = bisector.generate()
        # delegate: caller's send() values flow straight into the bisector
        yield from g
        self.best = bisector.get_best()
    def get_best(self):
        return self.best.copy()
class Overlay(SearcherInterface):
    """ Overlays `overlay_dict' to each cross term produced by `subject' """

    @property
    def is_dynamic(self):
        # purely a pass-through of the wrapped searcher's behavior
        return self.subject.is_dynamic

    def __init__(self, subject, overlay_dict):
        self.subject = subject
        self.overlay_dict = overlay_dict

    def generate(self):
        gen = self.subject.generate()
        term = do_send(gen)
        while term:
            term.update(self.overlay_dict)
            # forward the caller's feedback into the wrapped searcher
            feedback = yield term
            term = do_send(gen, feedback)

    def get_best(self):
        best = self.subject.get_best()
        best.update(self.overlay_dict)
        return best
class Composer(SearcherInterface):
    """ Compose multiple SearcherInterfaces with customizable logic to connect them.
    Each SearcherInterface must be associated with a predicate which returns True if we should use the next item
    from the generator, and False if we should query the next generator.
    This predicate should take a single `Run' and return True or False. This predicate can be (in preference order):
    - Passed in as the second position of a tuple [ie: (SearcherInterfaceObject, predicate)]
    - Used as the same `predicate' property of the SearcherInterface in question
    - Always True (Default) """
    is_dynamic = True
    def __init__(self, searchersAndPreds, timeout):
        self.timeout = timeout
        self.searchers = []
        self.predicates = []
        def default_predicate(x): return True
        # NOTE(review): self.timeout is assigned twice; the second
        # assignment is redundant (harmless).
        self.timeout = timeout
        for item in searchersAndPreds:
            if isinstance(item, tuple):
                # Expect (Searcher, lambda)
                assert(len(item) == 2)
                assert(callable(item[1]))
                self.searchers.append(item[0])
                self.predicates.append(item[1])
            else:
                self.searchers.append(item)
                # if item has a predicate, we capture that, otherwise, use our default:
                if hasattr(item, "predicate"):
                    assert(callable(item.predicate))
                    self.predicates.append(item.predicate)
                else:
                    self.predicates.append(default_predicate)
    def generate(self):
        curr_cross = {}
        gens = []
        num_tries = 0
        # Walk through all searchers to get initial values:
        for s in self.searchers:
            gen = s.generate()
            curr_cross.update(do_send(gen))
            gens.append(gen)
        # yields at most `timeout` composed terms
        while num_tries != self.timeout:
            past_run = yield curr_cross
            num_tries += 1
            past_cross = curr_cross.copy()
            term_to_twiddle = None
            # first searcher whose predicate accepts the last run gets to move
            for g, pred in zip(gens, self.predicates):
                if pred(past_run):
                    term_to_twiddle = do_send(g, past_run)
                    # May be None, in which case, we need to query the next item
                    if term_to_twiddle:
                        break
            if term_to_twiddle is None:
                # If we walked through all our generators, and nothing wanted to run, we "forcefully"
                # query all generators (but backwards/LIFO style)
                for g in reversed(gens):
                    term_to_twiddle = do_send(g, past_run)
                    if term_to_twiddle:
                        break
            if term_to_twiddle is None:
                # If we _still_ have nothing to send, we're completely exausted, so we're done generating
                break
            # We now have a partial term:
            curr_cross.update(term_to_twiddle)
|
11531746
|
from __future__ import print_function
import argparse
import datetime
import logging
import random
import threading
import time
# Python 2/3 compatible queue import.
try:
    import queue
    from queue import Queue
except ImportError:
    # Python 2: the module is named `Queue`. BUG FIX: the fallback branch
    # previously re-ran `from queue import Queue`, which is exactly the
    # import that just failed, so Python 2 raised ImportError anyway.
    import Queue as queue
    from Queue import Queue
import gpudb
from gpudb import GPUdbColumnProperty as GCP, GPUdbRecordColumn as GRC, \
GPUdbTableMonitor
"""
This example demonstrates a scenario where the GPUdbTableMonitor.Client class
might be needed to be used in code which already runs in its own thread.
Since the table monitor Client class itself runs threads internally, it is
possible to pass on the notification data received to the user code using a
shared Queue, which this example shows.
The class QueuedGPUdbTableMonitor derives from GPUdbTableMonitor.Client class
and defines the callback methods.
The class TableMonitorExampleClient is a class running in its own thread
and communicating with an instance of QueuedGPUdbTableMonitor class using
a Queue instance.
The main method does the following:
1. Creates a Queue instance
2. Creates an instance of TableMonitorExampleClient with the Queue instance
created.
3. Creates an instance of QueuedGPUdbTableMonitor class with the Queue
instance.
4. Starts the client.
5. Starts the table monitor.
6. Performs some table operations like inserts and deletes.
7. The client class receives the notification data in the shared Queue
and prints out the data received.
"""
class QueuedGPUdbTableMonitor(GPUdbTableMonitor.Client):
""" An example implementation which just passes on the received objects
to a simple Queue which is passed in as an argument to the constructor
of this class.
"""
    def __init__(self, db, tablename,
                 record_queue, options = None):
        """ Constructor for QueuedGPUdbTableMonitor class
        Args:
            db (GPUdb):
                The handle to the GPUdb
            tablename (str):
                Name of the table to create the monitor for
            record_queue (queue.Queue):
                A Queue instance where notifications along with payloads can be
                passed into for client to consume and act upon
            options (GPUdbTableMonitor.Client.Options):
                Options instance which is passed on to the super class
                GPUdbTableMonitor constructor
        """
        # Define the callback methods and create the objects of type
        # GPUdbTableMonitor.Callback wrapping the callback methods according to
        # type of the callback object. Pass on the list of such objects to the
        # GPUdbTableMonitor.Client constructor to receive notifications of the
        # events of interest and implement custom processing of the payloads
        # received. The default behaviour only logs the payloads and does not
        # do anything more useful.
        # Create the list of callbacks objects which are to be passed to the
        # 'GPUdbTableMonitor.Client' class constructor
        callbacks = [
            GPUdbTableMonitor.Callback(GPUdbTableMonitor.Callback.Type.INSERT_RAW,
                                       self.on_insert_raw,
                                       self.on_error),
            # ABORT: stop monitoring on a decode failure instead of skipping
            GPUdbTableMonitor.Callback(GPUdbTableMonitor.Callback.Type.INSERT_DECODED,
                                       self.on_insert_decoded,
                                       self.on_error,
                                       GPUdbTableMonitor.Callback.InsertDecodedOptions( GPUdbTableMonitor.Callback.InsertDecodedOptions.DecodeFailureMode.ABORT )),
            GPUdbTableMonitor.Callback(GPUdbTableMonitor.Callback.Type.UPDATED,
                                       self.on_update,
                                       self.on_error),
            GPUdbTableMonitor.Callback(GPUdbTableMonitor.Callback.Type.DELETED,
                                       self.on_delete,
                                       self.on_error),
            GPUdbTableMonitor.Callback(GPUdbTableMonitor.Callback.Type.TABLE_DROPPED,
                                       self.on_table_dropped,
                                       self.on_error),
            GPUdbTableMonitor.Callback(GPUdbTableMonitor.Callback.Type.TABLE_ALTERED,
                                       self.on_table_altered,
                                       self.on_error)
        ]
        # Invoke the base class constructor. This invocation is mandatory for
        # the table monitor to be actually functional.
        super(QueuedGPUdbTableMonitor, self).__init__(
            db, tablename,callback_list=callbacks,
            options=options)
        # shared queue consumed by the client thread
        self.record_queue = record_queue
    def on_insert_raw(self, record):
        """Callback method which is invoked with the raw payload bytes
        received from the table monitor when a new record is inserted
        Args:
            record (bytes): This is a collection of undecoded bytes. Decoding
                is left to the user who uses this callback.
        """
        self._logger.info("Payload received : %s " % record)
        # hand the payload to the consumer thread via the shared queue
        self.record_queue.put("Record inserted (raw) = %s" % record)
def on_insert_decoded(self, record):
"""Callback method which is invoked with the decoded payload record
received from the table monitor when a new record is inserted
Args:
record (dict): This will be a dict in the format given below
{u'state_province': u'--', u'city': u'Auckland',
u'temperature': 57.5, u'country': u'New Zealand',
u'time_zone': u'UTC+12',
u'ts': u'2020-09-28 00:28:37.481119', u'y': -36.840556,
u'x': 174.74}
"""
self._logger.info("Payload received : %s " % record)
self.record_queue.put("Record inserted (decoded) = %s" % record)
def on_update(self, count):
"""Callback method which is invoked with the number of records updated
as received from the table monitor when records are updated
Args:
count (int): Number of records updated.
"""
self._logger.info("Update count : %s " % count)
self.record_queue.put("Update count : %s " % count)
def on_delete(self, count):
"""Callback method which is invoked with the number of records updated
as received from the table monitor when records are deleted
Args:
count (int): Number of records deleted.
"""
self._logger.info("Delete count : %s " % count)
self.record_queue.put("Delete count : %s " % count)
def on_table_dropped(self, table_name):
"""Callback method which is invoked with the name of the table which
is dropped when the table monitor is in operation.
Args:
table_name (str): Name of the table dropped.
"""
self._logger.error("Table %s dropped " % table_name)
self.record_queue.put("Table %s dropped " % table_name)
def on_table_altered(self, message):
"""Callback method which is invoked with the name of the table which
is altered when the table monitor is in operation.
Args:
message (str): Name of the table altered.
"""
self._logger.error("Table %s altered " % message)
self.record_queue.put("Table %s altered " % message)
def on_error(self, message):
"""Callback method which is invoked with the error message
when some error has occurred.
Args:
message: The error message; often wrapping an exception
raised.
"""
self._logger.error("Error occurred " % message)
self.record_queue.put("Error occurred " % message)
class TableMonitorExampleClient(threading.Thread):
    """Example consumer thread that drains notification messages from a
    queue shared with a table monitor.

    Args:
        table_monitor (GPUdbTableMonitor.Client): An instance of
            GPUdbTableMonitor.Client (or a derivative) producing the
            notifications.
        work_queue (Queue): Queue shared between this client and the
            monitor; a ``None`` item is the sentinel that stops the thread.
    """

    def __init__(self, table_monitor, work_queue):
        super(TableMonitorExampleClient, self).__init__()
        self.table_monitor = table_monitor
        self.work_queue = work_queue
        # Cooperative shutdown flag, set by close()
        self.kill = False

    def run(self):
        """Consume queue items until killed or a None sentinel arrives."""
        while not self.kill:
            print(self.kill)
            print("Looking for new items in queue ...")
            payload = self.work_queue.get()  # timeout=1
            if payload is None:
                break
            print(payload)
        print("Exiting Client ...")

    def close(self):
        """Signal the thread to stop and wait for it to terminate."""
        print("In close method ...")
        self.kill = True
        # Unblock the (possibly waiting) get() call in run()
        self.work_queue.put(None)
        self.join()
""" Load random city weather data into a "history" table, in batches. Each
batch will be loaded 2 seconds apart, to give the table monitor time to push
that batch to the message queue and the queue client time to process the
batch
"""
def load_data(table_name):
    """Load random city weather data into the history table, in batches.

    Five batches are inserted, 2 seconds apart, to give the table monitor
    time to push each batch to the message queue and the queue client time
    to process it.

    Args:
        table_name (str): Name of the history table to insert into.
    """
    # Base data set, from which cities will be randomly chosen, with a random
    # new temperature picked for each, per batch loaded
    city_data = [
        ["Washington", "DC", "USA", -77.016389, 38.904722, 58.5, "UTC-5"],
        ["Paris", "TX", "USA", -95.547778, 33.6625, 64.6, "UTC-6"],
        ["Memphis", "TN", "USA", -89.971111, 35.1175, 63, "UTC-6"],
        ["Sydney", "Nova Scotia", "Canada", -60.19551, 46.13631, 44.5, "UTC-4"],
        ["La Paz", "Baja California Sur", "Mexico", -110.310833, 24.142222, 77, "UTC-7"],
        ["St. Petersburg", "FL", "USA", -82.64, 27.773056, 74.5, "UTC-5"],
        ["Oslo", "--", "Norway", 10.75, 59.95, 45.5, "UTC+1"],
        ["Paris", "--", "France", 2.3508, 48.8567, 56.5, "UTC+1"],
        ["Memphis", "--", "Egypt", 31.250833, 29.844722, 73, "UTC+2"],
        ["St. Petersburg", "--", "Russia", 30.3, 59.95, 43.5, "UTC+3"],
        ["Lagos", "Lagos", "Nigeria", 3.384082, 6.455027, 83, "UTC+1"],
        ["La Paz", "<NAME>", "Bolivia", -68.15, -16.5, 44, "UTC-4"],
        ["Sao Paulo", "Sao Paulo", "Brazil", -46.633333, -23.55, 69.5, "UTC-3"],
        ["Santiago", "Santiago Province", "Chile", -70.666667, -33.45, 62, "UTC-4"],
        ["Buenos Aires", "--", "Argentina", -58.381667, -34.603333, 65, "UTC-3"],
        ["Manaus", "Amazonas", "Brazil", -60.016667, -3.1, 83.5, "UTC-4"],
        ["Sydney", "New South Wales", "Australia", 151.209444, -33.865, 63.5, "UTC+10"],
        ["Auckland", "--", "New Zealand", 174.74, -36.840556, 60.5, "UTC+12"],
        ["Jakarta", "--", "Indonesia", 106.816667, -6.2, 83, "UTC+7"],
        ["Hobart", "--", "Tasmania", 147.325, -42.880556, 56, "UTC+10"],
        ["Perth", "Western Australia", "Australia", 115.858889, -31.952222, 68, "UTC+8"]
    ]
    # Grab a handle to the history table for inserting new weather records
    history_table = gpudb.GPUdbTable(name=table_name, db=h_db)
    random.seed(0)
    # Insert 5 batches of city weather records
    # ========================================
    # NOTE: loop variable renamed from `iter`, which shadowed the builtin
    for _batch in range(5):
        city_updates = []
        # Grab a random set of cities
        cities = random.sample(city_data, k=random.randint(1, int(len(city_data) / 2)))
        # Create a list of weather records to insert
        for city in cities:
            # Pick a random temperature for each city at the current time
            city_update = list(city)
            city_update[5] = city_update[5] + random.randrange(-10, 10)
            city_update.append(datetime.datetime.now())
            city_updates.append(city_update)
        # Insert the records into the table and allow time for table monitor to
        # process them before inserting the next batch.
        # BUGFIX: a bare `print` is a no-op in Python 3; print() emits the
        # blank line that was intended.
        print()
        print("[Main/Loader] Inserting <%s> new city temperatures..." % len(city_updates))
        history_table.insert_records(city_updates)
        time.sleep(2)
# end load_data_and_wait()
""" Create the city weather "history" & "status" tables used in this example
"""
def create_table(table_name):
    """Create the city weather "history" table in the "examples" schema.

    Args:
        table_name (str): Name of the table to create.
    """
    # Column list for the "history" table: location, coordinates,
    # temperature, time zone and timestamp
    history_columns = [
        ["city", GRC._ColumnType.STRING, GCP.CHAR16],
        ["state_province", GRC._ColumnType.STRING, GCP.CHAR32],
        ["country", GRC._ColumnType.STRING, GCP.CHAR16],
        ["x", GRC._ColumnType.DOUBLE],
        ["y", GRC._ColumnType.DOUBLE],
        ["temperature", GRC._ColumnType.DOUBLE],
        ["time_zone", GRC._ColumnType.STRING, GCP.CHAR8],
        ["ts", GRC._ColumnType.STRING, GCP.DATETIME]
    ]
    # Create the "history" table, placing it in the "examples" schema
    gpudb.GPUdbTable(
        history_columns,
        name=table_name,
        db=h_db,
        options={"collection_name": "examples"}
    )
# end create_tables()
""" Drop the city weather "history" table
"""
def clear_table(table_name):
    """Drop the given table from the database.

    Args:
        table_name (str): Name of the table to drop.
    """
    # Drop all the tables
    h_db.clear_table(table_name)
# end clear_tables()
def delete_records(h_db, table_name):
    """Delete all records with state_province 'Sao Paulo' from the table.

    Args:
        h_db (gpudb.GPUdb): Database connection handle.
        table_name (str): Name of the table to delete records from.

    Returns:
        int: Number of records removed.
    """
    print("In delete records ...")
    table = gpudb.GPUdbTable(name=table_name, db=h_db)
    size_before = table.size()
    print("Records before = %s" % size_before)
    # Remove every Sao Paulo record in one call
    table.delete_records(expressions=["state_province = 'Sao Paulo'"])
    size_after = table.size()
    print("Records after = %s" % size_after)
    return size_before - size_after
if __name__ == '__main__':
    # Set up args
    parser = argparse.ArgumentParser(description='Run table monitor example.')
    parser.add_argument('command', nargs="?",
                        help='command to execute (currently only "clear" to remove the example tables')
    parser.add_argument('--host', default='localhost', help='Kinetica host to '
                                                            'run '
                                                            'example against')
    parser.add_argument('--port', default='9191', help='Kinetica port')
    parser.add_argument('--username', help='Username of user to run example with')
    parser.add_argument('--password', help='<PASSWORD>')
    args = parser.parse_args()
    # Establish connection with an instance of Kinetica.
    # BUGFIX: honor the --port option; the port was previously hard-coded
    # to "9191", silently ignoring the parsed argument.
    h_db = gpudb.GPUdb(encoding="BINARY", host=args.host, port=args.port,
                       username=args.username, password=args.password)
    # Identify the message queue, running on port 9002
    table_monitor_queue_url = "tcp://" + args.host + ":9002"
    tablename = 'examples.table_monitor_history'
    # If command line arg is clear, just clear tables and exit
    if (args.command == "clear"):
        clear_table(tablename)
        quit()
    clear_table(tablename)
    create_table(tablename)
    work_queue = Queue()
    # create the `QueuedGPUdbTableMonitor` class and pass in the Queue instance.
    monitor = QueuedGPUdbTableMonitor(h_db, tablename,
                                      record_queue=work_queue)
    monitor.logging_level = logging.DEBUG
    # Create the `TableMonitorExampleClient` class and pass in the Queue
    # instance.
    client = TableMonitorExampleClient(table_monitor=monitor,
                                       work_queue=work_queue)
    # Start the client
    client.start()
    # Start the table monitor
    monitor.start_monitor()
    load_data(tablename)
    delete_records(h_db, tablename)
    time.sleep(10)
    # Close the client
    client.close()
    # Stop the Table monitor after the client is done with it
    monitor.stop_monitor()
|
11531758
|
import os
from concurrent import futures
import grpc
import time
import inference_service_pb2, inference_service_pb2_grpc
CHUNK_SIZE = 1024 * 1024 # 1MB
def get_file_chunks(filename):
    """Read *filename* and yield its contents as protobuf Chunk messages.

    The file is read in CHUNK_SIZE (1MB) pieces; iteration stops at EOF.
    """
    with open(filename, 'rb') as stream:
        # iter(callable, sentinel): call read() until it returns b'' (EOF)
        for data in iter(lambda: stream.read(CHUNK_SIZE), b''):
            yield inference_service_pb2.Chunk(buffer=data)
def get_result_chunks(filename):
    """Read *filename* and yield its contents as protobuf Chunk messages.

    Identical behavior to get_file_chunks; the separate name is kept for
    API compatibility, but the duplicated implementation is replaced by
    delegation.
    """
    yield from get_file_chunks(filename)
class InferenceClient:
    """Thin gRPC client wrapper around the Inferer service."""

    def __init__(self, address):
        """Open an insecure channel to *address* and create the stub."""
        self.stub = inference_service_pb2_grpc.InfererStub(
            grpc.insecure_channel(address))

    def infer(self, in_file_name):
        """Stream *in_file_name* to the server; return the response length."""
        response = self.stub.infer(get_file_chunks(in_file_name))
        return response.length
|
11531853
|
import torch
from torch import nn
import numpy as np
class ProjectPoint2Image(nn.Module):
    """Differentiable renderer for point cloud.

    Projects a batch of 3D points with a pinhole camera model and splats
    each point onto the image plane as an isotropic Gaussian blob whose
    features are accumulated per pixel.
    """
    def __init__(self, K, im_width, im_height, uv_only=False):
        # K: 3x3 camera intrinsics tensor; im_width/im_height: output image
        # size in pixels; uv_only: if True, forward() returns re-projected
        # (v, u, depth) coordinates instead of a rendered image.
        super(ProjectPoint2Image, self).__init__()
        self.K = K
        self.im_width = im_width
        self.im_height = im_height
        # Pixel grid stored in (v, u) = (row, col) order — note the swap
        # relative to the usual (u, v) convention; forward() builds its uv
        # tensor in the same order.
        ui, vi = np.meshgrid(range(im_width), range(im_height))
        grid = np.hstack((vi.reshape(-1,1), ui.reshape(-1,1))).astype(np.float32)
        self.grid = torch.tensor(grid).to(K.device)
        # params of gaussian kernel at every projected point, adapt wrt. intrinsics
        # NOTE(review): forward() overwrites self.sigma on every call, so this
        # initial value only matters if forward() is never invoked.
        self.sigma = K[0,0].item()/16
        # if uv_only then return the re-projected uv coordinates, not the image
        self.uv_only = uv_only
    def forward(self, RT, pts_3d, pts_feat, pts_scale):
        """Project onto image
        Args:
            RT (torch.FloatTensor): camera extrinsics, Bx3x4
            pts_3d (torch.FloatTensor): point locations, BxNx3
            pts_feat (torch.FloatTensor): point features, BxNxC
            pts_scale (torch.FloatTensor): point scales, BxN
        Returns:
            img (torch.FloatTensor): projected image, BxCxHxW
                (or BxNx3 (v, u, depth) coordinates when uv_only is True)
        """
        device = RT.device
        bs = RT.shape[0]
        R = RT[:, :3,:3]
        T = RT[:, :3, 3]
        if pts_3d.shape[1]==1: # larger blob if there is single point
            self.sigma = self.K[0,0].item()/16.
        else:
            self.sigma = self.K[0,0].item()/32.
        # transform points from world coordinate to camera coordinate
        points_local = (R @ pts_3d.transpose(1,2)).transpose(1,2) + T.view(bs,1,3)
        # perspective projection
        points_proj = self.K.unsqueeze(0).to(device) @ points_local.transpose(1,2) # Bx3xN
        # keep only points safely in front of the camera (depth > 0.1)
        points_mask = points_proj[:,2]>0.1 #BxN
        # depth is clamped to avoid division by (near-)zero
        u = points_proj[:,0,:]/points_proj[:,2,:].clamp(min=0.1)
        v = points_proj[:,1,:]/points_proj[:,2,:].clamp(min=0.1)
        uv = torch.cat((v.reshape(bs,-1,1), u.reshape(bs,-1,1)),dim=2)
        if self.uv_only:
            uvz = torch.cat((uv, points_proj[:,2,:].reshape(bs,-1,1)),dim=2)
            return uvz
        # project points to image plane with soft weights
        # to differentiate to the geometry
        distance = uv.view(bs,-1,1,2) - self.grid.view(1,1,-1,2).expand(bs,-1,-1,-1).to(device) # B x N x (HxW) x 2
        distance_sq = distance[...,0]**2 + distance[...,1]**2 # B x N x (HxW)
        # Gaussian splat weight per (point, pixel); per-point scale widens the kernel
        weight = torch.exp(-distance_sq / (pts_scale.view(bs,-1,1) * self.sigma * self.sigma))
        # zero out contributions from points behind / too close to the camera
        weight = weight * points_mask.view(bs,-1,1).float()
        # sum up features from all 3d points for each grid point
        img = pts_feat.transpose(1,2) @ weight # (B x C x N) x (B x N x (HxW)) --> B x C x (HxW)
        img = img.view(bs, -1, self.im_height, self.im_width)
        return img
|
11531899
|
# Mitsuba renderer SCons build configuration: macOS 32-bit (i386) release
# build using the Intel C/C++ compilers (icc/icpc) against the OS X 10.7 SDK.
BUILDDIR = '#build/release'
DISTDIR = '#Mitsuba.app'
# Compilers and flags
CXX = 'icpc'
CC = 'icc'
CCFLAGS = ['-arch', 'i386', '-mmacosx-version-min=10.7', '-mfpmath=sse', '-isysroot', '/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.7.sdk', '-O3', '-ipo', '-no-prec-div', '-xSSE3', '-fp-model', 'fast=2', '-openmp', '-wd279', '-wd1875', '-Wall', '-g', '-pipe', '-DMTS_DEBUG', '-DSINGLE_PRECISION', '-DSPECTRUM_SAMPLES=3', '-DMTS_SSE', '-DMTS_HAS_COHERENT_RT', '-fvisibility=hidden']
CXXFLAGS = ['-std=c++0x']
LINKFLAGS = ['-g', '-framework', 'OpenGL', '-framework', 'Cocoa', '-arch', 'i386', '-mmacosx-version-min=10.7', '-Wl,-syslibroot,/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.7.sdk', '-openmp', '-Wl,-headerpad,128', '-wd11012']
# Library / include search locations for the bundled dependencies
BASEINCLUDE = ['#include', '#dependencies/include']
BASELIBDIR = ['#dependencies/lib']
BASELIB = ['m', 'pthread', 'gomp', 'Half']
OEXRINCLUDE = ['#dependencies/include/OpenEXR']
OEXRLIB = ['IlmImf', 'Imath', 'Iex', 'z']
PNGLIB = ['png']
JPEGLIB = ['jpeg']
XERCESLIB = ['xerces-c']
GLLIB = ['GLEWmx', 'objc']
GLFLAGS = ['-DGLEW_MX']
BOOSTINCLUDE = ['#dependencies']
BOOSTLIB = ['boost_filesystem', 'boost_system', 'boost_thread']
# Python bindings: system frameworks for 2.6/2.7, bundled headers for 3.x
PYTHON26INCLUDE= ['/System/Library/Frameworks/Python.framework/Versions/2.6/Headers']
PYTHON26LIBDIR = ['/System/Library/Frameworks/Python.framework/Versions/2.6/lib']
PYTHON26LIB = ['boost_python26', 'boost_system']
PYTHON27INCLUDE= ['/System/Library/Frameworks/Python.framework/Versions/2.7/Headers']
PYTHON27LIBDIR = ['/System/Library/Frameworks/Python.framework/Versions/2.7/lib']
PYTHON27LIB = ['boost_python27', 'boost_system']
PYTHON33INCLUDE= ['#dependencies/include/python3.3']
PYTHON33LIB = ['boost_python33', 'boost_system']
PYTHON34INCLUDE= ['#dependencies/include/python3.4']
PYTHON34LIB = ['boost_python34', 'boost_system']
COLLADAINCLUDE = ['#dependencies/include/collada-dom', '#dependencies/include/collada-dom/1.4']
COLLADALIB = ['collada14dom24']
QTDIR = '#dependencies'
FFTWLIB = ['fftw3.3']
|
11531907
|
import importlib.util
import os
import platform
import shutil
import subprocess
from pathlib import Path
from setuptools.command.build_ext import build_ext
from .build_ext_option import BuildExtOption, add_new_build_ext_option
from .cmake_extension import CMakeExtension
# These options are listed in `python setup.py build_ext -h`.
# Each BuildExtOption maps a long option name ("variable") to a short flag.
custom_options = [
    BuildExtOption(
        variable="define",
        short="D",
        help="Create or update CMake cache " "(example: '-DBAR=b;FOO=f')",
    ),
    BuildExtOption(
        variable="component",
        short="C",
        help="Install only a specific CMake component (example: '-Cbindings')",
    ),
    BuildExtOption(
        variable="no-cmake-extension",
        short="K",
        help="Disable a CMakeExtension module (examples: '-Kall', '-Kbar', '-Kbar;foo')",
    ),
]
# Register the custom options with setuptools' build_ext command, replacing
# any pre-existing option of the same name (override=True).
for o in custom_options:
    add_new_build_ext_option(option=o, override=True)
class BuildExtension(build_ext):
    """
    Setuptools build extension handler.
    It processes all the extensions listed in the 'ext_modules' entry.
    """

    def initialize_options(self):
        """Initialize the (custom) command-line options to their defaults."""
        # Initialize base class
        build_ext.initialize_options(self)
        # Initialize the '--define' custom option, overriding the pre-existing one.
        # Originally, it was aimed to pass C preprocessor definitions, but instead we
        # use it to pass custom configuration options to CMake.
        self.define = None
        # Initialize the '--component' custom option.
        # It overrides the content of the cmake_component option of CMakeExtension.
        self.component = None
        # Initialize the 'no-cmake-extension' custom option.
        # It allows disabling one or more CMakeExtension from the command line.
        self.no_cmake_extension = None

    def finalize_options(self):
        """Parse the raw option strings into normalized list attributes."""
        # Parse the custom CMake options and store them in a new attribute
        defines = [] if self.define is None else self.define.split(";")
        self.cmake_defines = [f"-D{define}" for define in defines]
        # Parse the disabled CMakeExtension modules and store them in a new attribute
        self.no_cmake_extensions = (
            []
            if self.no_cmake_extension is None
            else self.no_cmake_extension.split(";")
        )
        # Call base class
        build_ext.finalize_options(self)

    def run(self) -> None:
        """
        Process all the registered extensions executing only the CMakeExtension objects.
        """
        # Filter the CMakeExtension objects
        cmake_extensions = [e for e in self.extensions if isinstance(e, CMakeExtension)]
        if len(cmake_extensions) == 0:
            raise ValueError("No CMakeExtension objects found")
        # Check that CMake is installed
        if shutil.which("cmake") is None:
            raise RuntimeError("Required command 'cmake' not found")
        # Check that Ninja is installed
        if shutil.which("ninja") is None:
            raise RuntimeError("Required command 'ninja' not found")
        # Disable all extensions if this env variable is present
        # (hoisted out of the loop: the check does not depend on the extension)
        disabled_set = {"0", "false", "off", "no"}
        env_var_name = "CMAKE_BUILD_EXTENSION_ENABLED"
        if (
            env_var_name in os.environ
            and os.environ[env_var_name].lower() in disabled_set
        ):
            return
        for ext in cmake_extensions:
            # Disable the extension if specified in the command line
            if (
                ext.name in self.no_cmake_extensions
                or "all" in self.no_cmake_extensions
            ):
                continue
            self.build_extension(ext)

    def build_extension(self, ext: CMakeExtension) -> None:
        """
        Build a CMakeExtension object.
        Args:
            ext: The CMakeExtension object to build.
        """
        if self.inplace and ext.disable_editable:
            print(f"Editable install recognized. Extension '{ext.name}' disabled.")
            return
        # Export CMAKE_PREFIX_PATH of all the dependencies
        for pkg in ext.cmake_depends_on:
            try:
                importlib.import_module(pkg)
            except ImportError:
                raise ValueError(f"Failed to import '{pkg}'")
            init = importlib.util.find_spec(pkg).origin
            BuildExtension.extend_cmake_prefix_path(path=str(Path(init).parent))
        # The ext_dir directory can be thought as a temporary site-package folder.
        #
        # Case 1: regular installation.
        #   ext_dir is the folder that gets compressed to make the wheel archive. When
        #   installed, the archive is extracted in the active site-package directory.
        # Case 2: editable installation.
        #   ext_dir is the in-source folder containing the Python packages. In this case,
        #   the CMake project is installed in-source.
        ext_dir = Path(self.get_ext_fullpath(ext.name)).parent.absolute()
        cmake_install_prefix = ext_dir / ext.install_prefix
        # CMake configure arguments
        configure_args = [
            "-GNinja",
            f"-DCMAKE_BUILD_TYPE={ext.cmake_build_type}",
            f"-DCMAKE_INSTALL_PREFIX:PATH={cmake_install_prefix}",
            # Fix #26: https://github.com/diegoferigo/cmake-build-extension/issues/26
            f"-DCMAKE_MAKE_PROGRAM={shutil.which('ninja')}",
        ]
        # Extend the configure arguments with those passed from the extension
        configure_args += ext.cmake_configure_options
        # CMake build arguments
        build_args = ["--config", ext.cmake_build_type]
        if platform.system() == "Windows":
            configure_args += []
        elif platform.system() in {"Linux", "Darwin"}:
            configure_args += []
        else:
            raise RuntimeError(f"Unsupported '{platform.system()}' platform")
        # Parse the optional CMake options. They can be passed as:
        #
        # python setup.py build_ext -D"BAR=Foo;VAR=TRUE"
        # python setup.py bdist_wheel build_ext -D"BAR=Foo;VAR=TRUE"
        # python setup.py install build_ext -D"BAR=Foo;VAR=TRUE"
        # python setup.py install -e build_ext -D"BAR=Foo;VAR=TRUE"
        # pip install --global-option="build_ext" --global-option="-DBAR=Foo;VAR=TRUE" .
        #
        configure_args += self.cmake_defines
        # Get the absolute path to the build folder
        build_folder = str(Path(".").absolute() / f"{self.build_temp}_{ext.name}")
        # Make sure that the build folder exists
        Path(build_folder).mkdir(exist_ok=True, parents=True)
        # 1. Compose CMake configure command
        configure_command = [
            "cmake",
            "-S",
            ext.source_dir,
            "-B",
            build_folder,
        ] + configure_args
        # 2. Compose CMake build command
        build_command = ["cmake", "--build", build_folder] + build_args
        # 3. Compose CMake install command
        install_command = ["cmake", "--install", build_folder]
        # If the cmake_component option of the CMakeExtension is used, install just
        # the specified component.
        if self.component is None and ext.cmake_component is not None:
            install_command.extend(["--component", ext.cmake_component])
        # Instead, if the `--component` command line option is used, install just
        # the specified component. This has higher priority than what specified in
        # the CMakeExtension.
        if self.component is not None:
            install_command.extend(["--component", self.component])
        print("")
        print("==> Configuring:")
        print(f"$ {' '.join(configure_command)}")
        print("")
        print("==> Building:")
        print(f"$ {' '.join(build_command)}")
        print("")
        print("==> Installing:")
        print(f"$ {' '.join(install_command)}")
        print("")
        # Call CMake
        subprocess.check_call(configure_command)
        subprocess.check_call(build_command)
        subprocess.check_call(install_command)
        # Write content to the top-level __init__.py
        if ext.write_top_level_init is not None:
            with open(file=cmake_install_prefix / "__init__.py", mode="w") as f:
                f.write(ext.write_top_level_init)
        # Write content to the bin/__main__.py magic file to expose binaries
        if len(ext.expose_binaries) > 0:
            bin_dirs = {str(Path(d).parents[0]) for d in ext.expose_binaries}
            import inspect

            main_py = inspect.cleandoc(
                f"""
                from pathlib import Path
                import subprocess
                import sys
                def main():
                    binary_name = Path(sys.argv[0]).name
                    prefix = Path(__file__).parent.parent
                    bin_dirs = {str(bin_dirs)}
                    binary_path = ""
                    for dir in bin_dirs:
                        path = prefix / Path(dir) / binary_name
                        if path.is_file():
                            binary_path = str(path)
                            break
                        path = Path(str(path) + ".exe")
                        if path.is_file():
                            binary_path = str(path)
                            break
                    if not Path(binary_path).is_file():
                        name = binary_path if binary_path != "" else binary_name
                        raise RuntimeError(f"Failed to find binary: {{ name }}")
                    sys.argv[0] = binary_path
                    result = subprocess.run(args=sys.argv, capture_output=False)
                    exit(result.returncode)
                if __name__ == "__main__" and len(sys.argv) > 1:
                    sys.argv = sys.argv[1:]
                    main()"""
            )
            bin_folder = cmake_install_prefix / "bin"
            Path(bin_folder).mkdir(exist_ok=True, parents=True)
            with open(file=bin_folder / "__main__.py", mode="w") as f:
                f.write(main_py)

    @staticmethod
    def extend_cmake_prefix_path(path: str) -> None:
        """Prepend *path* (made absolute) to the CMAKE_PREFIX_PATH env var.

        Raises:
            ValueError: If the path does not exist.
        """
        abs_path = Path(path).absolute()
        if not abs_path.exists():
            raise ValueError(f"Path {abs_path} does not exist")
        # BUGFIX: store the validated absolute path; the original prepended
        # the raw (possibly relative) input after validating abs_path.
        # NOTE(review): ':' is the POSIX path-list separator; on Windows
        # CMake expects ';' — confirm if Windows support is required.
        if "CMAKE_PREFIX_PATH" in os.environ:
            os.environ[
                "CMAKE_PREFIX_PATH"
            ] = f"{abs_path}:{os.environ['CMAKE_PREFIX_PATH']}"
        else:
            os.environ["CMAKE_PREFIX_PATH"] = str(abs_path)
|
11531960
|
from tests.utils import W3CTestCase
class TestFlexbox_Flex10N(W3CTestCase):
    """W3C reference tests for the 'flexbox_flex-1-0-N' test group."""
    # Inject one test method per reference file found for this group;
    # vars() here is the class namespace being populated at definition time.
    vars().update(W3CTestCase.find_tests(__file__, 'flexbox_flex-1-0-N'))
|
11531969
|
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
# plotting Myanmar unicode characters with matplotlib
# Note: plotting syllables are not working with this program
# written by <NAME>, Visiting Professor, LST, NECTEC, Thailand
# ref: https://jdhao.github.io/2018/04/08/matplotlib-unicode-character/
# ref: https://stackoverflow.com/questions/3899980/how-to-change-the-font-size-on-a-matplotlib-plot
# how to run: python plot-unicode-char.py
# Load a local Myanmar-capable TrueType font; matplotlib's default fonts
# cannot render these code points.
fontPath = '/home/ye/.local/share/fonts/myanmar3.ttf'
prop = fm.FontProperties(fname=fontPath, size=60)
# The following rcParams-based alternatives for selecting the font and
# size globally are also worth knowing about:
#plt.rcParams['font.family'] = 'sans-serif'
#plt.rcParams['font.sans-serif'] = ['Myanmar3']
#matplotlib.rcParams.update({'font.size': 50})
# Draw five Myanmar consonants along the diagonal in decreasing sizes
plt.text(0.2, 0.2, "က", fontproperties=prop)
plt.text(0.4, 0.4, "ခ", fontproperties=prop, size=45)
plt.text(0.6, 0.6, "ဂ", fontproperties=prop, size=35)
plt.text(0.8, 0.8, "ဃ", fontproperties=prop, size=25)
plt.text(0.9, 0.9, "င", fontproperties=prop, size=15)
plt.savefig('plot-unicode-char.png')
plt.show()
|
11531988
|
import sys
import os
import argparse
import logging
import json
import time
import numpy as np
import openslide
import PIL
import cv2
import matplotlib.pyplot as plt
import math
import json
import logging
import time
import tensorflow as tf
import gzip
import timeit
from scipy import stats
from tensorflow.keras import backend as K
from skimage.transform import resize, rescale
from scipy import ndimage
from torch.utils.data import DataLoader
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
from helpers.utils import *
from dataloader.inference_data_loader import WSIStridedPatchDataset
from models.seg_models import get_inception_resnet_v2_unet_softmax, unet_densenet121
from models.deeplabv3p_original import Deeplabv3
from models.utils import do_crf
from collections import OrderedDict
# Fix the RNG seed so patch sampling is reproducible across runs
np.random.seed(0)
# Example invocation:
# python3 multi_model_test_sequence.py ../configs/Inference_Config.json ../../saved_models/keras_models/DFCN_UNET_CM17_RANDOM_16_NCRF_BCE_DICE_fold_1/model.10-0.24.h5 ../../saved_models/keras_models/IncpResV2_UNET_CM17_RANDOM_16_NCRF_BCE_DICE_fold_0/model.10-0.28.h5 ../../saved_models/keras_models/DeeplabV3p_CM17_RANDOM_16_NCRF_BCE_DICE_fold_2/model.09-0.28.h5
# CLI: config path plus one weight file per model fold, and optional
# inference parameters (GPU, workers, pyramid level, stride, NMS radius).
parser = argparse.ArgumentParser(description='Get the probability map of tumor'
                                 ' patch predictions given a WSI')
parser.add_argument('cfg_path', default=None, metavar='CFG_PATH', type=str,
                    help='Path to the config file in json format related to'
                    ' the ckpt file')
parser.add_argument('model_path_DFCN', default=None, metavar='MODEL_PATH', type=str,
                    help='Path to the saved model weights file of a Keras model')
parser.add_argument('model_path_IRFCN', default=None, metavar='MODEL_PATH', type=str,
                    help='Path to the saved model weights file of a Keras model')
parser.add_argument('model_path_DLv3p', default=None, metavar='MODEL_PATH', type=str,
                    help='Path to the saved model weights file of a Keras model')
parser.add_argument('--GPU', default='0', type=str, help='which GPU to use'
                    ', default 0')
parser.add_argument('--num_workers', default=4, type=int, help='number of '
                    'workers to use to make batch, default 5')
parser.add_argument('--level', default=6, type=int, help='heatmap generation level,'
                    ' default 6')
parser.add_argument('--sampling_stride', default=int(256//64), type=int, help='Sampling pixels in tissue mask,'
                    ' default 4')
parser.add_argument('--radius', default=12, type=int, help='radius for nms,'
                    ' default 12 (6 used in Google paper at level 7,'
                    ' i.e. inference stride = 128)')
parser.add_argument('--roi_masking', default=True, type=int, help='Sample pixels from tissue mask region,'
                    ' default True, points are not sampled from glass region')
def forward_transform(data, flip, rotate):
    """Apply the given flip/rotation augmentation to an array.

    Args:
        data (np.ndarray): Input array (image-like, H x W [x C]).
        flip (str): 'FLIP_LEFT_RIGHT' mirrors horizontally; any other
            value leaves the data unflipped.
        rotate (str): One of 'ROTATE_90', 'ROTATE_180', 'ROTATE_270'
            (counter-clockwise); any other value applies no rotation.

    Returns:
        np.ndarray: The transformed array.
    """
    if flip == 'FLIP_LEFT_RIGHT':
        data = np.fliplr(data)
    # Map the rotation tag to the number of 90-degree CCW turns
    turns = {'ROTATE_90': 1, 'ROTATE_180': 2, 'ROTATE_270': 3}.get(rotate, 0)
    return np.rot90(data, turns) if turns else data
def inverse_transform(data, flip, rotate):
    """Undo the rotation part of the augmentation named by *flip*/*rotate*.

    The flip is its own inverse; each 'ROTATE_k' tag is inverted by
    rotating (4 - k/90) quarter turns counter-clockwise.

    Args:
        data (np.ndarray): Input array (image-like, H x W [x C]).
        flip (str): 'FLIP_LEFT_RIGHT' mirrors horizontally; any other
            value leaves the data unflipped.
        rotate (str): One of 'ROTATE_90', 'ROTATE_180', 'ROTATE_270';
            any other value applies no rotation.

    Returns:
        np.ndarray: The transformed array.
    """
    if flip == 'FLIP_LEFT_RIGHT':
        data = np.fliplr(data)
    # Inverse number of 90-degree CCW turns for each rotation tag
    turns = {'ROTATE_90': 3, 'ROTATE_180': 2, 'ROTATE_270': 1}.get(rotate, 0)
    return np.rot90(data, turns) if turns else data
def get_index(coord_ax, probs_map_shape_ax, grid_ax):
    """Clip a patch extent so it stays inside the probability map.

    Given a centre coordinate along one axis, the map size along that axis
    and the patch size, returns how far the patch may extend below (_min)
    and above (_max) the centre without leaving the map.

    Args:
        coord_ax (int): Centre coordinate along the axis.
        probs_map_shape_ax (int): Map size along the axis.
        grid_ax (int): Patch size along the axis.

    Returns:
        tuple: (_min, _max) clipped half-extents.
    """
    half = grid_ax // 2
    lo, hi = half, half
    # Clamp the lower extent at index 0
    if coord_ax - lo < 0:
        lo = coord_ax
    # Clamp the upper extent at the map boundary
    if coord_ax + hi > probs_map_shape_ax:
        hi = probs_map_shape_ax - coord_ax
    return lo, hi
def get_wsi_cases(args, train_mode, model_name, dataset_name, patient_range, group_range):
    '''
    Build an ordered mapping of WSI case names to all of their I/O paths.

    Also generates (and caches to disk) the two tissue-mask variants for
    each slide when they do not exist yet.

    Args:
        args: Parsed CLI args; uses args.level, args.sampling_stride and
            args.cfg_path.
        train_mode (bool): True for training slides (with annotations),
            False for test slides.
        model_name (str): Model identifier used to build output paths.
        dataset_name (str): Dataset identifier used to build output paths.
        patient_range (tuple): (low, high) patient indices, high exclusive.
        group_range (tuple): (low, high) node indices, high exclusive.

    Returns:
        OrderedDict: 'patient_XXX_node_Y' -> dict of related file paths.
    '''
    wsi_dic = OrderedDict()
    level = args.level
    sampling_stride = args.sampling_stride
    npy_base_path = '../../predictions/{}/{}/level_{}_{}/npy'.format(model_name, dataset_name, str(level), str(sampling_stride))
    csv_base_path = '../../predictions/{}/{}/level_{}_{}/csv'.format(model_name, dataset_name, str(level), str(sampling_stride))
    png_base_path = '../../predictions/{}/{}/level_{}_{}/png'.format(model_name, dataset_name, str(level), str(sampling_stride))
    xml_base_path = '../../predictions/{}/{}/level_{}_{}/xml'.format(model_name, dataset_name, str(level), str(sampling_stride))
    l = patient_range[0]; u = patient_range[1]
    tissue_mask_base_path_v1 = '../../data/TM_L{}_v1'.format(level)
    tissue_mask_base_path_v2 = '../../data/TM_L{}_v2'.format(level)
    if not os.path.exists(tissue_mask_base_path_v1):
        os.makedirs(tissue_mask_base_path_v1)
    if not os.path.exists(tissue_mask_base_path_v2):
        os.makedirs(tissue_mask_base_path_v2)
    with open(args.cfg_path) as f:
        cfg = json.load(f)
    if train_mode:
        # for training: annotations are available
        label_base_path = cfg['cm17_train_annotation_path']
    else:
        # for testing: create the prediction output directories
        if not os.path.exists(npy_base_path):
            os.makedirs(npy_base_path)
        if not os.path.exists(csv_base_path):
            os.makedirs(csv_base_path)
        if not os.path.exists(png_base_path):
            os.makedirs(png_base_path)
        if not os.path.exists(xml_base_path):
            os.makedirs(xml_base_path)
    # Numpy paths to 3 models from 3 folds and 1 ensemble model prediction
    model1_npy_path = os.path.join(npy_base_path, 'model1')
    if not os.path.exists(model1_npy_path):
        os.mkdir(model1_npy_path)
    model2_npy_path = os.path.join(npy_base_path, 'model2')
    if not os.path.exists(model2_npy_path):
        os.mkdir(model2_npy_path)
    model3_npy_path = os.path.join(npy_base_path, 'model3')
    if not os.path.exists(model3_npy_path):
        os.mkdir(model3_npy_path)
    ensemble_model_npy_path = os.path.join(npy_base_path, 'ensemble')
    if not os.path.exists(ensemble_model_npy_path):
        os.mkdir(ensemble_model_npy_path)
    # Ensembled CRF labelled multiplies to prob_map at threshold 0.5
    crf_model_npy_path = os.path.join(npy_base_path, 'ensemble_crf_l50')
    if not os.path.exists(crf_model_npy_path):
        os.mkdir(crf_model_npy_path)
    for i in range(l, u):
        for j in range(group_range[0], group_range[1]):
            wsi_name = 'patient_{:03d}_node_{}'.format(i,j)
            path_dic = {}
            if train_mode:
                folder = 'center_'+str(int(i//20))
                wsi_path = cfg['cm17_train_data_path']+'/{}/patient_{:03d}_node_{}.tif'.format(folder,i,j)
                label_path = label_base_path + '/patient_{:03d}_node_{}_mask.tif'.format(i,j)
                if not os.path.exists(label_path):
                    label_path = None
            else:
                wsi_path = cfg['cm17_test_data_path']+'/patient_{:03d}_node_{}.tif'.format(i,j)
                label_path = None
            # Tissue Mask Generation (cached on disk, regenerated if missing)
            slide = None  # opened lazily, shared by both mask versions
            mask_path_v1 = tissue_mask_base_path_v1+'/patient_{:03d}_node_{}.npy'.format(i,j)
            if not os.path.exists(mask_path_v1):
                slide = openslide.OpenSlide(wsi_path)
                tissue_mask_v1 = TissueMaskGeneration(slide, level)
                np.save(mask_path_v1, tissue_mask_v1)
                plt.imshow(tissue_mask_v1.T)
                plt.savefig(tissue_mask_base_path_v1 + '/' + os.path.basename(mask_path_v1).split('.')[0]+'.png')
            mask_path_v2 = tissue_mask_base_path_v2+'/patient_{:03d}_node_{}.npy'.format(i,j)
            if not os.path.exists(mask_path_v2):
                # BUGFIX: the original only opened the slide inside the v1
                # branch, raising NameError when just the v2 mask was missing.
                if slide is None:
                    slide = openslide.OpenSlide(wsi_path)
                tissue_mask_v2 = TissueMaskGeneration_BIN_OTSU(slide, level)
                np.save(mask_path_v2, tissue_mask_v2)
                plt.imshow(tissue_mask_v2.T)
                plt.savefig(tissue_mask_base_path_v2 + '/' + os.path.basename(mask_path_v2).split('.')[0]+'.png')
            # Save_path lists
            path_dic['wsi_path'] = wsi_path
            path_dic['label_path'] = label_path
            path_dic['tissue_mask_path_v1'] = mask_path_v1
            path_dic['tissue_mask_path_v2'] = mask_path_v2
            path_dic['model1_path'] = model1_npy_path + '/patient_{:03d}_node_{}.npy'.format(i,j)
            path_dic['model2_path'] = model2_npy_path + '/patient_{:03d}_node_{}.npy'.format(i,j)
            path_dic['model3_path'] = model3_npy_path + '/patient_{:03d}_node_{}.npy'.format(i,j)
            path_dic['ensemble_model_path'] = ensemble_model_npy_path + '/patient_{:03d}_node_{}.npy'.format(i,j)
            path_dic['crf_model_path'] = crf_model_npy_path + '/patient_{:03d}_node_{}.npy'.format(i,j)
            path_dic['png_ensemble_path'] = png_base_path + '/patient_{:03d}_node_{}_ensemble.png'.format(i,j)
            path_dic['png_ensemble_crf_path'] = png_base_path + '/patient_{:03d}_node_{}_ensemble_crf.png'.format(i,j)
            path_dic['csv_ensemble_path'] = csv_base_path + '/patient_{:03d}_node_{}.csv'.format(i,j)
            path_dic['xml_ensemble_path'] = xml_base_path + '/patient_{:03d}_node_{}.xml'.format(i,j)
            path_dic['csv_ensemble_crf_path'] = csv_base_path + '/patient_{:03d}_node_{}_crf.csv'.format(i,j)
            path_dic['xml_ensemble_crf_path'] = xml_base_path + '/patient_{:03d}_node_{}_crf.xml'.format(i,j)
            wsi_dic[wsi_name] = path_dic
    return wsi_dic
def rescale_image_intensity(image, factor=128):
    """Map a [-1, 1]-normalized image back to uint8 pixel intensities.

    BUG FIX: the `factor` parameter was accepted but ignored (128 was
    hard-coded twice); it is now honored. The default of 128 preserves the
    original behavior for existing callers: value * 128 + 128.

    Args:
        image: numpy array of normalized intensities (roughly in [-1, 1]).
        factor: scale/offset applied symmetrically (default 128 -> [0, 255]).

    Returns:
        numpy uint8 array of the same shape.
    """
    return np.uint8(image * factor + factor)
def get_probs_map(model_dic, dataloader, count_map_enabled=True):
    """
    Generate probability map

    Runs every model in `model_dic` over all patches yielded by `dataloader`
    and accumulates, per model, on the low-resolution mask grid:
      * probs_map     -- tumor-class probability, averaged over overlapping patches
      * label_map_t50 -- CRF-refined binary label map at threshold 0.5
    Overlap averaging is done by dividing by `count_map` at the end.

    NOTE(review): `count_map_enabled` is accepted but never used -- confirm
    whether the counting/averaging step was meant to be optional.
    """
    n_models = len(model_dic)
    # One accumulation plane per model, all sized like the dataset's tissue mask.
    probs_map = np.zeros((n_models,) + dataloader.dataset._mask.shape)
    label_map_t50 = np.zeros((n_models,) + dataloader.dataset._mask.shape, dtype=np.uint8)
    count_map = np.zeros((n_models,) + dataloader.dataset._mask.shape, dtype='uint8')
    num_batch = len(dataloader)
    batch_size = dataloader.batch_size
    map_x_size = dataloader.dataset._mask.shape[0]
    map_y_size = dataloader.dataset._mask.shape[1]
    level = dataloader.dataset._level
    # factor = dataloader.dataset._sampling_stride
    # Patch footprint on the mask grid: image_size shrunk by 2**level.
    factor = dataloader.dataset._image_size//pow(2, level)
    down_scale = 1.0 / pow(2, level)
    count = 0
    time_now = time.time()
    for (image_patches, x_coords, y_coords, label_patches) in dataloader:
        image_patches = image_patches.cpu().data.numpy()
        label_patches = label_patches.cpu().data.numpy()
        x_coords = x_coords.cpu().data.numpy()
        y_coords = y_coords.cpu().data.numpy()
        batch_size = image_patches.shape[0]
        for j in range(len(model_dic)):
            # Keras-style predict; y_preds has a per-pixel class axis, channel 1
            # is treated as the tumor probability below.
            y_preds = model_dic[j].predict(image_patches, batch_size=batch_size, verbose=1, steps=None)
            for i in range(batch_size):
                # Shrink the prediction to mask-grid resolution.
                y_preds_rescaled = rescale(y_preds[i], down_scale, anti_aliasing=False)
                # get_index clips the patch extent at the map borders
                # (assumed external helper -- TODO confirm signature).
                xmin, xmax = get_index(x_coords[i], map_x_size, factor)
                ymin, ymax = get_index(y_coords[i], map_y_size, factor)
                # Note the .T: predictions are (row, col) while the maps are (x, y).
                probs_map[j, x_coords[i] - xmin: x_coords[i] + xmax, y_coords[i] - ymin: y_coords[i] + ymax] +=\
                    y_preds_rescaled[:,:,1].T[0:xmin+xmax, 0:ymin+ymax]
                count_map[j, x_coords[i] - xmin: x_coords[i] + xmax, y_coords[i] - ymin: y_coords[i] + ymax] +=\
                    np.ones_like(y_preds_rescaled[:,:,1].T[0:xmin+xmax, 0:ymin+ymax], dtype='uint8')
                # CRF refinement is only worth running when the patch has any
                # pixel above the 0.5 threshold.
                label_t50 = labelthreshold(y_preds[i][:,:,1], threshold=.5)
                if np.sum(label_t50) >0:
                    MAP = do_crf(rescale_image_intensity(image_patches[i]), np.argmax(y_preds[i], axis=2), 2, enable_color=True, zero_unsure=False)
                    # order=0: nearest-neighbour, keeps the labels binary.
                    MAP_rescaled = rescale(MAP, down_scale, order=0, preserve_range=True)
                else:
                    MAP_rescaled = np.zeros_like(y_preds_rescaled[:,:,1])
                label_map_t50[j, x_coords[i] - xmin: x_coords[i] + xmax, y_coords[i] - ymin: y_coords[i] + ymax] =\
                    MAP_rescaled.T[0:xmin+xmax, 0:ymin+ymax]
        count += 1
        time_spent = time.time() - time_now
        time_now = time.time()
        print ('{}, batch : {}/{}, Run Time : {:.2f}'
            .format(
                time.strftime("%Y-%m-%d %H:%M:%S"), count, num_batch, time_spent))
    # imshow(count_map[0].T, count_map[1].T, count_map[2].T)
    # Avoid division by zero where no patch ever covered a pixel.
    np.place(count_map, count_map==0, 1)
    probs_map /= count_map
    # imshow(dataloader.dataset._gt.T, probs_map[0].T, probs_map[1].T, probs_map[2].T, np.mean(probs_map, axis=0).T)
    return probs_map, label_map_t50
def make_dataloader(wsi_path, mask_path, label_path, args, cfg, flip='NONE', rotate='NONE'):
    """Build a DataLoader of strided WSI patches for one slide.

    `cfg` supplies image_size/batch_size; `args` supplies level, stride,
    ROI masking and worker count. `flip`/`rotate` select test-time augmentation.
    """
    dataset = WSIStridedPatchDataset(
        wsi_path, mask_path,
        label_path,
        image_size=cfg['image_size'],
        normalize=True, flip=flip, rotate=rotate,
        level=args.level, sampling_stride=args.sampling_stride,
        roi_masking=args.roi_masking)
    return DataLoader(dataset,
                      batch_size=cfg['batch_size'],
                      num_workers=args.num_workers,
                      drop_last=False)
def run(args):
    """End-to-end inference driver for the CAMELYON17 ensemble.

    Loads up to three segmentation models, runs them over every WSI case,
    and writes per-model/ensemble probability maps, CRF-voted maps, PNG
    previews and NMS candidate lists to the paths prepared by get_wsi_cases.
    Steps whose outputs already exist on disk are skipped, so runs resume.
    """
    # Pin the visible GPU(s) before TensorFlow allocates anything.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.GPU
    logging.basicConfig(level=logging.INFO)
    # Grow GPU memory on demand instead of grabbing it all up front.
    core_config = tf.ConfigProto()
    core_config.gpu_options.allow_growth = True
    session =tf.Session(config=core_config)
    K.set_session(session)
    with open(args.cfg_path) as f:
        cfg = json.load(f)
    # Keys 0/1/2 fix the model order assumed by model1/2/3_path below.
    model_dic = {}
    batch_size = cfg['batch_size']
    image_size = cfg['image_size']
    if args.model_path_DFCN is not None:
        model = unet_densenet121((image_size, image_size), weights=None)
        model.load_weights(args.model_path_DFCN)
        print ("Loaded Model Weights from", args.model_path_DFCN)
        model_dic[0] = model
    if args.model_path_IRFCN is not None:
        model = get_inception_resnet_v2_unet_softmax((image_size, image_size), weights=None)
        model.load_weights(args.model_path_IRFCN)
        print ("Loaded Model Weights from", args.model_path_IRFCN)
        model_dic[1] = model
    if args.model_path_DLv3p is not None:
        model = Deeplabv3(input_shape=(image_size, image_size, 3), weights=None,\
            classes=2, activation = 'softmax', backbone='xception', OS=16)
        model.load_weights(args.model_path_DLv3p)
        print ("Loaded Model Weights from", args.model_path_DLv3p)
        model_dic[2] = model
    wsi_dic = get_wsi_cases(args, train_mode=False, model_name='Ensemble', dataset_name='CM17_Train', patient_range=(100,125), group_range=(0,5))
    for key in wsi_dic.keys():
        print ('Working on:', key)
        wsi_path = wsi_dic[key]['wsi_path']
        label_path = wsi_dic[key]['label_path']
        mask_path = wsi_dic[key]['tissue_mask_path_v2']
        # Heavy inference step -- only when the ensemble map is missing.
        if not os.path.exists(wsi_dic[key]['ensemble_model_path']):
            dataloader = make_dataloader(wsi_path, mask_path, label_path, args, cfg, flip='NONE', rotate='NONE')
            probs_map, label_t50_map = get_probs_map(model_dic, dataloader)
            # Saving the results
            np.save(wsi_dic[key]['model1_path'], probs_map[0])
            np.save(wsi_dic[key]['model2_path'], probs_map[1])
            np.save(wsi_dic[key]['model3_path'], probs_map[2])
            ensemble_prob_map = np.mean(probs_map, axis=0)
            np.save(wsi_dic[key]['ensemble_model_path'], ensemble_prob_map)
            # Majority vote over the per-model CRF label maps: a pixel survives
            # only when at least two models agree (sum 1 -> 0, sum > 1 -> 1).
            voted_label_t50_map = np.sum(label_t50_map, axis=0)
            np.place(voted_label_t50_map, voted_label_t50_map==1,0)
            np.place(voted_label_t50_map, voted_label_t50_map>1,1)
            crf_ensemble_prob_map = ensemble_prob_map*voted_label_t50_map
            np.save(wsi_dic[key]['crf_model_path'], crf_ensemble_prob_map)
        # Quick-look PNG previews of both ensemble maps.
        if not os.path.exists(wsi_dic[key]['png_ensemble_path']):
            im = np.load(wsi_dic[key]['ensemble_model_path'])
            plt.imshow(im.T, cmap='jet')
            plt.savefig(wsi_dic[key]['png_ensemble_path'])
            im = np.load(wsi_dic[key]['crf_model_path'])
            plt.imshow(im.T, cmap='jet')
            plt.savefig(wsi_dic[key]['png_ensemble_crf_path'])
        # Non-maximum suppression (external script) extracts candidate
        # lesion coordinates as CSV/XML.
        if not os.path.exists(wsi_dic[key]['csv_ensemble_path']):
            nms_command = 'python3 nms.py'+' '+wsi_dic[key]['ensemble_model_path']+' '+wsi_dic[key]['csv_ensemble_path']+\
                ' '+wsi_dic[key]['xml_ensemble_path']+' --level='+str(args.level)+' --radius='+str(args.radius)
            print (nms_command)
            os.system(nms_command)
        if not os.path.exists(wsi_dic[key]['csv_ensemble_crf_path']):
            nms_command = 'python3 nms.py'+' '+wsi_dic[key]['crf_model_path']+' '+wsi_dic[key]['csv_ensemble_crf_path']+\
                ' '+wsi_dic[key]['xml_ensemble_crf_path']+' --level='+str(args.level)+' --radius='+str(args.radius)
            print (nms_command)
            os.system(nms_command)
def main():
    """Parse CLI arguments, run the full inference pipeline, report wall time."""
    started = timeit.default_timer()
    run(parser.parse_args())
    elapsed = timeit.default_timer() - started
    print('Time: {:.3f} min'.format(elapsed / 60))
# Script entry point: only run the pipeline when executed directly.
if __name__ == '__main__':
    main()
|
11531998
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
import json
import six
import requests
from .exceptions import RegistryError
# Module API
class Registry(object):
    '''Loads Data Package profiles from a local or remote registry.

    Args:
        registry_path_or_url (str): Path or URL of the registry's JSON file.
            Defaults to the bundled local registry cache.

    Raises:
        RegistryError: If the registry file could not be opened or its
            format was incorrect.
    '''

    # Public

    DEFAULT_REGISTRY_URL = 'https://specs.frictionlessdata.io/schemas/registry.json'
    DEFAULT_REGISTRY_PATH = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        'profiles',
        'registry.json'
    )

    def __init__(self, registry_path_or_url=DEFAULT_REGISTRY_PATH):
        # A base path exists only for local registries; remote ones have none
        # (see the `base_path` property).
        if os.path.isfile(registry_path_or_url):
            self._BASE_PATH = os.path.dirname(
                os.path.abspath(registry_path_or_url)
            )
        self._profiles = {}
        try:
            self._registry = self._get_registry(registry_path_or_url)
        except (IOError, ValueError) as e:
            six.raise_from(RegistryError(e), e)

    @property
    def available_profiles(self):
        '''dict: Metadata of the available profiles, keyed by profile id.'''
        return self._registry

    @property
    def base_path(self):
        '''str: Base path of this Registry (None when it is remote).'''
        return getattr(self, '_BASE_PATH', None)

    def get(self, profile_id):
        '''Return the profile with the received ID as a dict.

        A local copy is preferred over a download, and every profile is
        cached after its first lookup, so repeated calls hit neither the
        filesystem nor the web.

        Args:
            profile_id (str): The ID of the profile you want.

        Raises:
            RegistryError: If the profile file could not be opened or its
                format was incorrect.
        '''
        if profile_id in self._profiles:
            return self._profiles[profile_id]
        try:
            profile = self._get_profile(profile_id)
        except (IOError, ValueError) as e:
            six.raise_from(RegistryError(e), e)
        self._profiles[profile_id] = profile
        return profile

    # Internal

    def _get_profile(self, profile_id):
        '''dict: Load the profile with the received ID (None if unknown).'''
        metadata = self._registry.get(profile_id)
        if not metadata:
            return None
        local_path = self._get_absolute_path(metadata.get('schema_path'))
        remote_url = metadata.get('schema')
        if local_path:
            try:
                return self._load_json_file(local_path)
            except IOError as local_exc:
                # Fall back to the remote copy, but keep the local error as
                # the cause so both failures show up in the traceback.
                if not remote_url:
                    raise local_exc
                try:
                    return self._load_json_url(remote_url)
                except IOError:
                    msg = (
                        'Error loading profile locally at "{path}" '
                        'and remotely at "{url}".'
                    ).format(path=local_path, url=remote_url)
                    six.raise_from(IOError(msg), local_exc)
        elif remote_url:
            return self._load_json_url(remote_url)
        return None

    def _get_registry(self, registry_path_or_url):
        '''dict: Load the registry and key its profiles by id.'''
        if registry_path_or_url.startswith('http'):
            profiles = self._load_json_url(registry_path_or_url)
        else:
            profiles = self._load_json_file(registry_path_or_url)
        try:
            return {profile['id']: profile for profile in profiles}
        except KeyError as e:
            msg = (
                'Registry at "{path}" has no "id" column.'
            ).format(path=registry_path_or_url)
            six.raise_from(ValueError(msg), e)

    def _get_absolute_path(self, relative_path):
        '''str: Join `relative_path` onto the base path (None on failure).'''
        try:
            return os.path.join(self.base_path, relative_path)
        except (AttributeError, TypeError):
            return None

    def _load_json_file(self, path):
        '''dict: Parse the JSON file at the local `path`.'''
        with io.open(path, 'r', encoding='utf-8') as f:
            return json.load(f)

    def _load_json_url(self, url):
        '''dict: Download and parse the JSON document at `url`.'''
        res = requests.get(url)
        res.raise_for_status()
        return res.json()
|
11532020
|
import logging
import torch
import torch.distributed as dist
from torch.autograd import Variable
SMOOTH = 1e-6  # numerical-stability epsilon; unused here but part of the module's API
logger = logging.getLogger("ActiveLearning")


def accuracy(dataloader, net, top_k=(1, 5), **kwargs):
    """Compute top-k accuracy of `net` over `dataloader`, overall and per class.

    The dataloader must yield (inputs, targets, indices) triples, and its
    dataset (or wrapped `dataset.dataset`, e.g. a Subset) must expose
    `num_classes`.

    Required kwargs:
        net_name: identifier of the network (informational only).
    Optional kwargs:
        weights_path: checkpoint to load into `net` before evaluating.

    Returns a dict with top_{k}_correct_count / top_{k}_accuracy for each k,
    plus 'accuracy', 'accuracy_byclass', 'count_byclass', 'corrects_byclass'
    and 'count'. NOTE: classes absent from the data yield NaN in
    'accuracy_byclass' (0/0 division).
    """
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    net_name = kwargs["net_name"]  # required kwarg; currently informational only
    if "weights_path" in kwargs.keys():
        net_dict = torch.load(kwargs["weights_path"], map_location=device)
        net.load_state_dict(net_dict)
        del net_dict  # free the checkpoint memory immediately
    # BUG FIX: the network was never moved to the evaluation device, so on a
    # CUDA machine `net(inputs)` mixed CPU weights with GPU inputs and crashed.
    net = net.to(device)
    net.eval()
    max_k = max(top_k)
    corrects_dict = {k: 0.0 for k in top_k}
    # Subset-style datasets wrap the real dataset in a `.dataset` attribute.
    if "dataset" in dataloader.dataset.__dict__:
        num_classes = dataloader.dataset.dataset.num_classes
    else:
        num_classes = dataloader.dataset.num_classes
    corrects_byclass = torch.zeros((num_classes,), dtype=torch.float)
    count_byclass = torch.zeros((num_classes,), dtype=torch.float)
    for batch_idx, data in enumerate(dataloader):
        inputs, targets, idxs = data
        with torch.no_grad():
            inputs = inputs.to(device)    # [N, C, H, W]
            targets = targets.to(device)  # [N,]
            output = net(inputs)          # [N, O]
            _, predictions = torch.topk(output, max_k, dim=1, largest=True, sorted=True)
            for k in top_k:
                k_pred = predictions[:, :k]
                # A sample is correct if the target appears anywhere in its top-k.
                corrects = torch.sum(k_pred == targets.unsqueeze(1).expand_as(k_pred)).to('cpu')
                corrects_dict[k] += corrects
                if k == 1:
                    # Per-class bookkeeping only makes sense for top-1.
                    corrects_vec = (k_pred == targets[:, None]).to('cpu')  # [N, 1]
                    corrects_byclass_b = corrects_vec & (
                        targets.to('cpu')[:, None] == torch.arange(num_classes)[None, :])
                    corrects_byclass += corrects_byclass_b.sum(dim=0).float()
                    count_byclass += (targets.to('cpu')[:, None] == torch.arange(num_classes)[None,
                                     :]).sum(dim=0).float()
        if batch_idx % 25 == 0:
            logger.info(f"\tEval Batch {batch_idx + 1}/{len(dataloader)}")
    output = {}
    for k in corrects_dict:
        output[f'top_{k}_correct_count'] = corrects_dict[k]
        output[f'top_{k}_accuracy'] = corrects_dict[k] / len(dataloader.dataset)
    output['accuracy'] = corrects_dict[1] / len(dataloader.dataset)
    output['accuracy_byclass'] = corrects_byclass / count_byclass
    output['count_byclass'] = count_byclass
    output['corrects_byclass'] = corrects_byclass
    output['count'] = len(dataloader.dataset)
    return output
def gather_parallel_eval(eval_dict, world_size, device):
    """Reduce per-process evaluation results across a distributed group.

    Sums counts and correct-prediction tallies from all `world_size` ranks
    via all_gather and returns (top1_accuracy, top5_accuracy,
    per_class_accuracy) computed from the aggregated totals.
    The collective calls happen in a fixed order, identical on every rank.
    """
    assert world_size > 1

    def _reduce_scalar(key):
        # Wrap the scalar in a 1-element tensor so it can go through all_gather.
        local = torch.Tensor([eval_dict[key]])
        local = local.to(device)
        gathered = [torch.zeros_like(local) for _ in range(world_size)]
        dist.all_gather(gathered, local)
        return sum(gathered).item()

    def _reduce_tensor(key):
        local = eval_dict[key].clone()
        local = local.to(device)
        gathered = [torch.zeros_like(local) for _ in range(world_size)]
        dist.all_gather(gathered, local)
        return sum(gathered)

    # Totals first, then the correct counts (same collective order as before).
    total = _reduce_scalar('count')
    count_byclass = _reduce_tensor('count_byclass')
    top_1_total = _reduce_scalar('top_1_correct_count')
    top_5_total = _reduce_scalar('top_5_correct_count')
    corrects_byclass = _reduce_tensor('corrects_byclass')

    accuracy_byclass = corrects_byclass / count_byclass
    return top_1_total / total, top_5_total / total, accuracy_byclass.cpu()
def evaluate(dataloader, **kwargs):
    """Dispatch to the metric function named by kwargs['metric'].

    The metric name is resolved in this module's namespace and called as
    metric_func(dataloader, **kwargs).

    Raises:
        ValueError: if `metric` is not a plain identifier (hardening against
            code injection -- the previous implementation passed arbitrary
            strings straight to eval()).
    """
    metric = kwargs["metric"]
    # SECURITY: eval() on an arbitrary string executes arbitrary code. Only a
    # bare function name is ever a legitimate value here, so reject anything
    # else before evaluating.
    if not isinstance(metric, str) or not metric.isidentifier():
        raise ValueError("metric must be a simple function name, got %r" % (metric,))
    metric_func = eval(metric)
    return metric_func(dataloader, **kwargs)
|
11532026
|
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.datasets import load_digits
from deepforest import CascadeForestClassifier
# Deliberately tiny, fully-pinned hyper-parameters (fixed random_state, one
# estimator, shallow trees) so the CascadeForestClassifier models built in the
# test below train fast and deterministically.
toy_kwargs = {
    "n_bins": 10,
    "bin_subsample": 2e5,
    "max_layers": 10,
    "n_estimators": 1,
    "criterion": "gini",
    "n_trees": 100,
    "max_depth": 3,
    "min_samples_leaf": 1,
    "use_predictor": True,
    "predictor": "forest",
    "predictor_kwargs": {},
    "n_tolerant_rounds": 2,
    "delta": 1e-5,
    "n_jobs": -1,
    "random_state": 0,
    "verbose": 2,
}
def test_model_input_label_encoder():
    """Predictions must be identical whether labels are ints or strings."""
    X, y_int = load_digits(return_X_y=True)
    # String labels look like "label_0", "label_1", ...
    y_str = np.char.add("label_", y_int.astype(str))

    # Fit once on the raw integer labels.
    clf_int = CascadeForestClassifier(**toy_kwargs)
    clf_int.fit(X, y_int)
    pred_int = clf_int.predict(X)

    # Fit again on the string labels.
    clf_str = CascadeForestClassifier(**toy_kwargs)
    clf_str.fit(X, y_str)
    pred_str = clf_str.predict(X)

    # The underlying predictions must agree after mapping ints -> strings.
    assert_array_equal(pred_str, np.char.add("label_", pred_int.astype(str)))

    # Clean up buffer
    clf_str.clean()
|
11532029
|
def ANTRuntime():
    """Stub for the ANT runtime factory; not implemented yet.

    BUG FIX: `raise NotImplemented` raised a TypeError, because
    NotImplemented is a comparison sentinel, not an exception class.
    NotImplementedError is the correct exception for an unimplemented stub.
    """
    raise NotImplementedError
def App(on_initialize, on_start, on_stop):
    """
    Args:
        on_initialize (Function): Callback handler invoked on application's initialization.
        on_start (Function): Callback handler invoked when application starts.
        on_stop (Function): Callback handler invoked when application stops.

    Raises:
        NotImplementedError: always; this is an unimplemented stub.
    """
    # BUG FIX: `raise NotImplemented` raised a TypeError (NotImplemented is
    # not an exception); NotImplementedError is the correct stub exception.
    raise NotImplementedError
|
11532050
|
class Tasks(object):
    """Thin wrapper around the remote API's deal-task endpoints."""

    _BASE = "/dealTasks"

    def __init__(self, client):
        self.client = client

    def _task_url(self, task_id):
        # Endpoint for a single task resource.
        return "{}/{}".format(self._BASE, task_id)

    def create_a_task(self, data):
        """Create a new task from the payload in `data`."""
        return self.client._post(self._BASE, json=data)

    def retrieve_a_task(self, task_id):
        """Fetch a single existing task by its id."""
        return self.client._get(self._task_url(task_id))

    def update_a_task(self, task_id, data):
        """Update the task `task_id` with the payload in `data`."""
        return self.client._put(self._task_url(task_id), json=data)

    def delete_a_task(self, task_id):
        """Delete the task `task_id`."""
        return self.client._delete(self._task_url(task_id))

    def list_all_tasks(self, **params):
        """List existing tasks, optionally filtered by query `params`."""
        return self.client._get(self._BASE, params=params)
|
11532064
|
from ctypes import POINTER, c_int, c_uint, c_uint64, c_void_p, c_char_p
from .dll import _bind
from .stdinc import SDL_bool
from .video import SDL_Window
# NOTE: I have no idea whether this module actually works
__all__ = [
"SDL_Vulkan_LoadLibrary", "SDL_Vulkan_GetVkGetInstanceProcAddr",
"SDL_Vulkan_UnloadLibrary", "SDL_Vulkan_GetInstanceExtensions",
"SDL_Vulkan_CreateSurface", "SDL_Vulkan_GetDrawableSize"
]
VkInstance = c_void_p
# TODO: 32bit/64bit
VkSurfaceKHR = c_uint64
SDL_Vulkan_LoadLibrary = _bind("SDL_Vulkan_LoadLibrary", [c_char_p], c_int, added='2.0.6')
SDL_Vulkan_GetVkGetInstanceProcAddr = _bind("SDL_Vulkan_GetVkGetInstanceProcAddr", None, c_void_p, added='2.0.6')
SDL_Vulkan_UnloadLibrary = _bind("SDL_Vulkan_UnloadLibrary", None, None, added='2.0.6')
SDL_Vulkan_GetInstanceExtensions = _bind("SDL_Vulkan_GetInstanceExtensions", [POINTER(SDL_Window), POINTER(c_uint), POINTER(c_char_p)], SDL_bool, added='2.0.6')
SDL_Vulkan_CreateSurface = _bind("SDL_Vulkan_CreateSurface", [POINTER(SDL_Window), VkInstance, POINTER(VkSurfaceKHR)], SDL_bool, added='2.0.6')
SDL_Vulkan_GetDrawableSize = _bind("SDL_Vulkan_GetDrawableSize", [POINTER(SDL_Window), POINTER(c_int), POINTER(c_int)], None, added='2.0.6')
|
11532066
|
import logging
import os
import example_app.static
from jivago.config.production_jivago_context import ProductionJivagoContext
from jivago.jivago_application import JivagoApplication
from jivago.lang.annotations import Override
from jivago.lang.registry import Registry
from jivago.wsgi.routing.router import Router
from jivago.wsgi.routing.serving.static_file_routing_table import StaticFileRoutingTable
class ExampleStaticServingContext(ProductionJivagoContext):
    """Production context that also serves files from example_app.static under /static."""

    @Override
    def create_router(self) -> Router:
        static_folder = os.path.dirname(example_app.static.__file__)
        routing_table = StaticFileRoutingTable(static_folder)
        base_router = super().create_router()
        base_router.add_routing_table(routing_table, "/static")
        return base_router
# Dev entry point: verbose logging plus Jivago's built-in development server.
if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    app = JivagoApplication(example_app, context=ExampleStaticServingContext(example_app, Registry.INSTANCE))
    app.run_dev()
|
11532073
|
import unittest
from django.db import connection
from django.test import TestCase
from ..models import Person
@unittest.skipUnless(connection.vendor == 'postgresql', "Test only for PostgreSQL")
class DatabaseSequenceTests(TestCase):
    """Introspection must report a column's sequence by its *current* name,
    even after the sequence has been renamed away from the default."""

    def test_get_sequences(self):
        with connection.cursor() as cursor:
            # Auto-created sequences follow the <table>_<column>_seq convention.
            seqs = connection.introspection.get_sequences(cursor, Person._meta.db_table)
            self.assertEqual(
                seqs,
                [{'table': Person._meta.db_table, 'column': 'id', 'name': 'backends_person_id_seq'}]
            )
            # Rename the sequence and verify introspection follows the rename.
            cursor.execute('ALTER SEQUENCE backends_person_id_seq RENAME TO pers_seq')
            seqs = connection.introspection.get_sequences(cursor, Person._meta.db_table)
            self.assertEqual(
                seqs,
                [{'table': Person._meta.db_table, 'column': 'id', 'name': 'pers_seq'}]
            )
|
11532085
|
from __future__ import absolute_import
from .legacy import (
create_message, create_gzip_message,
create_snappy_message, create_message_set,
CODEC_NONE, CODEC_GZIP, CODEC_SNAPPY, ALL_CODECS,
ATTRIBUTE_CODEC_MASK, KafkaProtocol,
)
# Mapping of Kafka protocol API key numbers to human-readable request names,
# useful for logging and debugging wire traffic.
API_KEYS = {
    0: 'Produce',
    1: 'Fetch',
    2: 'ListOffsets',
    3: 'Metadata',
    4: 'LeaderAndIsr',
    5: 'StopReplica',
    6: 'UpdateMetadata',
    7: 'ControlledShutdown',
    8: 'OffsetCommit',
    9: 'OffsetFetch',
    10: 'FindCoordinator',
    11: 'JoinGroup',
    12: 'Heartbeat',
    13: 'LeaveGroup',
    14: 'SyncGroup',
    15: 'DescribeGroups',
    16: 'ListGroups',
    17: 'SaslHandshake',
    18: 'ApiVersions',
    19: 'CreateTopics',
    20: 'DeleteTopics',
    21: 'DeleteRecords',
    22: 'InitProducerId',
    23: 'OffsetForLeaderEpoch',
    24: 'AddPartitionsToTxn',
    25: 'AddOffsetsToTxn',
    26: 'EndTxn',
    27: 'WriteTxnMarkers',
    28: 'TxnOffsetCommit',
    29: 'DescribeAcls',
    30: 'CreateAcls',
    31: 'DeleteAcls',
    32: 'DescribeConfigs',
    33: 'AlterConfigs',
}
|
11532098
|
from urllib.parse import urlparse
from privacyscanner.scanmodules import ScanModule
class ExampleScanModule(ScanModule):
    """Minimal example scan module: records whether the site uses HTTPS."""

    name = 'example'
    dependencies = []             # no other scan modules must run first
    required_keys = ['site_url']  # result keys that must exist before scan_site runs

    def scan_site(self, result, meta):
        """Scans a site and adds more information to the result.

        The parameter result behaves like a dictionary, you can set keys on it
        and call the usual methods on dicts. If you change a non-shallow key,
        you have to mark the underlying shallow key as dirty by calling
        result.mark_dirty(shallow_key).

        Furthermore result exposes a logger on the logger parameter where you can
        send log messages to the scanning master.

        For storing files, you can call result.add_file(identifier, filecontents)
        which will send filecontents to the master. If you provide a file-like
        object it will read from the file and send the file contents to the
        master. The identifier represents a file name and must be unique within
        a scan. To store files for debug purposes, call result.add_debug_file
        instead, which has the same API.

        To start with, you can access result['site_url'], which is populated by the
        master.

        The scan module's configuration (as specified under SCAN_MODULE_OPTIONS
        in the configuration file) is available as the dictionary self.options.
        """
        self.logger.info('we will check the site for https')
        parsed_site = urlparse(result['site_url'])
        result['is_https'] = parsed_site.scheme == 'https'
        # result.add_file('screenshot.png')
        if self.options.get('save_nops'):
            result.add_debug_file('nops.bin', b'\x90\x90\x90\x90')
|
11532161
|
import json
from pathlib import Path
from typing import Any, Dict, Union
from . import markers
__all__ = ["matplotlib_style", "markers"]
# Load the bundled matplotlib style definitions and resolve each "marker"
# entry from its JSON name to the actual marker object in the sibling
# `markers` module.
matplotlib_style: Dict[str, Dict[str, Union[str, Any]]] = json.loads(
    (Path(__file__).parent / "matplotlib_style.json").read_text()
)
# Iterate .values() directly (the keys were unused) and test membership with
# `in dict` rather than `in dict.keys()`.
for _style in matplotlib_style.values():
    if "marker" in _style:
        _style["marker"] = getattr(markers, _style["marker"])
|
11532186
|
import struct
def read_string(f, n=0):
    """Read a string from the binary stream `f`.

    With n == 0, reads byte-by-byte until a NUL terminator (consumed, not
    returned) or end of stream. With n > 0, reads exactly n bytes and
    truncates at the first NUL. Decodes as ASCII, falling back to Latin-1
    for bytes >= 0x80.

    BUG FIXES:
      * The terminator loop span forever at EOF (read(1) returns b'' once the
        stream is exhausted); it now stops and returns what was collected.
      * The bare `except:` around decoding (which also swallowed
        KeyboardInterrupt etc.) is narrowed to UnicodeDecodeError.
    """
    if n == 0:
        s = b''
        while True:
            c = f.read(1)
            # Stop at the NUL terminator, or at EOF (empty read).
            if c == b'\0' or not c:
                break
            s += c
    else:
        s = f.read(n)
        s = s.partition(b'\0')[0]
    try:
        s = s.decode('ascii')
    except UnicodeDecodeError:
        s = s.decode('latin-1')  # FIXME: This is a poor fallback
    return s
def read8(f):
    """Read an unsigned 8-bit integer from stream f."""
    (value,) = struct.unpack("<B", f.read(1))
    return value

def read16(f):
    """Read an unsigned little-endian 16-bit integer."""
    (value,) = struct.unpack("<H", f.read(2))
    return value

def read16s(f):
    """Read a signed little-endian 16-bit integer."""
    (value,) = struct.unpack("<h", f.read(2))
    return value

def read32(f):
    """Read an unsigned little-endian 32-bit integer."""
    (value,) = struct.unpack("<I", f.read(4))
    return value

def read32s(f):
    """Read a signed little-endian 32-bit integer."""
    (value,) = struct.unpack("<i", f.read(4))
    return value

def read_float(f):
    """Read a little-endian IEEE-754 single-precision float."""
    (value,) = struct.unpack("<f", f.read(4))
    return value

def read_struct(f, fmt):
    """Read and unpack a whole struct format string; returns a tuple."""
    return struct.unpack(fmt, f.read(struct.calcsize(fmt)))

def align(f, n):
    """Advance f to the next multiple of n bytes; returns the skipped bytes."""
    remainder = f.tell() % n
    if remainder == 0:
        return f.read(0)
    return f.read(n - remainder)
def is_repeating(p, v):
    """True when every element of p equals v (vacuously true for empty p)."""
    return all(element == v for element in p)
def code_string(s):
    """Render s as a double-quoted source-code string literal with escapes."""
    escaped = s.encode('unicode_escape').decode('utf-8')
    escaped = escaped.replace('"', '\\"')
    return '"%s"' % escaped
def code_float(f):
    """Format a float as source text with 6 decimal places."""
    #FIXME: Remove trailing zeroes
    return "{:f}".format(f)
def crc32(data, start=0xFFFFFFFF):
    """Checksum `data` (an iterable of byte values) with this codebase's
    nonstandard CRC-32 variant.

    NOTE(review): this is deliberately NOT the standard zlib/IEEE CRC-32 --
    the accumulator is rotated left (not shifted right) and the mask handling
    differs; the disabled debug branch below shows the author comparing
    against zlib and finding a mismatch. Do not "fix" this to match zlib:
    the game files it targets use these exact checksums.
    """
    result = start
    for byte in data:
        mask = result ^ byte
        for _ in range(8):
            # Rotate the 32-bit accumulator left by one.
            result = ((result << 1) | (result >> 31)) & 0xFFFFFFFF
            if mask & 1:
                result ^= 0xEDB88320
            mask >>= 1
    #FIXME: Make this work somehow?
    if False:
        import zlib
        ref = zlib.crc32(data, 0xFFFFFFFF)
        ref2 = ref ^ 0xFFFFFFFF
        print("%08X == %08X (needs to be %08X)" % (ref, ref2, result))
    return result
# Two permanently-disabled (`if False:`) debug blocks used to live here: a
# manual CRC self-check over command-line arguments, and a slow brute-force
# search for strings matching a table of known checksums. Both carried
# "FIXME: Remove" notes from the original author and could never execute, so
# they have been deleted; recover them from version control if ever needed.
class _FileWriter():
    """Collects text fragments in memory; Save() writes them out as UTF-8."""

    def __init__(self):
        # Ordered fragments; joined only at save time.
        self._contents = []

    def _write(self, data):
        self._contents += [data]

    def Save(self, path):
        """Write all accumulated fragments to `path` as a UTF-8 binary file."""
        with open(path, "wb") as fo:
            fo.write(''.join(self._contents).encode('utf-8'))


class WavefrontMtl(_FileWriter):
    """Minimal Wavefront .mtl (material library) writer."""

    def __init__(self):
        _FileWriter.__init__(self)

    def NewMaterial(self, name):
        """Start a new material definition."""
        #FIXME: How to handle spaces?
        self._write("newmtl %s\n" % name)

    def _map(self, target, name, scale=None):
        """Emit a map_<target> directive; `scale` is an optional (sx, sy, sz)."""
        line = "map_%s" % target
        if scale != None:
            line += " -s %f %f %f" % scale
        #FIXME: Bugs in blender prevent use of quotation marks? Debug..
        # For now, replace spaces by underscore
        line += " %s\n" % name.replace(" ", "_")
        self._write(line)

    def IlluminationMode(self, mode):
        """Set the illumination model (`illum`) for the current material."""
        self._write("illum %d\n" % mode)

    def DiffuseMap(self, name, scale=None):
        """Attach a diffuse (Kd) texture map."""
        self._map("Kd", name, scale)

    def DissolveMap(self, name, scale=None):
        """Attach a dissolve/alpha (d) texture map."""
        self._map("d", name, scale)


class WavefrontObj(_FileWriter):
    """Minimal Wavefront .obj writer.

    Vertex/TextureCoordinate/Normal return the 1-based index OBJ faces use.
    """

    def __init__(self):
        _FileWriter.__init__(self)
        self._vertex_count = 0
        self._normal_count = 0
        self._texture_coordinate_count = 0

    def Object(self, name):
        self._write("o %s\n" % name)

    def MaterialLibrary(self, name):
        self._write("mtllib %s\n" % name)

    def UseMaterial(self, name):
        self._write("usemtl %s\n" % name)

    def Comment(self, comment):
        #FIXME: Split by line and ensure "# " prefix
        self._write("# %s\n" % comment)

    def Vertex(self, x, y, z):
        self._write("v %f %f %f\n" % (x, y, z))
        self._vertex_count += 1
        return self._vertex_count

    def TextureCoordinate(self, u, v):
        self._write("vt %f %f\n" % (u, v))
        self._texture_coordinate_count += 1
        return self._texture_coordinate_count

    def Normal(self, x, y, z):
        self._write("vn %f %f %f\n" % (x, y, z))
        self._normal_count += 1
        return self._normal_count

    def Face(self, vertex_indices, texture_coordinate_indices, normal_indices):
        """Emit an `f` record referencing previously written v/vt/vn indices.

        BUG FIX: the original appended the vt/vn indices only once, after the
        vertex loop (reusing the loop's final index), producing invalid OBJ
        such as `f 1 2 3/6/9`. Each vertex now carries its own `v/vt/vn`
        triplet (`v/vt`, `v//vn`, or plain `v` when data is absent), per the
        OBJ face-element syntax.
        """
        assert(texture_coordinate_indices == None or len(texture_coordinate_indices) == len(vertex_indices))
        assert(normal_indices == None or len(normal_indices) == len(vertex_indices))
        parts = []
        for i, vertex_index in enumerate(vertex_indices):
            part = "%d" % vertex_index
            if texture_coordinate_indices is not None:
                part += "/%d" % texture_coordinate_indices[i]
                if normal_indices is not None:
                    part += "/%d" % normal_indices[i]
            elif normal_indices is not None:
                # No texture coordinate: OBJ requires an empty vt slot (v//vn).
                part += "//%d" % normal_indices[i]
            parts.append(part)
        self._write("f %s\n" % " ".join(parts))
|
11532206
|
from PyQt5.QtWidgets import QLineEdit, QDialog, QFileDialog, QWidget, QTreeWidget, QToolButton, QRadioButton, QMessageBox, QTreeWidgetItem, QTabWidget, QLabel, QCheckBox, QPushButton, QSpinBox
from os.path import basename
from PyQt5.QtGui import QIcon
from PyQt5.QtGui import QColor, QBrush
from PyQt5.QtCore import Qt
from PyQt5 import uic
import configparser
import os
import matplotlib.pyplot as plt
import numpy as np
from pulse.postprocessing.plot_structural_data import get_reactions
from data.user_input.project.printMessageInput import PrintMessageInput
window_title_1 = "ERROR"
window_title_2 = "WARNING"
window_title_3 = "INFORMATION"
class SnaptoCursor(object):
def __init__(self, ax, x, y, show_cursor):
self.ax = ax
self.x = x
self.y = y
self.show_cursor = show_cursor
if show_cursor:
self.vl = self.ax.axvline(x=x[0], color='k', alpha=0.3, label='_nolegend_') # the vertical line
self.hl = self.ax.axhline(y=y[0], color='k', alpha=0.3, label='_nolegend_') # the horizontal line
self.marker, = ax.plot(x[0], y[0], markersize=4, marker="s", color=[0,0,0], zorder=3)
# self.marker.set_label("x: %1.2f // y: %4.2e" % (self.x[0], self.y[0]))
# plt.legend(handles=[self.marker], loc='lower left', title=r'$\bf{Cursor}$ $\bf{coordinates:}$')
def mouse_move(self, event):
if self.show_cursor:
if not event.inaxes:
return
x, y = event.xdata, event.ydata
if x>=np.max(self.x):
return
indx = np.searchsorted(self.x, [x])[0]
x = self.x[indx]
y = self.y[indx]
self.vl.set_xdata(x)
self.hl.set_ydata(y)
self.marker.set_data([x],[y])
self.marker.set_label("x: %1.2f // y: %4.2e" % (x, y))
plt.legend(handles=[self.marker], loc='lower left', title=r'$\bf{Cursor}$ $\bf{coordinates:}$')
self.ax.figure.canvas.draw_idle()
class PlotReactionsInput(QDialog):
    """Dialog for plotting structural reaction frequency responses.

    The user picks a node from one of three tables (constrained DOFs,
    lumped springs, lumped dampers), a local DOF component (Fx..Mz) and a
    representation (absolute / real / imaginary). The dialog plots the
    reaction spectrum, can overlay imported reference data and can export
    the computed response to a .dat file.
    """
    def __init__(self, opv, project, analysisMethod, frequencies, *args, **kwargs):
        super().__init__(*args, **kwargs)
        uic.loadUi('data/user_input/ui/Plots/Results/Structural/plotReactionsInput.ui', self)
        # Window icon / behaviour.
        icons_path = 'data\\icons\\'
        self.icon = QIcon(icons_path + 'pulse.png')
        self.setWindowIcon(self.icon)
        self.userPath = os.path.expanduser('~')
        self.save_path = ""
        self.setWindowFlags(Qt.WindowStaysOnTopHint)
        self.setWindowModality(Qt.WindowModal)
        # Project handles and the three precomputed reaction dictionaries.
        self.opv = opv
        self.opv.setInputObject(self)
        self.preprocessor = project.preprocessor
        self.before_run = self.preprocessor.get_model_checks()
        reactions = project.get_structural_reactions()
        self.dict_reactions_at_constrained_dofs, self.dict_reactions_at_springs, self.dict_reactions_at_dampers = reactions
        self.analysisMethod = analysisMethod
        self.frequencies = frequencies
        self.node_ID = 0
        self.imported_data = None
        self.localDof = None
        # Line edits for node selection and import/export paths.
        self.lineEdit_nodeID = self.findChild(QLineEdit, 'lineEdit_nodeID')
        self.lineEdit_FileName = self.findChild(QLineEdit, 'lineEdit_FileName')
        self.lineEdit_ImportResultsPath = self.findChild(QLineEdit, 'lineEdit_ImportResultsPath')
        self.lineEdit_SaveResultsPath = self.findChild(QLineEdit, 'lineEdit_SaveResultsPath')
        # Import / export buttons.
        self.toolButton_ChooseFolderImport = self.findChild(QToolButton, 'toolButton_ChooseFolderImport')
        self.toolButton_ChooseFolderImport.clicked.connect(self.choose_path_import_results)
        self.toolButton_ChooseFolderExport = self.findChild(QToolButton, 'toolButton_ChooseFolderExport')
        self.toolButton_ChooseFolderExport.clicked.connect(self.choose_path_export_results)
        self.toolButton_ExportResults = self.findChild(QToolButton, 'toolButton_ExportResults')
        self.toolButton_ExportResults.clicked.connect(self.ExportResults)
        self.toolButton_ResetPlot = self.findChild(QToolButton, 'toolButton_ResetPlot')
        self.toolButton_ResetPlot.clicked.connect(self.reset_imported_data)
        # NOTE(review): despite the attribute name, this widget is a QSpinBox
        # (object name 'spinBox') holding the number of header rows to skip.
        self.lineEdit_skiprows = self.findChild(QSpinBox, 'spinBox')
        # Local DOF component selectors (forces and moments).
        self.radioButton_Fx = self.findChild(QRadioButton, 'radioButton_Fx')
        self.radioButton_Fy = self.findChild(QRadioButton, 'radioButton_Fy')
        self.radioButton_Fz = self.findChild(QRadioButton, 'radioButton_Fz')
        self.radioButton_Mx = self.findChild(QRadioButton, 'radioButton_Mx')
        self.radioButton_My = self.findChild(QRadioButton, 'radioButton_My')
        self.radioButton_Mz = self.findChild(QRadioButton, 'radioButton_Mz')
        self.Fx = self.radioButton_Fx.isChecked()
        self.Fy = self.radioButton_Fy.isChecked()
        self.Fz = self.radioButton_Fz.isChecked()
        self.Mx = self.radioButton_Mx.isChecked()
        self.My = self.radioButton_My.isChecked()
        self.Mz = self.radioButton_Mz.isChecked()
        self.list_radioButtons = [ self.radioButton_Fx, self.radioButton_Fy, self.radioButton_Fz,
                                   self.radioButton_Mx, self.radioButton_My, self.radioButton_Mz ]
        # Snap-cursor toggle for the plot.
        self.checkBox_cursor = self.findChild(QCheckBox, 'checkBox_cursor')
        self.cursor = self.checkBox_cursor.isChecked()
        self.checkBox_cursor.clicked.connect(self.update_cursor)
        # Plot representation: absolute / real / imaginary part of the response.
        self.radioButton_plotAbs = self.findChild(QRadioButton, 'radioButton_plotAbs')
        self.radioButton_plotReal = self.findChild(QRadioButton, 'radioButton_plotReal')
        self.radioButton_plotImag = self.findChild(QRadioButton, 'radioButton_plotImag')
        self.radioButton_plotAbs.clicked.connect(self.radioButtonEvent_YAxis)
        self.radioButton_plotReal.clicked.connect(self.radioButtonEvent_YAxis)
        self.radioButton_plotImag.clicked.connect(self.radioButtonEvent_YAxis)
        self.plotAbs = self.radioButton_plotAbs.isChecked()
        self.plotReal = self.radioButton_plotReal.isChecked()
        self.plotImag = self.radioButton_plotImag.isChecked()
        # Export format: absolute value vs. real/imaginary columns.
        self.radioButton_Absolute = self.findChild(QRadioButton, 'radioButton_Absolute')
        self.radioButton_Real_Imaginary = self.findChild(QRadioButton, 'radioButton_Real_Imaginary')
        self.radioButton_Absolute.clicked.connect(self.radioButtonEvent_save_data)
        self.radioButton_Real_Imaginary.clicked.connect(self.radioButtonEvent_save_data)
        self.save_Absolute = self.radioButton_Absolute.isChecked()
        self.save_Real_Imaginary = self.radioButton_Real_Imaginary.isChecked()
        self.tabWidget_plot_results = self.findChild(QTabWidget, "tabWidget_plot_results")
        self.tab_plot = self.tabWidget_plot_results.findChild(QWidget, "tab_plot")
        self.pushButton_AddImportedPlot = self.findChild(QPushButton, 'pushButton_AddImportedPlot')
        self.pushButton_AddImportedPlot.clicked.connect(self.ImportResults)
        self.pushButton_plot_reactions_frequency_response = self.findChild(QPushButton, 'pushButton_plot_reactions_frequency_response')
        self.pushButton_plot_reactions_frequency_response.clicked.connect(self.check)
        # Node tables: springs, dampers and constrained DOFs.
        self.treeWidget_reactions_at_springs = self.findChild(QTreeWidget, 'treeWidget_reactions_at_springs')
        self.treeWidget_reactions_at_springs.setColumnWidth(1, 20)
        self.treeWidget_reactions_at_springs.setColumnWidth(2, 80)
        self.treeWidget_reactions_at_springs.itemClicked.connect(self.on_click_item)
        self.treeWidget_reactions_at_springs.itemDoubleClicked.connect(self.on_doubleclick_item)
        self.treeWidget_reactions_at_dampers = self.findChild(QTreeWidget, 'treeWidget_reactions_at_dampers')
        self.treeWidget_reactions_at_dampers.setColumnWidth(1, 20)
        self.treeWidget_reactions_at_dampers.setColumnWidth(2, 80)
        self.treeWidget_reactions_at_dampers.itemClicked.connect(self.on_click_item)
        self.treeWidget_reactions_at_dampers.itemDoubleClicked.connect(self.on_doubleclick_item)
        self.treeWidget_reactions_at_constrained_dofs = self.findChild(QTreeWidget, 'treeWidget_reactions_at_constrained_dofs')
        self.treeWidget_reactions_at_constrained_dofs.setColumnWidth(1, 20)
        self.treeWidget_reactions_at_constrained_dofs.setColumnWidth(2, 80)
        self.treeWidget_reactions_at_constrained_dofs.itemClicked.connect(self.on_click_item)
        self.treeWidget_reactions_at_constrained_dofs.itemDoubleClicked.connect(self.on_doubleclick_item)
        self.tabWidget_reactions = self.findChild(QTabWidget, "tabWidget_reactions")
        self.tab_constrained_dofs = self.tabWidget_plot_results.findChild(QWidget, "tab_constrained_dofs")
        self.tab_external_springs_dampers = self.tabWidget_plot_results.findChild(QWidget, "tab_external_springs_dampers")
        self.tabWidget_springs_dampers = self.findChild(QTabWidget, "tabWidget_springs_dampers")
        self.tab_nodes_with_springs = self.tabWidget_springs_dampers.findChild(QWidget, "tab_nodes_with_springs")
        self.tab_nodes_with_dampers = self.tabWidget_springs_dampers.findChild(QWidget, "tab_nodes_with_dampers")
        self.load_nodes_info()
        self.exec_()
    def update_cursor(self):
        """Sync the snap-cursor flag with the checkbox state."""
        self.cursor = self.checkBox_cursor.isChecked()
    def reset_imported_data(self):
        """Discard previously imported reference data and notify the user."""
        self.imported_data = None
        title = "Information"
        # NOTE(review): message typo "reseted" (should be "reset") — fix separately.
        message = "The plot data has been reseted."
        PrintMessageInput([title, message, window_title_2])
    def writeNodes(self, list_node_ids):
        """Show the given node ids in the node line edit, comma separated."""
        text = ""
        for node in list_node_ids:
            text += "{}, ".format(node)
        self.lineEdit_nodeID.setText(text)
    def keyPressEvent(self, event):
        """Enter/Return triggers the plot; Escape closes the dialog."""
        if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:
            self.check()
        elif event.key() == Qt.Key_Escape:
            self.close()
    def isInt(self, value):
        """Return True if *value* can be converted to int."""
        try:
            int(value)
            return True
        except:
            return False
    def radioButtonEvent_YAxis(self):
        """Refresh the plot representation flags from the radio buttons."""
        self.plotAbs = self.radioButton_plotAbs.isChecked()
        self.plotReal = self.radioButton_plotReal.isChecked()
        self.plotImag = self.radioButton_plotImag.isChecked()
    def radioButtonEvent_save_data(self):
        """Refresh the export format flags from the radio buttons."""
        self.save_Absolute = self.radioButton_Absolute.isChecked()
        self.save_Real_Imaginary = self.radioButton_Real_Imaginary.isChecked()
    def choose_path_import_results(self):
        """Ask the user for a .dat/.csv file to import reference data from."""
        self.import_path, _ = QFileDialog.getOpenFileName(None, 'Open file', self.userPath, 'Files (*.dat; *.csv)')
        self.import_name = basename(self.import_path)
        self.lineEdit_ImportResultsPath.setText(str(self.import_path))
    def ImportResults(self):
        """Load the selected CSV-like file into self.imported_data."""
        try:
            # Number of header rows to skip comes from the spin box.
            skiprows = int(self.lineEdit_skiprows.text())
            self.imported_data = np.loadtxt(self.import_path, delimiter=",",skiprows=skiprows)
            self.legend_imported = "imported data: "+ basename(self.import_path).split(".")[0]
            self.tabWidget_plot_results.setCurrentWidget(self.tab_plot)
            title = "Loading table"
            message = "The reactions data have been imported."
            PrintMessageInput([title, message, window_title_3])
        except Exception as _error:
            title = "Error reached while loading table"
            message = f"{str(_error)}\n It is recommended to skip the header rows."
            PrintMessageInput([title, message, window_title_1])
    def choose_path_export_results(self):
        """Ask the user for a destination folder for exported results."""
        self.save_path = QFileDialog.getExistingDirectory(None, 'Choose a folder to export the results', self.userPath)
        self.save_name = basename(self.save_path)
        self.lineEdit_SaveResultsPath.setText(str(self.save_path))
    def text_label(self, mask):
        """Return a '[Fx, Mz, ...]' label of the DOF names selected by *mask*.

        Returns an empty string when no entry of *mask* is True.
        """
        text = ""
        load_labels = np.array(['Fx','Fy','Fz','Mx','My','Mz'])
        temp = load_labels[mask]
        if list(mask).count(True) == 6:
            text = "[{}, {}, {}, {}, {}, {}]".format(temp[0], temp[1], temp[2], temp[3], temp[4], temp[5])
        elif list(mask).count(True) == 5:
            text = "[{}, {}, {}, {}, {}]".format(temp[0], temp[1], temp[2], temp[3], temp[4])
        elif list(mask).count(True) == 4:
            text = "[{}, {}, {}, {}]".format(temp[0], temp[1], temp[2], temp[3])
        elif list(mask).count(True) == 3:
            text = "[{}, {}, {}]".format(temp[0], temp[1], temp[2])
        elif list(mask).count(True) == 2:
            text = "[{}, {}]".format(temp[0], temp[1])
        elif list(mask).count(True) == 1:
            text = "[{}]".format(temp[0])
        return text
    def load_nodes_info(self):
        """Populate the three node tables from the preprocessor data."""
        for node in self.preprocessor.nodes_connected_to_springs:
            lumped_stiffness_mask = [False if bc is None else True for bc in node.lumped_stiffness]
            new = QTreeWidgetItem([str(node.external_index), str(self.text_label(lumped_stiffness_mask))])
            new.setTextAlignment(0, Qt.AlignCenter)
            new.setTextAlignment(1, Qt.AlignCenter)
            self.treeWidget_reactions_at_springs.addTopLevelItem(new)
        for node in self.preprocessor.nodes_connected_to_dampers:
            lumped_dampings_mask = [False if bc is None else True for bc in node.lumped_dampings]
            new = QTreeWidgetItem([str(node.external_index), str(self.text_label(lumped_dampings_mask))])
            new.setTextAlignment(0, Qt.AlignCenter)
            new.setTextAlignment(1, Qt.AlignCenter)
            self.treeWidget_reactions_at_dampers.addTopLevelItem(new)
        for node in self.preprocessor.nodes_with_constrained_dofs:
            # A DOF counts as constrained when its prescribed value is exactly 0+0j;
            # table-prescribed (ndarray) DOFs are listed as unconstrained here.
            constrained_dofs_mask = [False, False, False, False, False, False]
            for index, value in enumerate(node.prescribed_dofs):
                if isinstance(value, complex):
                    if value == complex(0):
                        constrained_dofs_mask[index] = True
                elif isinstance(value, np.ndarray):
                    constrained_dofs_mask[index] = False
            # constrained_dofs_mask = np.array(node.prescribed_dofs) == complex(0)
            if constrained_dofs_mask.count(False) != 6:
                new = QTreeWidgetItem([str(node.external_index), str(self.text_label(constrained_dofs_mask))])
                new.setTextAlignment(0, Qt.AlignCenter)
                new.setTextAlignment(1, Qt.AlignCenter)
                self.treeWidget_reactions_at_constrained_dofs.addTopLevelItem(new)
    def disable_non_existing_reactions(self, node_id):
        """Enable only the DOF radio buttons that exist for *node_id*.

        Also selects the reaction dictionary matching the current tab and
        sets self.damper accordingly.
        """
        node = self.preprocessor.nodes[int(node_id)]
        if self.tabWidget_reactions.currentIndex()==0:
            # Constrained-DOFs tab: note that here ndarray-prescribed DOFs are
            # treated as available (True), unlike in load_nodes_info.
            mask = [False, False, False, False, False, False]
            for index, value in enumerate(node.prescribed_dofs):
                if isinstance(value, complex):
                    if value == complex(0):
                        mask[index] = True
                elif isinstance(value, np.ndarray):
                    mask[index] = True
            # mask = np.array(node.prescribed_dofs) == complex(0)
            self.reactions = self.dict_reactions_at_constrained_dofs
            self.damper = False
        elif self.tabWidget_reactions.currentIndex()==1:
            # Springs/dampers tab: pick the sub-tab.
            # NOTE(review): `mask` stays unbound if neither sub-tab index is 0 or 1.
            if self.tabWidget_springs_dampers.currentIndex()==0:
                mask = [False if bc is None else True for bc in node.lumped_stiffness]
                self.reactions = self.dict_reactions_at_springs
                self.damper = False
            elif self.tabWidget_springs_dampers.currentIndex()==1:
                mask = [False if bc is None else True for bc in node.lumped_dampings]
                self.reactions = self.dict_reactions_at_dampers
                self.damper = True
        list_disabled_buttons = []
        for index, radioButton in enumerate(self.list_radioButtons):
            radioButton.setDisabled(not mask[index])
            if not radioButton.isEnabled():
                list_disabled_buttons.append(radioButton)
        # If any button was disabled, make sure the checked one is enabled.
        if len(list_disabled_buttons) > 0:
            for radioButton in self.list_radioButtons:
                if radioButton.isEnabled():
                    radioButton.setChecked(True)
                    break
    def on_click_item(self, item):
        """Single click: copy the node id and refresh the DOF buttons."""
        self.lineEdit_nodeID.setText(item.text(0))
        self.disable_non_existing_reactions(item.text(0))
    def on_doubleclick_item(self, item):
        """Double click: select the node and plot immediately."""
        self.lineEdit_nodeID.setText(item.text(0))
        self.check()
    def button(self):
        """Plot-button slot; delegates to check()."""
        self.check()
    def check(self, export=False):
        """Validate the node id, resolve the selected DOF and (optionally) plot.

        When *export* is True only the selection state is updated; the plot
        is skipped so ExportResults can reuse the resolved DOF.
        """
        lineEdit_nodeID = self.lineEdit_nodeID.text()
        stop, self.node_ID = self.before_run.check_input_NodeID(lineEdit_nodeID, single_ID=True)
        if stop:
            return
        # Map the checked radio button to a local DOF index and its labels.
        self.localDof = None
        if self.radioButton_Fx.isChecked():
            self.localDof = 0
            self.localdof_label = "Fx"
            self.unit_label = "N"
            self.reaction_label = "Force reactions"
        if self.radioButton_Fy.isChecked():
            self.localDof = 1
            self.localdof_label = "Fy"
            self.unit_label = "N"
            self.reaction_label = "Force reactions"
        if self.radioButton_Fz.isChecked():
            self.localDof = 2
            self.localdof_label = "Fz"
            self.unit_label = "N"
            self.reaction_label = "Force reactions"
        if self.radioButton_Mx.isChecked():
            self.localDof = 3
            self.localdof_label = "Mx"
            self.unit_label = "N.m"
            self.reaction_label = "Moment reactions"
        if self.radioButton_My.isChecked():
            self.localDof = 4
            self.localdof_label = "My"
            self.unit_label = "N.m"
            self.reaction_label = "Moment reactions"
        if self.radioButton_Mz.isChecked():
            self.localDof = 5
            self.localdof_label = "Mz"
            self.unit_label = "N.m"
            self.reaction_label = "Moment reactions"
        if not export:
            self.plot()
    def ExportResults(self):
        """Export the selected reaction spectrum to <save_path>/<FileName>.dat."""
        # Both a file name and a destination folder are required.
        if self.lineEdit_FileName.text() != "":
            if self.save_path != "":
                self.export_path_folder = self.save_path + "/"
            else:
                title = "Additional action required"
                # NOTE(review): message typos "Plese"/"trying export" — fix separately.
                message = "Plese, choose a folder before trying export the results!"
                PrintMessageInput([title, message, window_title_2])
                return
        else:
            title = "Additional action required"
            message = "Inform a file name before trying export the results!"
            PrintMessageInput([title, message, window_title_2])
            return
        # Refresh node/DOF selection without plotting.
        self.check(export=True)
        freq = self.frequencies
        self.export_path = self.export_path_folder + self.lineEdit_FileName.text() + ".dat"
        if self.save_Absolute:
            response = get_reactions(self.preprocessor, self.reactions, self.node_ID, self.localDof)
            header = ("Frequency[Hz], Real part [{}], Imaginary part [{}], Absolute [{}]").format(  self.unit_label,
                                                                                                    self.unit_label,
                                                                                                    self.unit_label )
            data_to_export = np.array([freq, np.real(response), np.imag(response), np.abs(response)]).T
        elif self.save_Real_Imaginary:
            response = get_reactions(self.preprocessor, self.reactions, self.node_ID, self.localDof)
            header = ("Frequency[Hz], Real part [{}], Imaginary part [{}]").format(self.unit_label, self.unit_label)
            data_to_export = np.array([freq, np.real(response), np.imag(response)]).T
        np.savetxt(self.export_path, data_to_export, delimiter=",", header=header)
        title = "Information"
        message = "The results have been exported."
        PrintMessageInput([title, message, window_title_2])
    def plot(self):
        """Plot the reaction frequency response, optionally with imported data.

        Uses a log y-scale unless any plotted value is <= 0, in which case a
        linear scale is used.
        """
        fig = plt.figure(figsize=[12,7])
        ax = fig.add_subplot(1,1,1)
        frequencies = self.frequencies
        response = get_reactions(   self.preprocessor,
                                    self.reactions,
                                    self.node_ID,
                                    self.localDof,
                                    absolute=self.plotAbs,
                                    real=self.plotReal,
                                    imaginary=self.plotImag )
        # Damper reactions are undefined at 0 Hz; drop the first sample.
        if self.damper and self.frequencies[0]==0:
            frequencies = self.frequencies[1:]
            response = response[1:]
        if self.plotAbs:
            ax.set_ylabel(("{} - Absolute [{}]").format(self.reaction_label, self.unit_label), fontsize = 14, fontweight = 'bold')
        elif self.plotReal:
            ax.set_ylabel(("{} - Real [{}]").format(self.reaction_label, self.unit_label), fontsize = 14, fontweight = 'bold')
        elif self.plotImag:
            ax.set_ylabel(("{} - Imaginary [{}]").format(self.reaction_label, self.unit_label), fontsize = 14, fontweight = 'bold')
        #cursor = Cursor(ax)
        cursor = SnaptoCursor(ax, frequencies, response, self.cursor)
        plt.connect('motion_notify_event', cursor.mouse_move)
        legend_label = "Reaction {} at node {}".format(self.localdof_label, self.node_ID)
        if self.imported_data is None:
            if any(value<=0 for value in response):
                first_plot, = plt.plot(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)
            else:
                first_plot, = plt.semilogy(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)
            # second_plot, = plt.semilogy(data[:,0], np.abs(data[:,1]+1j*data[:,2]), color=[0,0,1], linewidth=1)
            _legends = plt.legend(handles=[first_plot], labels=[legend_label], loc='upper right')
        else:
            # Overlay: column 0 is frequency, 1 is real part, 2 is imaginary part.
            data = self.imported_data
            imported_Xvalues = data[:,0]
            if self.plotAbs:
                imported_Yvalues = np.abs(data[:,1] + 1j*data[:,2])
            elif self.plotReal:
                imported_Yvalues = data[:,1]
            elif self.plotImag:
                imported_Yvalues = data[:,2]
            if any(value<=0 for value in response) or any(value<=0 for value in imported_Yvalues):
                first_plot, = plt.plot(frequencies, response, color=[1,0,0], linewidth=2)
                second_plot, = plt.plot(imported_Xvalues, imported_Yvalues, color=[0,0,1], linewidth=1, linestyle="--")
            else:
                first_plot, = plt.semilogy(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)
                second_plot, = plt.semilogy(imported_Xvalues, imported_Yvalues, color=[0,0,1], linewidth=1, linestyle="--")
            _legends = plt.legend(handles=[first_plot, second_plot], labels=[legend_label, self.legend_imported], loc='upper right')
        plt.gca().add_artist(_legends)
        ax.set_title(('REACTIONS FREQUENCY RESPONSE - {}').format(self.analysisMethod.upper()), fontsize = 16, fontweight = 'bold')
        ax.set_xlabel(('Frequency [Hz]'), fontsize = 14, fontweight = 'bold')
        plt.show()
|
11532235
|
# Backwards-compatibility shim: keep the old `scrapy.contrib.exporter` module
# path alive by re-exporting everything from `scrapy.exporters`, while warning
# callers (at their import site, hence stacklevel=2) to migrate.
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
warnings.warn("Module `scrapy.contrib.exporter` is deprecated, "
              "use `scrapy.exporters` instead",
              ScrapyDeprecationWarning, stacklevel=2)
from scrapy.exporters import *
# PythonItemExporter is not in `scrapy.exporters.__all__`, so the star import
# above misses it; import it explicitly to preserve the old public surface.
from scrapy.exporters import PythonItemExporter
|
11532272
|
import unittest
import numpy as np
from holoviews import NdOverlay, Element
class CompositeTest(unittest.TestCase):
    """Fixtures shared by the basic composite element tests."""

    def setUp(self):
        # Three 10x2 float arrays holding the constants 0, 1 and 2, each
        # wrapped in a labelled Element (view1..view3).
        arrays = [np.full((10, 2), float(value)) for value in range(3)]
        self.data1, self.data2, self.data3 = arrays
        self.view1, self.view2, self.view3 = [
            Element(data, label='view%d' % (index + 1))
            for index, data in enumerate(arrays)
        ]
class OverlayTest(CompositeTest):
    """Tests NdOverlay construction, iteration and integer indexing."""

    def test_overlay(self):
        # Construction from (index, element) pairs must not raise.
        NdOverlay(list(enumerate([self.view1, self.view2, self.view3])))

    def test_overlay_iter(self):
        # Iterating the overlay yields the elements in insertion order.
        views = [self.view1, self.view2, self.view3]
        overlay = NdOverlay(list(enumerate(views)))
        for el, v in zip(overlay, views):
            self.assertEqual(el, v)

    def test_overlay_integer_indexing(self):
        overlay = NdOverlay(list(enumerate([self.view1, self.view2, self.view3])))
        self.assertEqual(overlay[0], self.view1)
        self.assertEqual(overlay[1], self.view2)
        self.assertEqual(overlay[2], self.view3)
        # assertRaises replaces the old try/except/raise-AssertionError pattern:
        # clearer intent and a proper failure message if no KeyError is raised.
        with self.assertRaises(KeyError):
            overlay[3]
|
11532309
|
import os
from Deployment.ConsumerWorker import celery_worker_app
from Deployment.server_config import IS_TEST, OCR_TRITON_URL, OCR_TRITON_PORT
from Operators.ExampleTextDetectOperator import GeneralDBDetect
from Operators.ExampleTextRecognizeOperator import GeneralCRNN
from Operators.ExampleTextOrientationClassificationOperator import GeneralTextOrientationOperator, TextImageOrientation
from Utils.AnnotationTools import annotate_detect_rotated_bbox_and_text_result
from Utils.GeometryUtils import get_rotated_box_roi_from_image, resize_with_long_side, rotate_degree_img, \
force_convert_image_to_bgr
from Utils.ServiceUtils import ServiceTask
from Utils.Storage import get_oss_handler
from Utils.misc import get_date_string, get_uuid_name
# Initialize every operator used by this service (Triton-backed models).
# CRNN text-recognition operator (res34 backbone, common charset).
text_recognize_op = GeneralCRNN({
    'name': 'triton',
    'backbone_type': 'res34',
    'triton_url': OCR_TRITON_URL,
    'triton_port': OCR_TRITON_PORT
}, 'common', IS_TEST)
# DB text-detection operator (res18 backbone; threshold 0.3, min sizes 5/5).
db_res18_op = GeneralDBDetect({
    'name': 'triton',
    'backbone_type': 'res18',
    'triton_url': OCR_TRITON_URL,
    'triton_port': OCR_TRITON_PORT
}, IS_TEST, 0.3, 5, 5)
# Text-orientation classification operator (confidence threshold 0.8).
text_orientation_op = GeneralTextOrientationOperator({
    'name': 'triton',
    'triton_url': OCR_TRITON_URL,
    'triton_port': OCR_TRITON_PORT
}, IS_TEST, 0.8)
@celery_worker_app.task(name="ConsumerServices.OCRService.text_recognize")
def text_recognize(_image_info, _box_info):
    """
    Recognize the text inside one rotated box of an image.

    Args:
        _image_info: storage descriptor (bucket_name / path) of the full image.
        _box_info: location of the text region within the image.

    Returns:
        dict with the recognized 'text' for that region.
    """
    oss_handler = get_oss_handler()
    full_image = oss_handler.download_image_file(
        _image_info['bucket_name'],
        _image_info['path']
    )
    cropped_image = get_rotated_box_roi_from_image(full_image, _box_info)
    # Straighten the crop before recognition when the classifier reports a rotation.
    orientation = text_orientation_op.execute(cropped_image)['orientation']
    degree_by_orientation = {
        TextImageOrientation.ORIENTATION_90: 90,
        TextImageOrientation.ORIENTATION_180: 180,
        TextImageOrientation.ORIENTATION_270: 270,
    }
    if orientation in degree_by_orientation:
        rotated_image, _ = rotate_degree_img(cropped_image, degree_by_orientation[orientation])
    else:
        rotated_image = cropped_image
    recognize_result = text_recognize_op.execute(rotated_image)
    return {'text': recognize_result['text']}
class TextRecognizeServiceTask(ServiceTask):
    """Service registration binding the text_recognize celery task to the API."""
    service_version = 'v1.0.20210315'
    service_name = 'text_recognize'
    # Result returned when the service runs in mock mode.
    mock_result = {
        'text': '',
    }
    # Request fields that must be present before dispatch.
    require_field = {
        "_image_info",
        "_box_info",
    }
    binding_service = text_recognize
@celery_worker_app.task(name="ConsumerServices.OCRService.text_detect")
def text_detect(_image_info):
    """
    Detect all text regions in an image.

    Args:
        _image_info: storage descriptor (bucket_name / path) of the image.

    Returns:
        dict with 'box_info' (list of rotated-box descriptors with scores)
        and 'box_count'.
    """
    result = {'box_info': [], 'box_count': 0}
    oss_handler = get_oss_handler()
    image = oss_handler.download_image_file(
        _image_info['bucket_name'],
        _image_info['path']
    )
    # Cap the long side at 1024 px before running the detector.
    candidate_img = resize_with_long_side(image, 1024) if max(image.shape[:2]) > 1024 else image
    detect_result = db_res18_op.execute(candidate_img)
    for location in detect_result['locations']:
        box = location['box_info']
        result['box_info'].append({
            'degree': box['degree'],
            'center_x': box['center_x'],
            'center_y': box['center_y'],
            'box_height': box['box_height'],
            'box_width': box['box_width'],
            'score': location['score'],
        })
    result['box_count'] = len(detect_result['locations'])
    return result
class TextDetectServiceTask(ServiceTask):
    """Service registration binding the text_detect celery task to the API."""
    service_version = 'v1.0.20210315'
    service_name = 'text_detect'
    # Result returned when the service runs in mock mode.
    mock_result = {
        'box_info': [
            {
                'degree': 0,
                'center_x': 0.23,
                'center_y': 0.17,
                'box_height': 0.22,
                'box_width': 0.55,
                'score': 0.98,
            },
        ],
        'box_count': 1
    }
    # Request fields that must be present before dispatch.
    require_field = {
        "_image_info",
    }
    binding_service = text_detect
@celery_worker_app.task(name="ConsumerServices.OCRService.ocr_result_visualization")
def ocr_result_visualization(_image_info, _box_info_list, _text_list):
    """
    Render detection boxes and their recognized text onto one image.

    Args:
        _image_info: storage descriptor (bucket_name / path) of the source image.
        _box_info_list: list of all detected boxes.
        _text_list: recognition results, ordered like _box_info_list.

    Returns:
        dict with the OSS bucket, path and retrieval URL of the rendered image.
    """
    oss_handler = get_oss_handler()
    source_image = oss_handler.download_image_file(
        _image_info['bucket_name'],
        _image_info['path']
    )
    # Drawing requires a 3-channel BGR image.
    bgr_image = force_convert_image_to_bgr(source_image)
    result_image = annotate_detect_rotated_bbox_and_text_result(bgr_image,
                                                                _box_info_list,
                                                                _text_list,
                                                                (0, 0, 255),
                                                                3)
    # Store the rendering under result/<date>/<uuid>.
    image_path = os.path.join(get_date_string(), get_uuid_name())
    final_image_path = oss_handler.upload_image_file('result', image_path, result_image, True, 90)
    return {
        'bucket_name': 'result',
        'path': final_image_path,
        'url': oss_handler.get_retrieve_url('result', final_image_path),
    }
class OCRResultVisualizationServiceTask(ServiceTask):
    """Service registration binding ocr_result_visualization to the API."""
    service_version = 'v1.0.20210317'
    service_name = 'ocr_result_visualization'
    # Result returned when the service runs in mock mode.
    mock_result = {'bucket_name': 'result', 'path': 'fake/path.webp'}
    # Request fields that must be present before dispatch.
    require_field = {
        "_image_info",
        "_box_info_list",
        "_text_list"
    }
    binding_service = ocr_result_visualization
|
11532313
|
from typing import List, Optional
from autofit.mapper.prior_model.abstract import AbstractPriorModel
from autofit.non_linear.samples import Samples, Sample
class DrawerSamples(Samples):
    """Samples container for the Drawer non-linear search."""

    def __init__(
        self,
        model: AbstractPriorModel,
        parameter_lists: List[List[float]],
        log_posterior_list: List[float],
        time: Optional[float] = None,
    ):
        """
        Build a Samples object from the Drawer search output.

        The log posterior of every drawn point is stored as given; the log
        prior is evaluated from the model for each parameter vector and the
        log likelihood is recovered as posterior minus prior. Every sample
        carries equal weight.

        Parameters
        ----------
        model
            Maps points in parameter space from unit-cube values to physical
            values via the priors and generates instances from them.
        parameter_lists
            One physical parameter vector per sample.
        log_posterior_list
            Log posterior of each sample, aligned with parameter_lists.
        time
            Optional wall-clock duration of the search.
        """
        self._log_posterior_list = log_posterior_list
        log_prior_list = [
            sum(model.log_prior_list_from_vector(vector=parameters))
            for parameters in parameter_lists
        ]
        # likelihood = posterior - prior (all in log space).
        log_likelihood_list = [
            posterior - prior
            for posterior, prior in zip(self._log_posterior_list, log_prior_list)
        ]
        weight_list = [1.0] * len(log_likelihood_list)
        sample_list = Sample.from_lists(
            model=model,
            parameter_lists=parameter_lists,
            log_likelihood_list=log_likelihood_list,
            log_prior_list=log_prior_list,
            weight_list=weight_list
        )
        super().__init__(
            model=model,
            sample_list=sample_list,
            time=time,
        )
|
11532316
|
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
# cmsRun configuration: beam spot vs. BPIX ladder occupancy / primary-vertex study.
process = cms.Process("bsvsbpix")
#prepare options
options = VarParsing.VarParsing("analysis")
options.register ('globalTag',
                  "DONOTEXIST",
                  VarParsing.VarParsing.multiplicity.singleton, # singleton or list
                  VarParsing.VarParsing.varType.string,          # string, int, or float
                  "GlobalTag")
#options.globalTag = "DONOTEXIST"
options.parseArguments()
#
process.options = cms.untracked.PSet(
    wantSummary = cms.untracked.bool(True),
    fileMode = cms.untracked.string("FULLMERGE")
    )
# Message logging: INFO to stdout (report every 10k events), WARNING to stderr.
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cout.enable = cms.untracked.bool(True)
process.MessageLogger.cout.threshold = cms.untracked.string("INFO")
process.MessageLogger.cout.default = cms.untracked.PSet(
    limit = cms.untracked.int32(10000000)
    )
process.MessageLogger.cout.FwkReport = cms.untracked.PSet(
    reportEvery = cms.untracked.int32(10000)
    )
process.MessageLogger.cerr.enable = cms.untracked.bool(True)
process.MessageLogger.cerr.threshold = cms.untracked.string("WARNING")
process.MessageLogger.cerr.default = cms.untracked.PSet(
    limit = cms.untracked.int32(10000000)
    )
process.MessageLogger.cerr.FwkReport = cms.untracked.PSet(
    reportEvery = cms.untracked.int32(100000)
    )
#----Remove too verbose PrimaryVertexProducer
process.MessageLogger.suppressInfo.append("pixelVerticesAdaptive")
process.MessageLogger.suppressInfo.append("pixelVerticesAdaptiveNoBS")
#----Remove too verbose BeamSpotOnlineProducer
process.MessageLogger.suppressInfo.append("testBeamSpot")
process.MessageLogger.suppressInfo.append("onlineBeamSpot")
process.MessageLogger.suppressWarning.append("testBeamSpot")
process.MessageLogger.suppressWarning.append("onlineBeamSpot")
#----Remove too verbose TrackRefitter
process.MessageLogger.suppressInfo.append("newTracksFromV0")
process.MessageLogger.suppressInfo.append("newTracksFromOtobV0")
#------------------------------------------------------------------
# Input: all events from the files given on the command line.
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.source = cms.Source("PoolSource",
                    fileNames = cms.untracked.vstring(options.inputFiles),
#                    skipBadFiles = cms.untracked.bool(True),
                    inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*")
                    )
# Online beam spot producer (sigmaZ taken from the payload, not overridden).
process.load("RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi")
process.onlineBeamSpotProducer.setSigmaZ = cms.double(-1.)
# Pixel-cluster occupancy/multiplicity maps on BPIX ladders.
from DPGAnalysis.SiStripTools.occupancyplotsselections_bpixladders_cff import *
process.spclusoccuprod = cms.EDProducer("SiPixelClusterMultiplicityProducer",
                                        clusterdigiCollection = cms.InputTag("siPixelClusters"),
                                        withClusterSize = cms.untracked.bool(True),
                                        wantedSubDets = cms.VPSet()
                                        )
process.spclusoccuprod.wantedSubDets.extend(OccupancyPlotsBPIXLadders)
process.spclusmultprod = process.spclusoccuprod.clone(withClusterSize = cms.untracked.bool(False))
process.load("DPGAnalysis.SiStripTools.occupancyplots_cfi")
process.occupancyplots.wantedSubDets = process.spclusmultprod.wantedSubDets
process.occupancyplots.multiplicityMaps = cms.VInputTag(cms.InputTag("spclusmultprod"))
process.occupancyplots.occupancyMaps = cms.VInputTag(cms.InputTag("spclusoccuprod"))
# Beam-spot vs. primary-vertex analyzers.
process.load("Validation.RecoVertex.bspvanalyzer_cfi")
process.bspvanalyzer.pvCollection = cms.InputTag("goodVertices")
process.bspvanalyzer.bspvHistogramMakerPSet.histoParameters = cms.untracked.PSet(
    nBinX = cms.untracked.uint32(2000), xMin=cms.untracked.double(-0.2), xMax=cms.untracked.double(0.2),
    nBinY = cms.untracked.uint32(2000), yMin=cms.untracked.double(-0.2), yMax=cms.untracked.double(0.2),
    nBinZ = cms.untracked.uint32(200), zMin=cms.untracked.double(-30.), zMax=cms.untracked.double(30.),
    nBinZProfile = cms.untracked.uint32(60), zMinProfile=cms.untracked.double(-30.), zMaxProfile=cms.untracked.double(30.)
    )
#process.bspvanalyzer.bspvHistogramMakerPSet.runHisto = cms.untracked.bool(True)   # This is true by default
process.bspvanalyzer.bspvHistogramMakerPSet.runHistoBX2D = cms.untracked.bool(True)
process.bspvnoslope = process.bspvanalyzer.clone()
process.bspvnoslope.bspvHistogramMakerPSet.useSlope = cms.bool(False)
process.load("Validation.RecoVertex.beamspotanalyzer_cfi")
process.onlinebeamspotanalyzer = process.beamspotanalyzer.clone(bsCollection = cms.InputTag("onlineBeamSpotProducer"))
process.load("Validation.RecoVertex.anotherprimaryvertexanalyzer_cfi")
process.primaryvertexanalyzer.vHistogramMakerPSet.runHistoBXProfile2D = cms.untracked.bool(True)
process.primaryvertexanalyzer.vHistogramMakerPSet.runHisto2D = cms.untracked.bool(True)
process.load("Validation.RecoVertex.pvSelectionSequence_cff")
# Single processing path.
process.p0 = cms.Path(process.onlineBeamSpotProducer +
                      process.spclusoccuprod + process.spclusmultprod +
                      process.occupancyplots +
                      process.goodVertices +
                      process.beamspotanalyzer + process.onlinebeamspotanalyzer +
                      process.primaryvertexanalyzer +
                      process.bspvanalyzer + process.bspvnoslope)
#----GlobalTag ------------------------
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, options.globalTag, '')
process.load("Configuration.StandardSequences.GeometryDB_cff")
# Histogram output file.
process.TFileService = cms.Service('TFileService',
                                   fileName = cms.string('bsvsbpix.root')
                                   )
|
11532359
|
from monai.inferers import SlidingWindowInferer
import torch
from typing import Callable, Any
from loguru import logger
class SlidingWindowInferer(SlidingWindowInferer):
    """Sliding-window inferer that also supports 2D networks over 3D volumes.

    Extends ``monai.inferers.SlidingWindowInferer`` (the base class is the
    imported MONAI class of the same name): when a 2D ``roi_size`` is given
    for a 3D input volume, the ROI is broadcast with a singleton depth so the
    window slides over every slice, and each window is squeezed to 2D before
    being fed to the network.
    """

    def __init__(self, *args, **kwargs):
        # Module-level loguru logger, kept as an attribute for easy override.
        self.logger = logger
        super().__init__(*args, **kwargs)

    def __call__(
        self,
        inputs: torch.Tensor,
        network: Callable[..., torch.Tensor],
        *args: Any,
        **kwargs: Any,
    ) -> torch.Tensor:
        """Run sliding-window inference; extra args/kwargs go to ``network``."""
        # Check if roi size and full volume size are not matching
        if len(self.roi_size) != len(inputs.shape[2:]):
            self.logger.debug(
                f"ROI size: {self.roi_size} and input volume: {inputs.shape[2:]} do not match \n"
                "Broadcasting ROI size to match input volume size.")
            # If they do not match and roi_size is 2D add another dimension to roi size
            if len(self.roi_size) == 2:
                self.roi_size = [1, *self.roi_size]
            else:
                raise RuntimeError("Unsupported roi size, cannot broadcast to volume. ")
        # Fix: forward *args/**kwargs to the wrapped network. They were accepted
        # by this signature but silently dropped by the old lambda.
        return super().__call__(
            inputs, lambda x: self.network_wrapper(network, x, *args, **kwargs)
        )

    def network_wrapper(self, network, x, *args, **kwargs):
        """
        Wrapper handles cases where inference needs to be done using
        2D models over 3D volume inputs.
        """
        # If depth dim is 1 in [D, H, W] roi size, then the input is 2D and needs
        # be handled accordingly
        if self.roi_size[0] == 1:
            # Pass [N, C, H, W] to the model as it is 2D.
            x = x.squeeze(dim=2)
            out = network(x, *args, **kwargs)
            # Unsqueeze the network output so it is [N, C, D, H, W]
            return out.unsqueeze(dim=2)
        else:
            return network(x, *args, **kwargs)
|
11532365
|
import pytari2600.memory.cartridge as cartridge
import unittest
import pkg_resources
class TestCartridge(unittest.TestCase):
    """Exercises GenericCartridge ROM reads, RAM writes and mirroring."""

    def _make_cart(self, ram_offset):
        # 4 KB generic cartridge built from the bundled dummy ROM; ram_offset
        # selects the RAM window (0x0 disables RAM).
        rom_file = pkg_resources.resource_filename(__name__, 'dummy_rom.bin')
        return cartridge.GenericCartridge(rom_file, 4, 0x1000, 0xFF9, ram_offset)

    def test_cartridge(self):
        cart = self._make_cart(0x0)
        # Writes to a ROM-only cartridge are ignored.
        cart.write(0, 7)
        self.assertEqual(cart.read(0), 0)
        self.assertEqual(cart.read(3), 3)
        self.assertEqual(cart.read(2048 + 2), 2)

    def test_ram_cartridge(self):
        cart = self._make_cart(0x080)
        # Writes land in RAM and read back at the RAM window offset.
        cart.write(0, 7)
        self.assertEqual(cart.read(0x80), 7)
        cart.write(0, 31)
        self.assertEqual(cart.read(0x80), 31)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
11532379
|
from typing import Any
import pytest
from pydantic import BaseModel
from pydantic_factories import (
AsyncPersistenceProtocol,
ModelFactory,
SyncPersistenceProtocol,
)
class MyModel(BaseModel):
    """Minimal pydantic model used as the factory target in these tests."""
    name: str
class MySyncPersistenceHandler(SyncPersistenceProtocol):
    """No-op synchronous persistence handler: echoes the data back unchanged."""

    def save(self, data: Any, *args, **kwargs) -> Any:
        saved = data
        return saved

    def save_many(self, data: Any, *args, **kwargs) -> Any:
        saved = data
        return saved
class MyAsyncPersistenceHandler(AsyncPersistenceProtocol):
    """No-op asynchronous persistence handler: echoes the data back unchanged."""

    async def save(self, data: Any, *args, **kwargs) -> Any:
        saved = data
        return saved

    async def save_many(self, data: Any, *args, **kwargs) -> Any:
        saved = data
        return saved
def test_sync_persistence_handler_is_set_and_called_with_instance():
    """Handler given as an *instance* is used for single and batch creation."""
    class MyFactory(ModelFactory):
        __model__ = MyModel
        __sync_persistence__ = MySyncPersistenceHandler()

    single = MyFactory.create_sync()
    assert single.name
    batch = MyFactory.create_batch_sync(size=2)
    assert [item.name for item in batch]
def test_sync_persistence_handler_is_set_and_called_with_class():
    """Handler given as a *class* is instantiated and used by the factory."""
    class MyFactory(ModelFactory):
        __model__ = MyModel
        __sync_persistence__ = MySyncPersistenceHandler

    single = MyFactory.create_sync()
    assert single.name
    batch = MyFactory.create_batch_sync(size=2)
    assert [item.name for item in batch]
@pytest.mark.asyncio
async def test_async_persistence_handler_is_set_and_called_with_instance():
    """Async handler given as an *instance* is awaited for single and batch."""
    class MyFactory(ModelFactory):
        __model__ = MyModel
        __async_persistence__ = MyAsyncPersistenceHandler()

    created = await MyFactory.create_async()
    assert created.name
    batch = await MyFactory.create_batch_async(size=2)
    assert [item.name for item in batch]
@pytest.mark.asyncio
async def test_async_persistence_handler_is_set_and_called_with_class():
    """Async handler given as a *class* is instantiated and awaited."""
    class MyFactory(ModelFactory):
        __model__ = MyModel
        __async_persistence__ = MyAsyncPersistenceHandler

    created = await MyFactory.create_async()
    assert created.name
    batch = await MyFactory.create_batch_async(size=2)
    assert [item.name for item in batch]
|
11532383
|
from pyspark.mllib.recommendation import ALS
from numpy import array
# Load and parse the data
# NOTE(review): assumes a SparkContext ``sc`` already exists in the session.
data = sc.textFile("data/mllib/als/test.data")
# Each line is "user,product,rating" -> numpy array of three floats.
ratings = data.map(lambda line: array([float(x) for x in line.split(',')]))
# Build the recommendation model using Alternating Least Squares
rank = 10
numIterations = 20
model = ALS.train(ratings, rank, numIterations)
# Evaluate the model on training data
# Predict a rating for every (user, product) pair seen in training.
testdata = ratings.map(lambda p: (int(p[0]), int(p[1])))
predictions = model.predictAll(testdata).map(lambda r: ((r[0], r[1]), r[2]))
# Join actual ratings with predictions, keyed by (user, product).
ratesAndPreds = ratings.map(lambda r: ((r[0], r[1]), r[2])).join(predictions)
MSE = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).reduce(lambda x, y: x + y)/ratesAndPreds.count()
print("Mean Squared Error = " + str(MSE))
|
11532412
|
import unittest
import torch
import torch.nn
from torch.autograd import Variable
import memcnn.models.revop as revop
import numpy as np
import copy
class ReversibleOperationsTestCase(unittest.TestCase):
    """Tests for memcnn's ReversibleBlock: forward/inverse consistency,
    gradient equivalence across implementations, and memory savings."""
    def test_reversible_block(self):
        """ReversibleBlock test
        * test inversion Y = RB(X) and X = RB.inverse(Y)
        * test training the block for a single step and compare weights for implementations 0, 1
        * test automatic discard of input X and its retrieval after the backward pass
        * test usage of BN to identify non-contiguous memory blocks
        """
        dims = (2, 10, 8, 8)
        data = np.random.random(dims).astype(np.float32)
        target_data = np.random.random(dims).astype(np.float32)
        impl_out, impl_grad = [], []
        class SubModule(torch.nn.Module):
            def __init__(self):
                super(SubModule, self).__init__()
                self.bn = torch.nn.BatchNorm2d(10 // 2)
                self.conv = torch.nn.Conv2d(10 // 2, 10 // 2, (3, 3), padding=1)
            def forward(self, x):
                return self.bn(self.conv(x))
        Gm = SubModule()
        s_grad = [p.data.numpy().copy() for p in Gm.parameters()]
        # Duplicate entries deliberately run every implementation twice.
        implementations_fwd = [0, 0, 1, 1]
        implementations_bwd = [0, 0, 1, 1]
        for coupling in ['additive', 'affine']:
            for keep_input in [False, True]:
                for implementation_fwd in implementations_fwd:
                    for implementation_bwd in implementations_bwd:
                        # same convolution test
                        X = Variable(torch.from_numpy(data.copy()))
                        Ytarget = Variable(torch.from_numpy(target_data.copy()))
                        Xshape = X.shape
                        Gm2 = copy.deepcopy(Gm)
                        rb = revop.ReversibleBlock(Gm2, coupling=coupling,
                                                   keep_input=keep_input,
                                                   implementation_fwd=implementation_fwd,
                                                   implementation_bwd=implementation_bwd)
                        rb.train()
                        rb.zero_grad()
                        optim = torch.optim.RMSprop(rb.parameters())
                        optim.zero_grad()
                        Y = rb(X)
                        Xinv = rb.inverse(Y.clone())
                        loss = torch.nn.MSELoss()(Y, Ytarget)
                        # has input been retained/discarded after forward pass?
                        if keep_input:
                            self.assertTrue(X.data.shape == Xshape)
                        else:
                            # storage was freed; X.data is empty until backward restores it
                            self.assertTrue(len(X.data.shape) == 0 or (len(X.data.shape) == 1 and X.data.shape[0] == 0))
                        optim.zero_grad()
                        loss.backward()
                        optim.step()
                        self.assertTrue(Y.shape == Xshape)
                        self.assertTrue(X.data.numpy().shape == data.shape)
                        self.assertTrue(np.allclose(X.data.numpy(), data, atol=1e-06))
                        self.assertTrue(np.allclose(X.data.numpy(), Xinv.data.numpy()))
                        impl_out.append(Y.data.numpy().copy())
                        impl_grad.append([p.data.numpy().copy() for p in Gm2.parameters()])
                        self.assertFalse(np.allclose(impl_grad[-1][0], s_grad[0]))
        # output and gradients per implementation similar ?
        self.assertTrue(np.allclose(impl_out[0], impl_out[1]))
        for i in range(0, len(implementations_bwd) * len(implementations_fwd) - 1, 1):
            self.assertTrue(np.allclose(impl_grad[i][0], impl_grad[i + 1][0]))
    def test_reversible_block_inv(self):
        """ReversibleBlock test using inverse training
        * test inversion X = RB.inverse(Y) and Y = RB(X)
        * test training the block for a single step and compare weights for implementations 0, 1
        * test automatic discard of input Y and its retrieval after the backward pass of inverse
        * test usage of BN to identify non-contiguous memory blocks
        """
        dims = (2, 10, 8, 8)
        data = np.random.random(dims).astype(np.float32)
        target_data = np.random.random(dims).astype(np.float32)
        impl_out, impl_grad = [], []
        class SubModule(torch.nn.Module):
            def __init__(self):
                super(SubModule, self).__init__()
                self.bn = torch.nn.BatchNorm2d(10 // 2)
                self.conv = torch.nn.Conv2d(10 // 2, 10 // 2, (3, 3), padding=1)
            def forward(self, x):
                return self.bn(self.conv(x))
        Gm = SubModule()
        s_grad = [p.data.numpy().copy() for p in Gm.parameters()]
        # Duplicate entries deliberately run every implementation twice.
        implementations_fwd = [0, 0, 1, 1]
        implementations_bwd = [0, 0, 1, 1]
        for coupling in ['additive', 'affine']:
            for keep_input in [False, True]:
                for implementation_fwd in implementations_fwd:
                    for implementation_bwd in implementations_bwd:
                        # same convolution test
                        Y = Variable(torch.from_numpy(data.copy()))
                        Xtarget = Variable(torch.from_numpy(target_data.copy()))
                        Yshape = Y.shape
                        Gm2 = copy.deepcopy(Gm)
                        rb = revop.ReversibleBlock(Gm2, coupling=coupling,
                                                   keep_input=keep_input,
                                                   implementation_fwd=implementation_fwd,
                                                   implementation_bwd=implementation_bwd)
                        rb.train()
                        rb.zero_grad()
                        optim = torch.optim.RMSprop(rb.parameters())
                        optim.zero_grad()
                        X = rb.inverse(Y)
                        Yinv = rb(X.clone())
                        loss = torch.nn.MSELoss()(X, Xtarget)
                        # has input been retained/discarded after forward pass?
                        if keep_input:
                            self.assertTrue(Y.data.shape == Yshape)
                        else:
                            # storage was freed; Y.data is empty until backward restores it
                            self.assertTrue(len(Y.data.shape) == 0 or (len(Y.data.shape) == 1 and Y.data.shape[0] == 0))
                        optim.zero_grad()
                        loss.backward()
                        optim.step()
                        self.assertTrue(X.shape == Yshape)
                        self.assertTrue(Y.data.numpy().shape == data.shape)
                        self.assertTrue(np.allclose(Y.data.numpy(), data, atol=1e-06))
                        self.assertTrue(np.allclose(Y.data.numpy(), Yinv.data.numpy()))
                        impl_out.append(X.data.numpy().copy())
                        impl_grad.append([p.data.numpy().copy() for p in Gm2.parameters()])
                        self.assertFalse(np.allclose(impl_grad[-1][0], s_grad[0]))
        # output and gradients per implementation similar ?
        self.assertTrue(np.allclose(impl_out[0], impl_out[1]))
        for i in range(0, len(implementations_bwd) * len(implementations_fwd) - 1, 1):
            self.assertTrue(np.allclose(impl_grad[i][0], impl_grad[i + 1][0]))
    def test_normal_vs_revblock(self):
        """ReversibleBlock test if similar gradients and weights results are obtained after similar training
        * test training the block for a single step and compare weights and grads for implementations 0, 1 and 2
        * test against normal non Reversible Block function
        * test if recreated input and produced output are contiguous
        """
        for implementation_fwd in [0, 1]:
            for implementation_bwd in [0, 1]:
                X = Variable(torch.rand(2, 4, 5, 5))
                # define models and their copies
                c1 = torch.nn.Conv2d(2, 2, 3, padding=1)
                c2 = torch.nn.Conv2d(2, 2, 3, padding=1)
                c1_2 = copy.deepcopy(c1)
                c2_2 = copy.deepcopy(c2)
                # are weights between models the same, but do they differ between convolutions?
                self.assertTrue(torch.equal(c1.weight, c1_2.weight))
                self.assertTrue(torch.equal(c2.weight, c2_2.weight))
                self.assertTrue(torch.equal(c1.bias, c1_2.bias))
                self.assertTrue(torch.equal(c2.bias, c2_2.bias))
                self.assertFalse(torch.equal(c1.weight, c2.weight))
                # define optimizers
                optim1 = torch.optim.SGD([e for e in c1.parameters()] + [e for e in c2.parameters()], 0.1)
                optim2 = torch.optim.SGD([e for e in c1_2.parameters()] + [e for e in c2_2.parameters()], 0.1)
                for e in [c1, c2, c1_2, c2_2]:
                    e.train()
                # define an arbitrary reversible function and define graph for model 1
                Xin = X.clone()
                # TODO: add normal test for affine coupling
                fn = revop.ReversibleBlock(c1_2, c2_2, coupling='additive',
                                           keep_input=False,
                                           implementation_fwd=implementation_fwd,
                                           implementation_bwd=implementation_bwd)
                Y = fn.forward(Xin)
                loss2 = torch.mean(Y)
                # define the reversible function without custom backprop and define graph for model 2
                XX = Variable(X.clone().data, requires_grad=True)
                x1, x2 = torch.chunk(XX, 2, dim=1)
                y1 = x1 + c1.forward(x2)
                y2 = x2 + c2.forward(y1)
                YY = torch.cat([y1, y2], dim=1)
                loss = torch.mean(YY)
                # compute gradients manually
                grads = torch.autograd.grad(loss, (XX, c1.weight, c2.weight, c1.bias, c2.bias), None, retain_graph=True)
                # compute gradients and perform optimization model 2
                loss.backward()
                optim1.step()
                # gradients computed manually match those of the .backward() pass
                self.assertTrue(torch.equal(c1.weight.grad, grads[1]))
                self.assertTrue(torch.equal(c2.weight.grad, grads[2]))
                self.assertTrue(torch.equal(c1.bias.grad, grads[3]))
                self.assertTrue(torch.equal(c2.bias.grad, grads[4]))
                # weights differ after training a single model?
                self.assertFalse(torch.equal(c1.weight, c1_2.weight))
                self.assertFalse(torch.equal(c2.weight, c2_2.weight))
                self.assertFalse(torch.equal(c1.bias, c1_2.bias))
                self.assertFalse(torch.equal(c2.bias, c2_2.bias))
                # compute gradients and perform optimization model 1
                loss2.backward()
                optim2.step()
                # input is contiguous tests
                self.assertTrue(Xin.is_contiguous())
                self.assertTrue(Y.is_contiguous())
                # weights are approximately the same after training both models?
                self.assertTrue(np.allclose(c1.weight.data.numpy(), c1_2.weight.data.numpy()))
                self.assertTrue(np.allclose(c2.weight.data.numpy(), c2_2.weight.data.numpy()))
                self.assertTrue(np.allclose(c1.bias.data.numpy(), c1_2.bias.data.numpy()))
                self.assertTrue(np.allclose(c2.bias.data.numpy(), c2_2.bias.data.numpy()))
                # gradients are approximately the same after training both models?
                self.assertTrue(np.allclose(c1.weight.grad.data.numpy(), c1_2.weight.grad.data.numpy()))
                self.assertTrue(np.allclose(c2.weight.grad.data.numpy(), c2_2.weight.grad.data.numpy()))
                self.assertTrue(np.allclose(c1.bias.grad.data.numpy(), c1_2.bias.grad.data.numpy()))
                self.assertTrue(np.allclose(c2.bias.grad.data.numpy(), c2_2.bias.grad.data.numpy()))
    @unittest.skipIf(not torch.cuda.is_available(), reason='This test requires a GPU to be available')
    def test_memory_saving(self):
        """Test memory saving of the reversible block
        * tests fitting a large number of images by creating a very deep network requiring big
          intermediate feature maps for training
        * input size in bytes: np.prod((2, 10, 2000, 2000)) * 4 / (1024 ** 2)
                   (approx.) = 305 MB
        * tuned on a Titan X with 12 GB of RAM (depth=25 will just fit, but depth=250 will clearly not fit)
          This will approximately require:
            depth=25:            7629 MB
            depth=250:          76293 MB
        NOTE: This test assumes it is ran on a machine with a GPU with less than +/- 76293 MB
        NOTE: This test can be quite slow to execute
        """
        dims = (2, 10, 2000, 2000)
        data = np.random.random(dims).astype(np.float32)
        target_data = np.random.random(dims).astype(np.float32)
        class SubModule(torch.nn.Module):
            def __init__(self):
                super(SubModule, self).__init__()
                self.bn = torch.nn.BatchNorm2d(10 // 2)
                self.conv = torch.nn.Conv2d(10 // 2, 10 // 2, (3, 3), padding=1)
            def forward(self, x):
                return self.bn(self.conv(x))
        class SubModuleStack(torch.nn.Module):
            def __init__(self, Gm, coupling, depth=10, implementation_fwd=1, implementation_bwd=1, keep_input=False):
                super(SubModuleStack, self).__init__()
                self.stack = torch.nn.Sequential(
                    *[revop.ReversibleBlock(Gm, Gm, coupling, keep_input=keep_input, implementation_fwd=implementation_fwd, implementation_bwd=implementation_bwd) for _ in range(depth)]
                )
            def forward(self, x):
                return self.stack(x)
        for coupling in ['additive', 'affine']:
            for keep_input in [False, True]:
                for implementation_fwd in [1]:
                    for implementation_bwd in [1]:
                        # same convolution test
                        X = Variable(torch.from_numpy(data.copy())).cuda()
                        Ytarget = Variable(torch.from_numpy(target_data.copy())).cuda()
                        network = SubModuleStack(SubModule(), coupling, depth=250, keep_input=keep_input,
                                                 implementation_fwd=implementation_fwd,
                                                 implementation_bwd=implementation_bwd)
                        network.cuda()
                        network.train()
                        network.zero_grad()
                        optim = torch.optim.RMSprop(network.parameters())
                        optim.zero_grad()
                        try:
                            Y = network(X)
                            loss = torch.nn.MSELoss()(Y, Ytarget)
                            optim.zero_grad()
                            loss.backward()
                            optim.step()
                            # Should not be reached when input is kept
                            self.assertFalse(keep_input)
                        except RuntimeError:
                            # Running out of memory should only happen when input is kept
                            self.assertTrue(keep_input)
                        finally:
                            # free GPU memory before the next configuration
                            del network
                            del optim
                            del X
                            del Ytarget
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
11532455
|
from setuptools import setup

# see https://stackoverflow.com/questions/14399534/reference-requirements-txt-for-the-install-requires-kwarg-in-setuptools-setup-py
setup(
    name='fuzzymatcher',
    version='0.0.5',
    description='Fuzzy match two pandas dataframes based on one or more common fields',
    url='https://github.com/RobinL/fuzzymatcher',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    packages=['fuzzymatcher'],  # The directory to look in for the source code
    install_requires=['pandas', 'metaphone', 'python-Levenshtein', 'fuzzywuzzy', 'python-dateutil'],
    # BUG FIX: the setuptools keyword is ``tests_require`` —
    # ``test_requires`` was silently ignored.
    tests_require=["pylint", "coverage", "codecov"],
    # (typo fixed: "probabalistic" -> "probabilistic")
    keywords=["matching", "fuzzy", "probabilistic", "recordlinking", "fuzzymatching"],
    # BUG FIX: tarball now matches the declared version (was v0.0.4 for 0.0.5).
    download_url='https://github.com/RobinL/fuzzymatcher/archive/v0.0.5.tar.gz',
    zip_safe=False,
)
|
11532501
|
import logging
from flask import current_app
from app.clients.sms.firetext import (
FiretextClient
)
# Module-level logger for this module.
logger = logging.getLogger(__name__)
class LoadtestingClient(FiretextClient):
    '''
    Loadtest sms client.

    Reuses FiretextClient's request/response handling but configures itself
    with load-testing credentials instead of the production Firetext config.
    '''
    def init_app(self, config, statsd_client, *args, **kwargs):
        # NOTE(review): ``super(FiretextClient, self)`` deliberately skips
        # FiretextClient.__init__ and calls its base class instead; this
        # method sets all required attributes itself — confirm intentional.
        super(FiretextClient, self).__init__(*args, **kwargs)
        self.current_app = current_app
        self.api_key = config.config.get('LOADTESTING_API_KEY')
        self.from_number = config.config.get('FROM_NUMBER')
        self.name = 'loadtesting'
        self.url = "https://www.firetext.co.uk/api/sendsms/json"
        self.statsd_client = statsd_client
|
11532503
|
import os
import string
import random
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, GRU
from tensorflow.keras.losses import sparse_categorical_crossentropy
from tensorflow.keras.models import load_model
from matplotlib import pyplot as plt
# Character vocabulary: every printable ASCII character, with forward and
# reverse lookup tables for char <-> integer-index conversion.
vocab = sorted(set(string.printable))
char_to_ind = {u: i for i, u in enumerate(vocab)}
ind_to_char = np.array(vocab)
class Generator(object):
    """Character-level GRU text generator.

    Builds, trains, checkpoints and samples from a stacked stateful-GRU Keras
    model over the printable-ASCII vocabulary (module-level ``char_to_ind`` /
    ``ind_to_char`` lookup tables).
    """
    def __init__(self, rnn_neurons=256, embed_dim=64, dropout=0.3, num_layers=2, learning_rate=1e-4):
        self.model = None
        self.vocab = sorted(set(string.printable))
        self.vocab_size = len(self.vocab)
        # All tunables live in one dict so they can be printed together.
        self.hparams = {'rnn_neurons': rnn_neurons,
                        'embed_dim': embed_dim,
                        'learning_rate': learning_rate,
                        'dropout': dropout,
                        'num_layers': num_layers}
    def _createModel(self, batch_size):
        """Build and compile the stateful GRU stack for ``batch_size``."""
        model = Sequential()
        model.add(Embedding(self.vocab_size, self.hparams['embed_dim'], batch_input_shape=[batch_size, None]))
        for _ in range(self.hparams['num_layers']):
            model.add(GRU(self.hparams['rnn_neurons'], return_sequences=True, stateful=True,
                          recurrent_initializer='glorot_uniform', dropout=self.hparams['dropout']))
        # Final layer emits raw logits over the vocabulary (no softmax).
        model.add(Dense(self.vocab_size))
        opt = tf.keras.optimizers.Adam(learning_rate=self.hparams['learning_rate'])
        model.compile(optimizer=opt, loss=self._sparse_cat_loss)
        self.model = model
    def _sparse_cat_loss(self, y_true, y_pred):
        # from_logits=True because the Dense head has no activation.
        return sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)
    def load_weights(self, weight_file_path):
        '''
        Constructs the model and loads the weights
        Parameters:
            weight_file_path (str): Path to weights location
        Returns:
            None
        Raises:
            FileNotFoundError: if ``weight_file_path`` does not exist
        '''
        if os.path.exists(weight_file_path):
            # Batch size 1 so the stateful model can be used for sampling.
            self._createModel(batch_size=1)
            self.model.load_weights(weight_file_path)
            self.model.build(tf.TensorShape([1, None]))
        else:
            raise FileNotFoundError(weight_file_path)
    def _fit_epochs(self, data, epochs, verbose, save_at):
        """Epoch loop shared by the CPU and GPU paths; saves a checkpoint
        every ``save_at``-th epoch."""
        rnn_neurons = self.hparams['rnn_neurons']
        for epoch in range(1, epochs + 1):
            print(f'Epoch {epoch}/{epochs}')
            self.model.fit(data, epochs=1, verbose=verbose)
            # BUG FIX: was ``(epoch + 1) % save_at`` which saved one epoch
            # early (e.g. epoch 4 for save_at=5); ``epoch`` starts at 1.
            if epoch % save_at == 0:
                self.model.save(f'model-{epoch}-epochs-{rnn_neurons}-neurons.h5')
    def train(self, data, epochs=1, verbose=1, save_at=5, cuda=False):
        '''
        Trains the model for a given number of epochs
        Parameters:
            data : batched dataset of (input, target) index sequences
            epochs (int) : number of epochs to train on
            verbose (bool) : to print loss and epoch number or not to
            save_at (int) : save a checkpoint every n-th epoch
            cuda (bool) : require and use the first GPU device
        Returns:
            None
        Raises:
            SystemError: if ``cuda`` is True but no GPU is available
        '''
        self._createModel(batch_size=128)
        if cuda:
            device_name = tf.test.gpu_device_name()
            if device_name != '/device:GPU:0':
                raise SystemError('GPU device not found')
            print('Found GPU at: {}'.format(device_name))
            with tf.device('/device:GPU:0'):
                self._fit_epochs(data, epochs, verbose, save_at)
        else:
            # Same loop as the GPU branch (previously duplicated inline).
            self._fit_epochs(data, epochs, verbose, save_at)
    def predict(self, start_seed, gen_size=100, temp=None):
        '''
        Generates further texts according to the seed text
        Parameters:
            start_seed (str) : seed that model will use to generate further texts
            gen_size (int) : number of characters to generate 700 - 1000 are the most ideal ones
            temp (float) : sampling temperature; drawn uniformly from (0, 1)
                per call when not given
        Returns:
            str : the seed followed by the generated characters
        Raises:
            ValueError: if no model has been built/loaded yet
        '''
        if self.model is None:
            raise ValueError('Model Object cannot be NoneType')
        # BUG FIX: the old default ``temp=random.uniform(0, 1)`` was evaluated
        # once at class-definition time, so every call shared one fixed
        # temperature (which could also be ~0). Sample per call instead.
        if temp is None:
            temp = random.uniform(0, 1)
        # Rebuild the model with batch size 1 by round-tripping the weights.
        self.model.save_weights('model_weights.h5')
        self.load_weights('model_weights.h5')
        os.remove('model_weights.h5')
        num_generate = gen_size
        input_eval = [char_to_ind[s] for s in start_seed]
        input_eval = tf.expand_dims(input_eval, 0)
        text_generated = []
        temperature = temp
        self.model.reset_states()
        for _ in range(num_generate):
            predictions = self.model(input_eval)
            predictions = tf.squeeze(predictions, 0)
            # Lower temperature -> sharper distribution; higher -> more random.
            predictions = predictions / temperature
            predicted_id = tf.random.categorical(predictions, num_samples=1)[-1, 0].numpy()
            # Feed the sampled character back in as the next model input.
            input_eval = tf.expand_dims([predicted_id], 0)
            text_generated.append(ind_to_char[predicted_id])
        return (start_seed + ''.join(text_generated))
    def hyperparams(self):
        """Pretty-print the hyper-parameter table."""
        print('Hyper Parameters')
        print('+--------------------------+')
        for key, value in self.hparams.items():
            print("|{: <13} | {: >10}|".format(key, value))
        print('+--------------------------+')
    def summary(self):
        """Print the underlying Keras model summary."""
        self.model.summary()
    @property
    def __doc__(self):
        return '''
        Generator object can construct the model,
        save the weights, load the weights train the model,
        and make predictions
        ---------------------------------------------------
        Training example :
        model = Generator() # creating an instance of model
        model.train(dataset, epochs = 5, verbose=1, save_at=1) # training the model
        ----------------------------------------------------
        Continue training from a saved weights file :
        model = Generator() # creating an instance of model
        model.load_weights('model-3-epochs.h5', mode = 'training') # loading the weights
        model.train(dataset, epochs = 5, verbose=1, save_at=1) # training the model
        -----------------------------------------------------
        Prediction example :
        model = Generator() # creating an instance of model
        model.load_weights('model-10-epochs.h5') # loading the weights
        print(model.predict('hello')) # making prediction and printing
        -----------------------------------------------------
        '''
|
11532519
|
from typing_extensions import final
from typing import Any, cast
from .tensor import Tensor
from .tensor import TensorType
from .tensor import TensorOrScalar
def unwrap_(*args: Any) -> Any:
    """Return ``args`` as a tuple with every Tensor replaced by its raw value."""
    unwrapped = []
    for item in args:
        unwrapped.append(item.raw if isinstance(item, Tensor) else item)
    return tuple(unwrapped)
def unwrap1(t: Any) -> Any:
    """Return ``t.raw`` when ``t`` is a Tensor, otherwise ``t`` unchanged."""
    if isinstance(t, Tensor):
        return t.raw
    return t
class BaseTensor(Tensor):
    """Backend-agnostic Tensor base: stores the native tensor in ``_raw`` and
    implements the operator protocol by delegating to it, re-wrapping every
    result in ``type(self)`` so subclass identity is preserved."""
    # single attribute, no per-instance __dict__
    __slots__ = "_raw"
    def __init__(self: TensorType, raw: Any):
        # Never double-wrap: ``raw`` must be a native backend tensor.
        assert not isinstance(raw, Tensor)
        self._raw = raw
    @property
    def raw(self) -> Any:
        # The underlying backend tensor (read-only).
        return self._raw
    @final
    def __repr__(self: TensorType) -> str:
        # Wrap the backend repr in "ClassName(...)", indenting extra lines
        # so multi-line reprs stay aligned.
        lines = repr(self.raw).split("\n")
        prefix = self.__class__.__name__ + "("
        lines[0] = prefix + lines[0]
        prefix = " " * len(prefix)
        for i in range(1, len(lines)):
            lines[i] = prefix + lines[i]
        lines[-1] = lines[-1] + ")"
        return "\n".join(lines)
    @final
    def __format__(self: TensorType, format_spec: str) -> str:
        return format(self.raw, format_spec)
    @final
    @property
    def dtype(self: TensorType) -> Any:
        return self.raw.dtype
    @final
    def __bool__(self: TensorType) -> bool:
        return bool(self.raw)
    @final
    def __len__(self: TensorType) -> int:
        # Length of the first axis, mirroring numpy/torch semantics.
        return cast(int, self.raw.shape[0])
    @final
    def __abs__(self: TensorType) -> TensorType:
        return type(self)(abs(self.raw))
    @final
    def __neg__(self: TensorType) -> TensorType:
        return type(self)(-self.raw)
    # Binary operators: unwrap the other operand (Tensor -> raw), delegate to
    # the backend dunder, and re-wrap the result.
    @final
    def __add__(self: TensorType, other: TensorOrScalar) -> TensorType:
        return type(self)(self.raw.__add__(unwrap1(other)))
    @final
    def __radd__(self: TensorType, other: TensorOrScalar) -> TensorType:
        return type(self)(self.raw.__radd__(unwrap1(other)))
    @final
    def __sub__(self: TensorType, other: TensorOrScalar) -> TensorType:
        return type(self)(self.raw.__sub__(unwrap1(other)))
    @final
    def __rsub__(self: TensorType, other: TensorOrScalar) -> TensorType:
        return type(self)(self.raw.__rsub__(unwrap1(other)))
    @final
    def __mul__(self: TensorType, other: TensorOrScalar) -> TensorType:
        return type(self)(self.raw.__mul__(unwrap1(other)))
    @final
    def __rmul__(self: TensorType, other: TensorOrScalar) -> TensorType:
        return type(self)(self.raw.__rmul__(unwrap1(other)))
    @final
    def __truediv__(self: TensorType, other: TensorOrScalar) -> TensorType:
        return type(self)(self.raw.__truediv__(unwrap1(other)))
    @final
    def __rtruediv__(self: TensorType, other: TensorOrScalar) -> TensorType:
        return type(self)(self.raw.__rtruediv__(unwrap1(other)))
    @final
    def __floordiv__(self: TensorType, other: TensorOrScalar) -> TensorType:
        return type(self)(self.raw.__floordiv__(unwrap1(other)))
    @final
    def __rfloordiv__(self: TensorType, other: TensorOrScalar) -> TensorType:
        return type(self)(self.raw.__rfloordiv__(unwrap1(other)))
    @final
    def __mod__(self: TensorType, other: TensorOrScalar) -> TensorType:
        return type(self)(self.raw.__mod__(unwrap1(other)))
    @final
    def __pow__(self: TensorType, exponent: TensorOrScalar) -> TensorType:
        return type(self)(self.raw.__pow__(unwrap1(exponent)))
    @final
    @property
    def ndim(self: TensorType) -> int:
        # Number of axes of the underlying tensor.
        return len(self.raw.shape)
|
11532598
|
import numpy as np
import pytest
from neuraxle.base import ExecutionMode
from neuraxle.pipeline import Pipeline
from neuraxle.steps.flow import ReversiblePreprocessingWrapper
from neuraxle.steps.misc import TapeCallbackFunction, CallbackWrapper
from neuraxle.steps.numpy import MultiplyByN, AddN
from testing.steps.neuraxle_test_case import NeuraxleTestCase
# Shared fixtures: inputs 0..4, expected (fit) outputs 5..9, and the expected
# pipeline result of multiply-by-2 -> add-10 -> inverse (divide-by-2).
DATA_INPUTS = np.array(range(5))
EXPECTED_OUTPUTS = np.array(range(5, 10))
EXPECTED_PROCESSED_OUTPUTS = np.array([5.0, 6.0, 7.0, 8.0, 9.0])
# Tape callbacks record every call's arguments so the parametrized cases
# below can assert exactly what each wrapped step received.
tape_transform_preprocessing = TapeCallbackFunction()
tape_fit_preprocessing = TapeCallbackFunction()
tape_transform_postprocessing = TapeCallbackFunction()
tape_fit_postprocessing = TapeCallbackFunction()
tape_inverse_transform_preprocessing = TapeCallbackFunction()
# Three cases cover TRANSFORM, FIT_TRANSFORM and FIT execution modes; the
# expected_callbacks_data entries mirror the callbacks list order.
@pytest.mark.parametrize('test_case', [
    NeuraxleTestCase(
        pipeline=Pipeline([
            ReversiblePreprocessingWrapper(
                # NOTE(review): this case passes tape_fit_postprocessing as the
                # preprocessing *fit* callback (the FIT_TRANSFORM case below uses
                # tape_fit_preprocessing) — harmless under TRANSFORM since fit is
                # never called, but confirm it is not a copy/paste slip.
                preprocessing_step=CallbackWrapper(MultiplyByN(2), tape_transform_preprocessing, tape_fit_postprocessing, tape_inverse_transform_preprocessing),
                postprocessing_step=CallbackWrapper(AddN(10), tape_transform_postprocessing, tape_fit_postprocessing)
            )]
        ),
        callbacks=[tape_transform_preprocessing, tape_fit_preprocessing, tape_transform_postprocessing, tape_fit_postprocessing, tape_inverse_transform_preprocessing],
        expected_callbacks_data=[
            [DATA_INPUTS],
            [],
            [DATA_INPUTS * 2],
            [],
            [(DATA_INPUTS * 2) + 10]
        ],
        data_inputs=DATA_INPUTS,
        expected_processed_outputs=EXPECTED_PROCESSED_OUTPUTS,
        execution_mode=ExecutionMode.TRANSFORM
    ),
    NeuraxleTestCase(
        pipeline=Pipeline([
            ReversiblePreprocessingWrapper(
                preprocessing_step=CallbackWrapper(MultiplyByN(2), tape_transform_preprocessing, tape_fit_preprocessing, tape_inverse_transform_preprocessing),
                postprocessing_step=CallbackWrapper(AddN(10), tape_transform_postprocessing, tape_fit_postprocessing)
            )]
        ),
        callbacks=[tape_transform_preprocessing, tape_fit_preprocessing, tape_transform_postprocessing, tape_fit_postprocessing, tape_inverse_transform_preprocessing],
        expected_callbacks_data=[
            [DATA_INPUTS],
            [(DATA_INPUTS, EXPECTED_OUTPUTS)],
            [DATA_INPUTS * 2],
            [(DATA_INPUTS * 2, EXPECTED_OUTPUTS)],
            [(DATA_INPUTS * 2) + 10]
        ],
        data_inputs=DATA_INPUTS,
        expected_outputs=EXPECTED_OUTPUTS,
        expected_processed_outputs=EXPECTED_PROCESSED_OUTPUTS,
        execution_mode=ExecutionMode.FIT_TRANSFORM
    ),
    NeuraxleTestCase(
        pipeline=Pipeline([
            ReversiblePreprocessingWrapper(
                preprocessing_step=CallbackWrapper(MultiplyByN(2), tape_transform_preprocessing, tape_fit_preprocessing, tape_inverse_transform_preprocessing),
                postprocessing_step=CallbackWrapper(AddN(10), tape_transform_postprocessing, tape_fit_postprocessing)
            )]
        ),
        callbacks=[tape_transform_preprocessing, tape_fit_preprocessing, tape_transform_postprocessing, tape_fit_postprocessing, tape_inverse_transform_preprocessing],
        expected_callbacks_data=[
            [DATA_INPUTS],
            [(DATA_INPUTS, EXPECTED_OUTPUTS)],
            [],
            [(DATA_INPUTS * 2, EXPECTED_OUTPUTS)],
            []
        ],
        data_inputs=DATA_INPUTS,
        expected_outputs=EXPECTED_OUTPUTS,
        execution_mode=ExecutionMode.FIT
    )
])
def test_reversible_preprocessing_wrapper(test_case):
    """Run the case's pipeline and check outputs plus per-callback call data."""
    processed_outputs = test_case.execute()
    test_case.assert_expected_processed_outputs(processed_outputs)
    test_case.assert_callback_data_is_as_expected()
|
11532601
|
# Endpoint presets for the public TRON networks.
CONF_MAINNET = {
    "fullnode": "https://api.trongrid.io",
    "event": "https://api.trongrid.io",
}
# The long running, maintained by the tron-us community
CONF_SHASTA = {
    "fullnode": "https://api.shasta.trongrid.io",
    "event": "https://api.shasta.trongrid.io",
    "faucet": "https://www.trongrid.io/faucet",
}
# Maintained by the official team
CONF_NILE = {
    "fullnode": "https://api.nileex.io",
    "event": "https://event.nileex.io",
    "faucet": "http://nileex.io/join/getJoinPage",
}
# Maintained by the official team
CONF_TRONEX = {
    "fullnode": "https://testhttpapi.tronex.io",
    "event": "https://testapi.tronex.io",
    "faucet": "http://testnet.tronex.io/join/getJoinPage",
}
# Registry of all known networks by short name.
ALL = {
    "mainnet": CONF_MAINNET,
    "nile": CONF_NILE,
    "shasta": CONF_SHASTA,
    "tronex": CONF_TRONEX,
}
def conf_for_name(name: str) -> dict:
    """Return the endpoint configuration for *name*, or None if unknown.

    (``dict.get`` already defaults to None; the explicit default was redundant.)
    """
    return ALL.get(name)
|
11532647
|
from typing import Type, TypeVar
from copy import deepcopy
from datapipelines import DataTransformer, PipelineContext
from ..core.championmastery import ChampionMasteryData, ChampionMasteryListData, ChampionMastery, ChampionMasteries
from ..dto.championmastery import ChampionMasteryDto, ChampionMasteryListDto
# Generic placeholders for the dispatch-based transform signatures below
# (T = target type, F = source value type).
T = TypeVar("T")
F = TypeVar("F")
class ChampionMasteryTransformer(DataTransformer):
    """Transforms champion-mastery objects: DTO -> data layer -> core layer."""

    @DataTransformer.dispatch
    def transform(self, target_type: Type[T], value: F, context: PipelineContext = None) -> T:
        pass

    # Dto to Data

    @transform.register(ChampionMasteryDto, ChampionMasteryData)
    def champion_mastery_dto_to_data(self, value: ChampionMasteryDto, context: PipelineContext = None) -> ChampionMasteryData:
        return ChampionMasteryData(**value)

    @transform.register(ChampionMasteryListDto, ChampionMasteryListData)
    def champion_mastery_list_dto_to_data(self, value: ChampionMasteryListDto, context: PipelineContext = None) -> ChampionMasteryListData:
        data = deepcopy(value)
        data["masteries"] = [self.champion_mastery_dto_to_data(c) for c in data["masteries"]]
        # Data objects are callable to update their fields in place.
        for c in data["masteries"]:
            c(region=data["region"])
        data = data["masteries"]
        return ChampionMasteryListData(data, region=value["region"], summoner_id=value["summonerId"])

    # Data to Core

    # BUG FIX: the two decorators below were corrupted to "<EMAIL>(...)" in
    # the checked-in file (a sanitizer artifact); restored to the
    # ``@transform.register`` form used by the registrations above.
    @transform.register(ChampionMasteryData, ChampionMastery)
    def champion_mastery_data_to_core(self, value: ChampionMasteryData, context: PipelineContext = None) -> ChampionMastery:
        return ChampionMastery.from_data(value)

    @transform.register(ChampionMasteryListData, ChampionMasteries)
    def champion_mastery_list_data_to_core(self, value: ChampionMasteryListData, context: PipelineContext = None) -> ChampionMasteries:
        return ChampionMasteries.from_data(*[self.champion_mastery_data_to_core(cm) for cm in value], region=value.region, summoner=value.summoner_id)
|
11532655
|
import json
import sys
import iotbx.phil
from cctbx.miller.display import render_2d, scene
from dials.util import Sorry
from iotbx.gui_tools.reflections import get_array_description
from iotbx.reflection_file_reader import any_reflection_file
from scitbx.array_family import flex
class MultiplicityViewPng(render_2d):
    """Render a 2D multiplicity plot of a reflection slice to a PNG file via
    matplotlib (Agg backend); circles are collected during render() and drawn
    in two scatter batches, with a colorbar for the filled (multiplicity)
    points."""
    def __init__(self, scene, settings=None):
        import matplotlib

        # must select the non-interactive backend before pyplot is imported
        matplotlib.use("Agg")
        from matplotlib import pyplot

        render_2d.__init__(self, scene, settings)
        # draw_* callbacks below accumulate points here instead of drawing
        # immediately, so each style can be issued as one scatter call
        self._open_circle_points = flex.vec2_double()
        self._open_circle_radii = []
        self._open_circle_colors = []
        self._filled_circle_points = flex.vec2_double()
        self._filled_circle_radii = []
        self._filled_circle_colors = []
        self.fig, self.ax = pyplot.subplots(figsize=self.settings.size_inches)
        # render() also saves the figure to settings.plot.filename
        self.render(self.ax)
        pyplot.close()

    def GetSize(self):
        return self.fig.get_size_inches() * self.fig.dpi  # size in pixels

    def draw_line(self, ax, x1, y1, x2, y2):
        ax.plot([x1, x2], [y1, y2], c=self._foreground)

    def draw_text(self, ax, text, x, y):
        ax.text(x, y, text, color=self._foreground, size=self.settings.font_size)

    def draw_open_circle(self, ax, x, y, radius, color=None):
        # deferred: stored and drawn in one scatter call inside render()
        self._open_circle_points.append((x, y))
        self._open_circle_radii.append(2 * radius)
        if color is None:
            color = self._foreground
        self._open_circle_colors.append(color)

    def draw_filled_circle(self, ax, x, y, radius, color):
        # deferred: stored and drawn in one scatter call inside render()
        self._filled_circle_points.append((x, y))
        self._filled_circle_radii.append(2 * radius)
        self._filled_circle_colors.append(color)

    def render(self, ax):
        from matplotlib import pyplot
        from matplotlib import colors

        # base-class render() drives the draw_* callbacks above
        render_2d.render(self, ax)
        if self._open_circle_points.size():
            x, y = self._open_circle_points.parts()
            ax.scatter(
                x.as_numpy_array(),
                y.as_numpy_array(),
                s=self._open_circle_radii,
                marker="o",
                edgecolors=self._open_circle_colors,
                # NOTE(review): facecolors=None gives default-filled markers;
                # matplotlib expects the string "none" for unfilled circles —
                # confirm which is intended here.
                facecolors=None,
            )
        if self._filled_circle_points.size():
            x, y = self._filled_circle_points.parts()
            # use pyplot colormaps then we can more easily get a colorbar
            data = self.scene.multiplicities.data()
            # map the settings' color_scheme names onto matplotlib colormaps;
            # "mono" collapses to a single color matching the background mode
            cmap_d = {
                "heatmap": "hot",
                "redblue": colors.LinearSegmentedColormap.from_list(
                    "RedBlue", ["b", "r"]
                ),
                "grayscale": "Greys_r" if self.settings.black_background else "Greys",
                "mono": (
                    colors.LinearSegmentedColormap.from_list("mono", ["w", "w"])
                    if self.settings.black_background
                    else colors.LinearSegmentedColormap.from_list(
                        "mono", ["black", "black"]
                    )
                ),
            }
            cm = cmap_d.get(self.settings.color_scheme, self.settings.color_scheme)
            if isinstance(cm, str):
                # NOTE(review): pyplot.cm.get_cmap is deprecated in newer
                # matplotlib (use matplotlib.colormaps[...]) — fine on the
                # versions this code targets.
                cm = pyplot.cm.get_cmap(cm)
            im = ax.scatter(
                x.as_numpy_array(),
                y.as_numpy_array(),
                s=self._filled_circle_radii,
                marker="o",
                c=data.select(self.scene.slice_selection).as_numpy_array(),
                edgecolors="none",
                vmin=0,
                vmax=flex.max(data),
                cmap=cm,
            )
            # colorbar
            cb = self.fig.colorbar(im, ax=ax)
            [t.set_color(self._foreground) for t in cb.ax.get_yticklabels()]
            [t.set_fontsize(self.settings.font_size) for t in cb.ax.get_yticklabels()]
        self.ax.set_aspect("equal")
        self.ax.set_facecolor(self._background)
        xmax, ymax = self.GetSize()
        ax.set_xlim(0, xmax)
        ax.set_ylim(0, ymax)
        # image coordinates: y grows downwards, axes hidden
        ax.invert_yaxis()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        self.fig.tight_layout()
        self.fig.savefig(
            self.settings.plot.filename, bbox_inches="tight", facecolor=self._background
        )
class MultiplicityViewJson(render_2d):
    """Render a 2D multiplicity plot as a plotly-style JSON document.

    The ``draw_*`` callbacks buffer primitives; :meth:`render` assembles a
    dict with plotly ``data`` traces and ``layout``, which ``__init__``
    writes to ``settings.json.filename``.
    """
    def __init__(self, scene, settings=None):
        render_2d.__init__(self, scene, settings)
        # Buffers filled by the draw_* callbacks during render().
        self._open_circle_points = flex.vec2_double()
        self._open_circle_radii = []
        self._open_circle_colors = []
        self._filled_circle_points = flex.vec2_double()
        self._filled_circle_radii = []
        self._filled_circle_colors = []
        self._text = {"x": [], "y": [], "text": []}
        self._lines = []
        json_d = self.render(None)
        if self.settings.json.compact:
            indent = None
        else:
            indent = 2
        with open(self.settings.json.filename, "w") as fh:
            json.dump(json_d, fh, indent=indent)
    def GetSize(self):
        """Return the nominal canvas size in pixels."""
        return 1600, 1600  # size in pixels
    def draw_line(self, ax, x1, y1, x2, y2):
        """render_2d callback: buffer a line segment (emitted as a layout shape)."""
        self._lines.append((x1, y1, x2, y2))
    def draw_text(self, ax, text, x, y):
        """render_2d callback: buffer a text label."""
        self._text["x"].append(x)
        self._text["y"].append(y)
        self._text["text"].append(text)
    def draw_open_circle(self, ax, x, y, radius, color=None):
        """render_2d callback: buffer an open circle (missing reflection)."""
        self._open_circle_points.append((x, y))
        self._open_circle_radii.append(2 * radius)
        if color is None:
            color = self._foreground
        self._open_circle_colors.append(color)
    def draw_filled_circle(self, ax, x, y, radius, color):
        """render_2d callback: buffer a filled circle (measured reflection)."""
        self._filled_circle_points.append((x, y))
        self._filled_circle_radii.append(2 * radius)
        self._filled_circle_colors.append(color)
    def render(self, ax):
        """Build and return the plotly-style figure dict from the buffers."""
        render_2d.render(self, ax)
        data = []
        if self._open_circle_points.size():
            x, y = self._open_circle_points.parts()
            z = self._open_circle_colors
            data.append(
                {
                    "x": list(x.round(1)),
                    "y": list(y.round(1)),
                    #'z': list(z),
                    "type": "scatter",
                    "mode": "markers",
                    "name": "missing reflections",
                    "showlegend": False,
                    "marker": {
                        #'color': list(z),
                        "color": (
                            "white" if self.settings.black_background else "black"
                        ),
                        "line": {
                            #'color': 'black',
                            "width": 0
                        },
                        "symbol": "circle",
                        "size": 5,
                    },
                }
            )
        if self._filled_circle_points.size():
            x, y = self._filled_circle_points.parts()
            z = self.scene.multiplicities.data().select(self.scene.slice_selection)
            # why doesn't this work?
            # colorscale = []
            # assert len(z) == len(self._filled_circle_colors)
            # for zi in range(flex.max(z)+1):
            #   i = flex.first_index(z, zi)
            #   if i is None: continue
            #   print i, self._filled_circle_colors[i], 'rgb(%i,%i,%i)' %tuple(rgb * 264 for rgb in self._filled_circle_colors[i])
            #   colorscale.append([zi, 'rgb(%i,%i,%i)' %self._filled_circle_colors[i]])
            # Map the local colour-scheme names onto plotly colorscale names.
            # NOTE(review): "RdbU" looks like a typo for plotly's "RdBu";
            # plotly name matching is case-insensitive so it may still work
            # -- confirm.
            cmap_d = {
                "rainbow": "Jet",
                "heatmap": "Hot",
                "redblue": "RdbU",
                "grayscale": "Greys",
                "mono": None,
            }
            color = list(z)
            colorscale = cmap_d.get(
                self.settings.color_scheme, self.settings.color_scheme
            )
            if self.settings.color_scheme == "mono":
                color = "black"
                colorscale = None
            data.append(
                {
                    "x": list(x.round(1)),
                    "y": list(y.round(1)),
                    #'z': list(z),
                    "type": "scatter",
                    "mode": "markers",
                    "name": "multiplicity",
                    "showlegend": False,
                    "marker": {
                        "color": color,
                        "colorscale": colorscale,
                        "cmin": 0,
                        "cmax": flex.max(self.scene.multiplicities.data()),
                        "showscale": True,
                        "colorbar": {"title": "Multiplicity", "titleside": "right"},
                        "line": {
                            #'color': 'white',
                            "width": 0
                        },
                        "symbol": "circle",
                        "size": 5,
                    },
                }
            )
        text = {"mode": "text", "showlegend": False, "textposition": "top right"}
        text.update(self._text)
        data.append(text)
        shapes = []
        for x0, y0, x1, y1 in self._lines:
            # color = 'rgb(%i,%i,%i)' %tuple(rgb * 264 for rgb in self._foreground)
            color = "black"
            shapes.append(
                {
                    "type": "line",
                    "x0": x0,
                    "y0": y0,
                    "x1": x1,
                    "y1": y1,
                    "layer": "below",
                    "line": {"color": color, "width": 2},
                }
            )
        d = {
            "data": data,
            "layout": {
                # NOTE(review): 264 looks like a typo for 255 (8-bit RGB
                # channel maximum) -- confirm before changing.
                "plot_bgcolor": "rgb(%i,%i,%i)"
                % tuple(rgb * 264 for rgb in self._background),
                "title": "Multiplicity plot (%s=%s)"
                % (self.settings.slice_axis, self.settings.slice_index),
                "shapes": shapes,
                "hovermode": False,
                "xaxis": {
                    "showgrid": False,
                    "zeroline": False,
                    "showline": False,
                    "ticks": "",
                    "showticklabels": False,
                },
                "yaxis": {
                    "autorange": "reversed",
                    "showgrid": False,
                    "zeroline": False,
                    "showline": False,
                    "ticks": "",
                    "showticklabels": False,
                },
            },
        }
        return d
master_phil = iotbx.phil.parse(
"""
include scope cctbx.miller.display.master_phil
unit_cell = None
.type = unit_cell
space_group = None
.type = space_group
plot {
filename = multiplicities.png
.type = path
}
json {
filename = None
.type = path
compact = True
.type = bool
}
size_inches = 20,20
.type = floats(size=2, value_min=0)
font_size = 20
.type = int(value_min=1)
""",
process_includes=True,
)
def run(args=None):
    """Command-line entry point for the multiplicity plotter.

    Parses PHIL parameters / command-line arguments, loads the reflection
    file named by the ``data`` parameter and plots the first suitable
    Miller array found in it.

    :param args: argument list; defaults to ``sys.argv[1:]`` evaluated at
        call time (the original default was bound once at import time).
    :raises Sorry: if the reflection file cannot be read or contains no
        arrays of a supported type.
    """
    if args is None:
        args = sys.argv[1:]
    pcl = iotbx.phil.process_command_line_with_files(
        args=args,
        master_phil=master_phil,
        reflection_file_def="data",
        pdb_file_def="symmetry_file",
        usage_string="xia2.plot_multiplicity scaled_unmerged.mtz [options]",
    )
    settings = pcl.work.extract()
    file_name = settings.data
    try:
        hkl_file = any_reflection_file(file_name)
    except Exception as e:
        # Surface a clean user-facing error, chaining the original
        # exception so the cause is preserved for debugging.
        raise Sorry(str(e)) from e
    arrays = hkl_file.as_miller_arrays(merge_equivalents=False)
    valid_arrays = []
    array_info = []  # human-readable labels; kept for parity with the GUI code path
    for array in arrays:
        # Hendrickson-Lattman coefficients and anything that is neither a
        # real nor a complex array cannot be shown as a multiplicity plot.
        if array.is_hendrickson_lattman_array():
            continue
        if (not array.is_real_array()) and (not array.is_complex_array()):
            continue
        labels = array.info().label_string()
        desc = get_array_description(array)
        array_info.append(f"{labels} ({desc})")
        valid_arrays.append(array)
    if not valid_arrays:
        raise Sorry("No arrays of the supported types in this file.")
    # Plot the first suitable array found in the file.
    plot_multiplicity(valid_arrays[0], settings)
def plot_multiplicity(miller_array, settings):
    """Write multiplicity plot(s) for *miller_array*.

    A PNG is produced when ``settings.plot.filename`` is set and a JSON
    document when ``settings.json.filename`` is set; each output builds
    its own scene from the merged array.
    """
    # These display options are mandatory for a multiplicity plot, so they
    # override whatever the incoming settings carried.
    for option in (
        "scale_colors_multiplicity",
        "scale_radii_multiplicity",
        "expand_to_p1",
        "expand_anomalous",
        "slice_mode",
    ):
        setattr(settings, option, True)
    requested_views = []
    if settings.plot.filename is not None:
        requested_views.append(MultiplicityViewPng)
    if settings.json.filename is not None:
        requested_views.append(MultiplicityViewJson)
    for view_cls in requested_views:
        view_cls(scene(miller_array, settings, merge=True), settings=settings)
|
11532663
|
from .operators import *
from .soft_regression import Soft4DFlowRegression, SoftArg2DFlowRegression
|
11532664
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from Models.base_man import BaseMan
import torch_utils
from setting_keywords import KeyWordSettings
from thirdparty.head_cnns import PACRRPlaneMaxPooling
import time
import layers
from Models.base_man import AttentionType
class MultiModalAttentionNetwork(BaseMan):
    """
    Multimodal attention network: matches a query text against a document
    text with n-gram convolutions over word embeddings and contextualized
    (ELMo) representations, optionally combined with a visual (image
    similarity) score.

    Examples:
        >>> model = MultiModalAttentionNetwork()
        >>> model.params['fixed_text_left'] = 50
        >>> model.params['fixed_text_right'] = 1000
        >>> model.params['dropout'] = 0.1
        >>> model.params['context_window'] = 9  # size of context windows
        >>> model.params['filters'] = 5  # number of filters when applying a conv
        >>> model.params['max_ngram'] = 1
        >>> model.params["norm_type"] = "l2"  # to save mem
        >>> model.params["beta"] = 1
        >>> model.params["decomposition_type"] = 0  # type of decomposition
    """
    def __init__(self, params):
        # NOTE(review): super(BaseMan, self) deliberately skips
        # BaseMan.__init__ and calls its parent's __init__ directly --
        # presumably to avoid BaseMan's setup; confirm this is intended.
        super(BaseMan, self).__init__()
        self._params = params
        self.src_word_emb = self._make_default_embedding_layer(params)
        # n_position = max(self._params["fixed_text_left"], self._params["fixed_text_right"]) # checked
        self.fixed_length_left = self._params["fixed_length_left"]
        self.fixed_length_right = self._params["fixed_length_right"]
        # NOTE(review): d_word_vec and dropout are read but not used in the
        # rest of this constructor.
        d_word_vec = self._params['embedding_output_dim']
        dropout = self._params["dropout"]
        # Four stacked interaction matrices are fed to the head CNN.
        self.input_channels = 4
        self.attention_type = self._params["attention_type"]
        self.use_average_dcompositional_att = self._params["use_average_dcompositional_att"]
        ################################################################################################
        # One n-gram conv per n in [1, max_ngram] for both word embeddings
        # (q_convs/d_convs) and contextualized reprs (q/d_context_convs).
        self.q_convs = nn.ModuleList()
        self.d_convs = nn.ModuleList()
        self.q_context_convs, self.d_context_convs = nn.ModuleList(), nn.ModuleList()
        self.max_ngram = self._params["max_ngram"]
        for i in range(self.max_ngram):
            conv = nn.Sequential(
                layers.Permute([0, 2, 1]),
                nn.ConstantPad1d((0, i), 0),
                nn.Conv1d(
                    in_channels=self._params["embedding_output_dim"],
                    out_channels=self._params['filters'],
                    kernel_size=i + 1
                ),
                nn.Tanh()
            )
            # NOTE(review): the same module instance is appended to both the
            # query and document lists, so the two sides share weights --
            # presumably intentional; confirm.
            self.q_convs.append(conv)
            self.d_convs.append(conv)
            conv2 = nn.Sequential(
                layers.Permute([0, 2, 1]),
                nn.ConstantPad1d((0, i), 0),
                nn.Conv1d(
                    in_channels = self._params["elmo_vec_size"],
                    out_channels = self._params['filters'],
                    kernel_size = i + 1
                ),
                nn.Tanh()
            )
            self.q_context_convs.append(conv2)
            self.d_context_convs.append(conv2)
        ################################################################################################
        # visual component
        self.use_visual = self._params["use_visual"]
        if self.use_visual:
            num_ftrs = self._params["visual_feature_size"]
            self.last_visual_size = 300  # same as word embeddings dim
            self.image_fc1 = nn.Linear(num_ftrs, self.last_visual_size)
            # Precomputed image feature tensors, indexed by image id.
            self.full_left_images_tensor = self._params["full_left_images_tensor"]
            self.full_right_images_tensor = self._params["full_right_images_tensor"]
        ################################################################################################
        # contextualized component: precomputed ELMo tensors indexed by
        # query/document id (kept on CPU; moved to GPU per batch in forward).
        self.left_elmo_tensor = self._params["left_elmo_tensor"]
        self.right_elmo_tensor = self._params["right_elmo_tensor"]
        ################################################################################################
        # Bilinear attention parameters (only needed for the bilinear variants).
        if self.attention_type in [AttentionType.UsingBilinearOnly, AttentionType.UsingBilinearDissim]:
            self.linearQ = nn.Linear(self._params['filters'], 1, bias=False)
            self.linearD = nn.Linear(self._params['filters'], 1, bias=False)
            self.linearQD = nn.Linear(self._params['filters'], self._params['filters'], bias=False)
        ################################################################################################
        self.head_conv_layers = nn.ModuleList()
        if self._params["head_cnn_type"] == "pacrr_plane":
            self.head_conv_layers.append(PACRRPlaneMaxPooling(num_conv_layers = self._params["conv_layers"],
                input_channels = self.input_channels, filters_count = self._params["filters_count_pacrr"],
                ns = self._params["n_s"]))
        # assert len(self.head_conv_layers) == self.max_ngram
        ################################################################################################
        # Size of the flattened feature vector fed to the final MLP.
        factors = self.max_ngram * self.head_conv_layers[0].last_in_channels * self.head_conv_layers[0].L_new * self.head_conv_layers[0].R_new
        if self.use_visual: factors += 1
        self.linear = nn.Sequential(
            # layers.Flatten(dim = 1),
            nn.Linear(factors, 128),
            nn.ReLU(), nn.Linear(128, 64),
            nn.ReLU(), nn.Linear(64, 1))
    def forward(self, query: torch.Tensor, document: torch.Tensor, verbose = False, **kargs):
        """Score a batch of (query, document) pairs.

        *query* and *document* are integer token-id tensors of shape
        (B, L) and (B, R); extra inputs (query/doc ids, image indices,
        device flag) arrive through **kargs under KeyWordSettings keys.
        Returns a squeezed score tensor.
        """
        max_left_len, max_right_len = query.size(1), document.size(1)
        # Process left & right input.
        # https://github.com/AdeDZY/K-NRM/blob/master/knrm/model/model_base.py#L96
        tensor_mask = torch_utils.create_mask_tensor(query, document, threshold = 1)
        doc_mask = (document > 0).float()
        query_mask = (query > 0).float()  # B, L
        embed_query = self.src_word_emb(query.long())  # (B, L, D)
        embed_doc = self.src_word_emb(document.long())  # (B, R, D)
        # normalizing vectors
        embed_query = F.normalize(embed_query, p = 2, dim = -1)
        embed_doc = F.normalize(embed_doc, p = 2, dim = -1)
        ################################# For Contextualized Representation using ELMO #############################
        query_ids = kargs[KeyWordSettings.QueryIDs]  # (B, )
        doc_ids = kargs[KeyWordSettings.DocIDs]  # (B, )
        assert query_ids.shape == doc_ids.shape
        use_cuda = kargs[KeyWordSettings.UseCuda]
        query_char_repr = self.left_elmo_tensor[query_ids]
        doc_char_repr = self.right_elmo_tensor[doc_ids]
        # I have to load to gpu at this step because left_tensor is too large to load to GPU
        query_char_repr = torch_utils.gpu(query_char_repr, use_cuda)  # (B, L, D1)
        doc_char_repr = torch_utils.gpu(doc_char_repr, use_cuda)  # (B, R, D1)
        assert query_char_repr.size(1) == embed_query.size(1)
        assert doc_char_repr.size(1) == embed_doc.size(1)
        ###############################################################################################
        # Apply every n-gram conv to both representations of both sides.
        q_convs, d_convs = [], []
        q_ctx_convs, d_ctx_convs = [], []
        for q_conv, d_conv, \
            q_context_conv, d_context_conv in zip(self.q_convs, self.d_convs, self.q_context_convs, self.d_context_convs):
            q_out = q_conv(embed_query).transpose(1, 2)  # to shape (B, D, L) => (B, F, L) => (B, L, F)
            d_out = d_conv(embed_doc).transpose(1, 2)  # to shape (B, D, R) => (B, F, R) => (B, R, F)
            q_out = F.normalize(q_out, p = 2, dim = -1)  # good stuff for relevance matching
            d_out = F.normalize(d_out, p = 2, dim = -1)
            q_convs.append(q_out)
            d_convs.append(d_out)
            q_ctx_out = q_context_conv(query_char_repr).transpose(1, 2)  # B, L, F
            d_ctx_out = d_context_conv(doc_char_repr).transpose(1, 2)  # B, R, F
            q_ctx_out = F.normalize(q_ctx_out, p=2, dim=-1)
            d_ctx_out = F.normalize(d_ctx_out, p=2, dim=-1)
            q_ctx_convs.append(q_ctx_out)
            d_ctx_convs.append(d_ctx_out)
        # For each n-gram size, build four stacked interaction channels and
        # run the shared head CNN over them.
        # NOTE(review): if attention_type matches none of the four handled
        # variants, `tensors` is unbound and this raises NameError.
        output_phis = []
        for idx in range(self.max_ngram):
            query_local_context = q_ctx_convs[idx]  # (B, L, D)
            doc_local_context = d_ctx_convs[idx]  # (B, R, D)
            sim_mat = self._get_sim_matrix(q_convs[idx], d_convs[idx])
            sim_mat = sim_mat * tensor_mask
            if self.attention_type == AttentionType.UsingDotProductOnly:
                # using sim_mat, context_mat, sim_mat - context_mat, sim_mat * context_mat
                # [S, L, S - L, S * L]
                context_aware_mat = self._get_sim_matrix(query_local_context, doc_local_context) * tensor_mask
                tensors = torch.stack([sim_mat,
                                       context_aware_mat,
                                       sim_mat - context_aware_mat,
                                       sim_mat * context_aware_mat], dim=-1)  # B, L, R, C
            elif self.attention_type == AttentionType.UsingDotProductDisim:
                # using sim_mat, context_mat, sim_mat - context_mat, dissimilarity * sim_mat
                # [S, L, S - L, S * D]
                context_aware_mat = self._get_sim_matrix(query_local_context, doc_local_context) * tensor_mask
                dissimilarity = self._get_disimilarity_mat(query_local_context, doc_local_context, tensor_mask, self.use_average_dcompositional_att) * tensor_mask
                tensors = torch.stack([sim_mat,
                                       context_aware_mat,
                                       sim_mat - context_aware_mat,
                                       sim_mat * dissimilarity], dim=-1)  # B, L, R, C
            elif self.attention_type == AttentionType.UsingBilinearOnly:
                # [S, B, S - B, S * B]
                bilinear = self._get_bilinear_attention(query_local_context, doc_local_context) * tensor_mask
                tensors = torch.stack([sim_mat,
                                       bilinear,
                                       sim_mat - bilinear,
                                       bilinear * sim_mat], dim=-1)  # B, L, R, C
            elif self.attention_type == AttentionType.UsingBilinearDissim:
                # [S, B, S - B, S * D]
                bilinear = self._get_bilinear_attention(query_local_context, doc_local_context) * tensor_mask
                dissimilarity = self._get_disimilarity_mat(query_local_context, doc_local_context, tensor_mask, self.use_average_dcompositional_att) * tensor_mask
                tensors = torch.stack([sim_mat,
                                       bilinear,
                                       sim_mat - bilinear,
                                       dissimilarity * sim_mat], dim=-1)  # B, L, R, C
            tensors = tensors.permute(0, 3, 1, 2)  # (B, C, L, R)
            phi = torch.flatten(self.head_conv_layers[0](tensors), start_dim = 1)
            output_phis.append(phi)
        phi = torch.cat(output_phis, dim = -1)  # (B, x)
        if self.use_visual:
            # a list of size B, where each element is a list of image tensors
            t1 = time.time()
            query_images_indices = kargs[KeyWordSettings.QueryImagesIndices]
            B1, n1, M1 = query_images_indices.shape  # expected shape
            assert n1 == 1
            query_images = self.full_left_images_tensor[query_images_indices.flatten().long()]  # B1 * n1 * M1, VD
            doc_imgs_indices = kargs[KeyWordSettings.DocImagesIndices]  # (B, n, M2, VD) or (B, M2, VD)
            B, n, M2 = doc_imgs_indices.shape  # expected shape
            images_mask = torch_utils.create_mask_tensor_image(query_images_indices, doc_imgs_indices)  # (B, n, M1, M2)
            doc_images = self.full_right_images_tensor[doc_imgs_indices.flatten().long()]  # B * n * M2, VD
            left_feats = self.image_fc1(query_images)  # (B * n1 * M1, H) we don't want visual_cnn on 30 duplicated queries images (not wise)
            right_feats = self.image_fc1(doc_images)  # (B * n * M2, H)
            left_feats = left_feats.view(B1, M1, self.last_visual_size)
            if B1 == 1: left_feats = left_feats.expand(B, M1, self.last_visual_size)  # during testing
            right_feats = right_feats.view(B, n * M2, self.last_visual_size)
            right_feats = F.normalize(right_feats, p=2, dim=-1)
            left_feats = F.normalize(left_feats, p=2, dim=-1)
            # Cosine similarity between every query image and every doc image.
            scores = torch.bmm(left_feats, right_feats.permute(0, 2, 1))  # (B, M1, n * M2)
            scores = scores.view(B, M1, n, M2).permute(0, 2, 1, 3)  # (B, n, M1, M2)
            # masking
            assert scores.size() == images_mask.size(), (scores.size(), images_mask.size())
            scores = scores * images_mask
            scores = scores.view(B * n, M1, M2)
            # Best image-pair similarity per (query, doc) becomes one extra feature.
            visual_scores, _ = torch.flatten(scores, start_dim = 1).max(-1)
            visual_scores = visual_scores.unsqueeze(-1)  # (B * n, 1)
            phi = torch.cat([phi, visual_scores], dim = -1)
            t2 = time.time()
            # print("Running time of CNN in forward: ", (t2 - t1), "seconds")
        out = self.linear(phi)
        if verbose:
            print("out: ", out.squeeze())
            # print("After dense and tanh: ", out)
        if KeyWordSettings.OutputRankingKey in kargs and kargs[KeyWordSettings.OutputRankingKey] and self.use_visual:
            return torch.cat([out, torch.flatten(scores, start_dim = 1)], dim = -1)  # for error analysis (B, 2)
        return out.squeeze()
|
11532670
|
import struct
import sys
class WAL():
    """Minimal reader for a SQLite write-ahead-log (WAL) file.

    Layout reference:
    http://www.cclgroupltd.com/the-forensic-implications-of-sqlites-write-ahead-log/
    (see also https://www.sqlite.org/fileformat2.html#walformat).
    """

    # WAL header: magic, format version, page size, checkpoint sequence,
    # salt-1, salt-2, checksum-1, checksum-2 -- all big-endian 32-bit.
    _HEADER = ">LLLLLLLL"
    # Frame header: page number, commit size in pages (zero for non-commit
    # frames), salt-1, salt-2, checksum-1, checksum-2.
    _FRAME = ">LLLLLL"

    def __init__(self, f):
        """Read and validate the WAL header from binary file object *f*.

        Raises Exception if the magic number is not one of the two valid
        WAL signatures (0x377F0682 / 0x377F0683, which encode the
        checksum byte order).
        """
        self.f = f
        size = struct.calcsize(self._HEADER)
        data = self.f.read(size)
        (self.signature, self.version, self.page_size, self.sequence,
         self.salt1, self.salt2, self.checksum1, self.checksum2) = struct.unpack(self._HEADER, data)
        if not (self.signature == 0x377F0682 or self.signature == 0x377F0683):
            raise Exception("Invalid signature ({:02x})".format(self.signature))

    def frames(self):
        """Yield one tuple per frame until end of file.

        Each tuple is (page_number, size_in_pages, salt1, salt2,
        checksum1, checksum2, page_bytes). A truncated trailing frame
        header ends iteration cleanly instead of raising struct.error
        (partial trailing frames are common in real WAL files).
        """
        frame_size = struct.calcsize(self._FRAME)
        while True:
            data = self.f.read(frame_size)
            if len(data) < frame_size:
                # End of file, or a torn partial frame header.
                break
            (page_number, size_in_pages, salt1, salt2,
             checksum1, checksum2) = struct.unpack(self._FRAME, data)
            # The page content immediately follows its frame header; a short
            # read here yields a truncated page (preserved original behavior).
            page = self.f.read(self.page_size)
            yield (page_number, size_in_pages, salt1, salt2, checksum1, checksum2, page)
|
11532690
|
import pymel.internal.factories as _factories
from . import general as _general
def fluidEmitter(*args, **kwargs):
    """
    Creates an emitter object. If object names are provided or if objects are selected, applies the emitter to the
    named/selected object(s)in the scene. Fluid will then be emitted from each. If no objects are named or selected, or if
    the -pos option is specified, creates a positional emitter. If an emitter was created, the command returns the name of
    the object owning the emitter, and the name of emitter shape. If an emitter was queried, the command returns the results
    of the query.
    Flags:
    - cycleEmission : cye                (unicode)       [query,edit]
        Possible values are "none" and "frame." Cycling emission restarts the random number stream after a specified interval.
        This can either be a number of frames or a number of emitted particles. In each case the number is specified by the
        cycleInterval attribute. Setting cycleEmission to "frame" and cycleInterval to 1 will then re-start the random stream
        every frame. Setting cycleInterval to values greater than 1 can be used to generate cycles for games work.
    - cycleInterval : cyi                (int)           [query,edit]
        Specifies the number of frames or particles between restarts of the random number stream. See cycleEmission. Has no
        effect if cycleEmission is set to None.
    - densityEmissionRate : der          (float)         [query,edit]
        Rate at which density is emitted.
    - fluidDropoff : fdr                 (float)         [query,edit]
        Fluid Emission Dropoff in volume
    - fuelEmissionRate : fer             (float)         [query,edit]
        Rate at which is emitted.Flag can appear in Create mode of commandFlag can have multiple arguments, passed either as a
        tuple or a list.
    - heatEmissionRate : her             (float)         [query,edit]
        Rate at which density is emitted.
    - maxDistance : mxd                  (float)         [query,edit]
        Maximum distance at which emission ends.
    - minDistance : mnd                  (float)         [query,edit]
        Minimum distance at which emission starts.
    - name : n                           (unicode)       [query,edit]
    - position : pos                     (float, float, float) [query,edit]
        Positional emitter at world space location (x,y,z).
    - rate : r                           (float)         [query,edit]
        Rate at which particles emitted  (can be non-integer).  For point emission this is rate per point per unit time.  For
        surface emission it is rate per square unit of area per unit time.
    - torusSectionRadius : tsr           (float)         [query,edit]
        Section radius for a torus volume.  Applies only to torus. Similar to the section radius in the torus modelling
        primitive.
    - type : typ                         (unicode)       [query,edit]
        Type of emitter. The choices are omni | dir | direction | surf | surface | curve | curv. The default is omni. The full
        definition of these types are: omnidirectional point emitter, directional point emitter, surface emitter, or curve
        emitter.
    - volumeOffset : vof                 (float, float, float) [query,edit]
        Volume offset of the emitter. Volume offset translates the emission volume by the specified amount from the actual
        emitter location. This is in the emitter's local space.
    - volumeShape : vsh                  (unicode)       [query,edit]
        Volume shape of the emitter. Sets/edits/queries the field's volume shape attribute.  If set to any value other than
        "none", determines a 3-D volume within which particles are generated. Values are: "cube," "sphere," "cylinder," "cone,"
        "torus."
    - volumeSweep : vsw                  (float)         [query,edit]
        Volume sweep of the emitter.  Applies only to sphere, cone, cylinder, and torus.  Similar effect to the sweep attribute in
        modelling.
    Derived from mel command `maya.cmds.fluidEmitter`
    """
    # Auto-generated stub -- presumably rebound at import time by
    # pymel.internal.factories to wrap maya.cmds.fluidEmitter; TODO confirm.
    pass
def drag(*args, **kwargs):
    """
    Drag exerts a friction, or braking force proportional to the speed of a moving object. If direction is not enabled, the
    drag acts opposite to the current velocity of the object. If direction is enabled, it acts opposite to the component of
    the velocity in the specified direction. The force is independent of the position of the affected object.  The transform
    is the associated dependency node. Use connectDynamic to cause the field to affect a dynamic object. If fields are
    created, this command returns the names of each of the fields. If a field was queried, the results of the query are
    returned. If a field was edited, the field name is returned. If object names are provided or the active selection list
    is non-empty, the command creates a field for every object in the list and calls addDynamic to add it to the object. If
    the list is empty, the command defaults to -pos 0 0 0. Setting the -pos flag with objects named on the command line is
    an error.
    Flags:
    - attenuation : att                  (float)         [query,edit]
        Attentuation rate of field
    - directionX : dx                    (float)         [query,edit]
        X-component of direction.
    - directionY : dy                    (float)         [query,edit]
        Y-component of direction.
    - directionZ : dz                    (float)         [query,edit]
        Z-component of direction
    - magnitude : m                      (float)         [query,edit]
        Strength of field.
    - maxDistance : mxd                  (float)         [query,edit]
        Maximum distance at which field is exerted. -1 indicates that the field has no maximum distance.
    - name : n                           (unicode)       [query,edit]
        name of field
    - perVertex : pv                     (bool)          [query,edit]
        Per-vertex application. If this flag is set true, then each individual point (CV, particle, vertex,etc.) of the chosen
        object exerts an identical copy of the force field. If this flag is set to false, then the force is exerted only from
        the geometric center of the set of points.
    - position : pos                     (float, float, float) [query,edit]
        Position in space where you want to place a field. The gravity then emanates from this position in space rather than
        from an object. Note that you can both use -pos (creating a field at a position) and also provide object names.
    - torusSectionRadius : tsr           (float)         [query,edit]
    - useDirection : ud                  (bool)          [query,edit]
        Enable/disable direction. Drag will use -dx/-dy/-dz arguments if and only if this flag is set true.Flag can appear in
        Create mode of commandFlag can have multiple arguments, passed either as a tuple or a list.
    - volumeExclusion : vex              (bool)          [query,edit]
    - volumeOffset : vof                 (float, float, float) [query,edit]
    - volumeShape : vsh                  (unicode)       [query,edit]
    - volumeSweep : vsw                  (float)         [query,edit]
    Derived from mel command `maya.cmds.drag`
    """
    # Auto-generated stub -- presumably rebound at import time by
    # pymel.internal.factories to wrap maya.cmds.drag; TODO confirm.
    pass
def particleInstancer(*args, **kwargs):
    """
    This command is used to create a particle instancer node and set the proper attributes in the particle shape and in the
    instancer node.  It will also create the connections needed between the particle shape and the instancer node.
    Flags:
    - addObject : a                      (bool)          [create,edit]
        This flag indicates that objects specified by the -object flag will be added to the instancer node as instanced objects.
    - aimAxis : aa                       (unicode)       [create,query,edit]
        This flag sets or queries the particle attribute name to be used for the aim axis of the instanced objects.
    - aimDirection : ad                  (unicode)       [create,query,edit]
        This flag sets or queries the particle attribute name to be used for the aim direction of the instanced objects.
    - aimPosition : ap                   (unicode)       [create,query,edit]
        This flag sets or queries the particle attribute name to be used for the aim position of the instanced objects.
    - aimUpAxis : aua                    (unicode)       [create,query,edit]
        This flag sets or queries the particle attribute name to be used for the aim up axis of the instanced objects.
    - aimWorldUp : awu                   (unicode)       [create,query,edit]
        This flag sets or queries the particle attribute name to be used for the aim world up of the instanced objects.
    - attributeMapping : am              (bool)          [query]
        This flag queries the particle attribute mapping list.
    - cycle : c                          (unicode)       [create,query,edit]
        This flag sets or queries the cycle attribute for the instancer node. The options are "none", "sequential". The default
        is "none".
    - cycleStartObject : sto             (unicode)       [create,query,edit]
        This flag sets or queries the particle attribute name to be used for the cycle start object of the instanced objects.
    - cycleStep : cs                     (float)         [create,query,edit]
        This flag sets or queries the cycle step attribute for the instancer node. This attribute indicates the size of the step
        in frames or seconds (see cycleStepUnits).
    - cycleStepUnits : csu               (unicode)       [create,query,edit]
        This flag sets or queries the cycle step unit attribute for the instancer node. The options are "frames" or "seconds".
        The default is "frames".
    - index : i                          (int)           [query]
        This flag is used to query the name of the ith instanced object.
    - instanceId : id                    (unicode)       [query]
        This flag queries the particle attribute name to be used for the id of the instanced objects.Flag can appear in Create
        mode of commandFlag can have multiple arguments, passed either as a tuple or a list.
    - levelOfDetail : lod                (unicode)       [create,query,edit]
        This flag sets or queries the level of detail of the instanced objects. The options are "geometry", "boundingBox" or
        "boundingBoxes". The default is "geometry".
    - name : n                           (unicode)       [create,query]
        This flag sets or queries the name of the instancer node.
    - object : obj                       (unicode)       [create,query,edit]
        This flag indicates which objects will be add/removed from the list of instanced objects. The flag is used in conjuction
        with the -addObject and -remove flags. If neither of these flags is specified on the command line then -addObject is
        assumed.
    - objectIndex : oi                   (unicode)       [create,query,edit]
        This flag sets or queries the particle attribute name to be used for the object index of the instanced objects.
    - particleAge : age                  (unicode)       [create,query,edit]
        This flag sets or queries the particle attribute name to be used for the age of the instanced objects.
    - position : p                       (unicode)       [create,query,edit]
        DEFAULT "worldPosition" This flag sets or queries the particle attribute name to be used for the positions of the
        instanced objects. By default the attribute is worldPosition.
    - removeObject : rm                  (bool)          [edit]
        This flag indicates that objects specified by the -object flag will be removed from the instancer node as instanced
        objects.
    - rotation : r                       (unicode)       [create,query,edit]
        This flag sets or queries the particle attribute name to be used for the rotation of the instanced objects.
    - rotationOrder : ro                 (unicode)       [create,query,edit]
        This flag specifies the rotation order associated with the rotation flag. The options are XYZ, XZY, YXZ, YZX, ZXY, or
        ZYX. By default the attribute is XYZ.
    - rotationType : rt                  (unicode)       [create,query,edit]
        This flag sets or queries the particle attribute name to be used for the rotation type of the instanced objects.
    - rotationUnits : ru                 (unicode)       [create,query,edit]
        This flag specifies the rotation units associated with the rotation flag. The options are degrees or radians. By default
        the attribute is degrees.
    - scale : sc                         (unicode)       [create,query,edit]
        This flag sets or queries the particle attribute name to be used for the scale of the instanced objects.
    - shear : sh                         (unicode)       [create,query,edit]
        This flag sets or queries the particle attribute name to be used for the shear of the instanced objects.
    - visibility : vis                   (unicode)       [create,query,edit]
        This flag sets or queries the particle attribute name to be used for the visibility of the instanced objects.
    Derived from mel command `maya.cmds.particleInstancer`
    """
    # Auto-generated stub -- presumably rebound at import time by
    # pymel.internal.factories to wrap maya.cmds.particleInstancer; TODO confirm.
    pass
def fluidCacheInfo(*args, **kwargs):
    """
    A command to get information about the fluids cache. Get the startFrame and resolution for InitialConditions. Get the
    endFrame as well for a playback cache. Note that for the playback cache, it will look at the current time (or last frame
    if the current time is past end of cache) In query mode, return type is based on queried flag.
    Flags:
    - attribute : at                     (unicode)       [create,query,edit]
        Modifier to the "hasData" flag, used to query whether a cache has data (at the current time) for a specific fluid
        attribute.  Valid attribute values are "density", "velocity", "temperature", "fuel", "color", "coordinates" (for texture
        coordinates), "falloff".
    - cacheTime : t                      (time)          [create,query,edit]
        Only valid with the -hasData flag.  The time the -hasData flag uses when it queries the cache to see if there is
        data.Flag can appear in Create mode of commandFlag can have multiple arguments, passed either as a tuple or a list.
    - endFrame : ef                      (bool)          [create,query,edit]
        Returns end time of cache as float.
    - hasCache : hc                      (bool)          [create,query,edit]
        Returns true if fluid has specified cache, false if not.
    - hasData : hd                       (bool)          [create,query,edit]
        Queries whether a given cache has data in it at the time specified by the -time flag. (If not -time flag is present,
        -hasData assumes the current time.)  When used with the "attribute" flag, indicates if data for the specified attribute
        exists in the cache.  When used without the "attribute" flag, "hasData" indicates whether there is data in the cache for
        any of the valid fluid attributes.
    - initialConditions : ic             (bool)          [create,query,edit]
        Specifies the cache to be queried is the "Initial Conditions" cache.
    - playback : pb                      (bool)          [create,query,edit]
        Specifies the cache to be queried is the "Playback" cache.
    - resolution : re                    (bool)          [create,query,edit]
        Returns cache resolution as float[].
    - startFrame : sf                    (bool)          [create,query,edit]
        Returns start time for cache as float.
    Derived from mel command `maya.cmds.fluidCacheInfo`
    """
    # Auto-generated stub -- presumably rebound at import time by
    # pymel.internal.factories to wrap maya.cmds.fluidCacheInfo; TODO confirm.
    pass
def saveFluid(*args, **kwargs):
    """
    A command to save the current state of the fluid to the initial state cache. The grids to be saved are determined by the
    cache attributes: cacheDensity, cacheVelocity, etc. These attributes are normally set from the options on Set Initial
    State. The cache must be set up before invoking this command. In query mode, return type is based on queried flag.
    Flags:
    - currentTime : ct (int) [create,query,edit]
    cache state of fluid at current time
    - endTime : et (int) [create,query,edit]
    end time for caching. Flag can appear in Create mode of command. Flag can have multiple arguments, passed either as a
    tuple or a list.
    - startTime : st (int) [create,query,edit]
    start time for caching
    Derived from mel command `maya.cmds.saveFluid`
    """
    pass
def expression(*args, **kwargs):
    """
    This command describes an expression that belongs to the current scene. The expression is a block of code of unlimited
    length with a C-like syntax that can perform conversions, mathematical operations, and logical decision making on any
    numeric attribute(s) in the scene. One expression can read and alter any number of numeric attributes. Theoretically,
    every expression in a scene can be combined into one long expression, but it is recommended that they are separated for
    ease of use and editing, as well as efficiency. If this command is being sent by the command line or in a script, then
    the user should be sure to embed escaped newlines (\n), tabs (\t) for clarity when reading them in the expression
    editor. Also, quotes in an expression must be escaped (\") so that they are not confused by the system as the end of
    your string. When using the expression editor, these characters are escaped for you unless they are already within
    quotes. Note, expressions that alter or use per-particle attributes of a particle shape should use the 'dynExpression'
    command.
    Flags:
    - alwaysEvaluate : ae (int) [create,query,edit]
    If this is TRUE (the default), then the expression will be evaluated whenever time changes regardless of whether the
    other inputs have changed, and an output is requested. If it is FALSE, then the expression will only be evaluated if one
    or more of the inputs changes and an output is requested. Note, if 'time' or 'frame' are inputs, then the expression
    will act as if this was set to TRUE.
    - animated : an (int) [query,edit]
    - attribute : a (unicode) [query,edit]
    - name : n (unicode) [create,query,edit]
    Sets the name of the dependency graph node to use for the expression
    - object : o (unicode) [create,query,edit]
    Sets the "default" object for the expression. This allows the expression writer to not type the object name for
    frequently-used objects. See the examples below.
    - shortNames : sn (bool) [create,query,edit]
    When used with the -q/query flag, tells the command to return the expression with attribute names as short as possible.
    The default is to return the FULL attribute name, regardless of how the user entered it into the expression, including
    the object names. With this flag set, attribute names are returned as their short versions, and any attribute that
    belongs to the default object, if there is one specified, will not display the object's name.
    - string : s (unicode) [create,query,edit]
    Set the expression string
    - unitConversion : uc (unicode) [query,edit]
    Insert specified unit conversion nodes at creation: "all", "none," or "angularOnly." Default is "all." For angularOnly,
    will insert unit conversion nodes only for angular attributes (allowing degrees-to-radians conversion). This is for
    performance tuning. If queried, returns the option used when the expression was created or last edited. Flag can appear
    in Create mode of command. Flag can have multiple arguments, passed either as a tuple or a list.
    Derived from mel command `maya.cmds.expression`
    """
    pass
def vortex(*args, **kwargs):
    """
    A vortex field pulls objects in a circular direction, like a whirlpool or tornado. Objects affected by the vortex field
    tend to rotate around the axis specified by -ax, -ay, and -az. The transform is the associated dependency node. Use
    connectDynamic to cause the field to affect a dynamic object. If fields are created, this command returns the names of
    each of the fields. If a field was queried, the results of the query are returned. If a field was edited, the field name
    is returned. If object names are provided or the active selection list is non-empty, the command creates a field for
    every object in the list and calls addDynamic to add it to the object. If the list is empty, the command defaults to
    -pos 0 0 0. Setting the -pos flag with objects named on the command line is an error.
    Flags:
    - attenuation : att (float) [query,edit]
    Attenuation rate of field
    - axisX : ax (float) [query,edit]
    X-component of vortex axis
    - axisY : ay (float) [query,edit]
    Y-component of vortex axis
    - axisZ : az (float) [query,edit]
    Z-component of vortex axis. Flag can appear in Create mode of command. Flag can have multiple arguments, passed either
    as a tuple or a list.
    - magnitude : m (float) [query,edit]
    Strength of field.
    - maxDistance : mxd (float) [query,edit]
    Maximum distance at which field is exerted. -1 indicates that the field has no maximum distance.
    - name : n (unicode) [query,edit]
    name of field
    - perVertex : pv (bool) [query,edit]
    Per-vertex application. If this flag is set true, then each individual point (CV, particle, vertex, etc.) of the chosen
    object exerts an identical copy of the force field. If this flag is set to false, then the force is exerted only from
    the geometric center of the set of points.
    - position : pos (float, float, float) [query,edit]
    Position in space where you want to place a field. The field then emanates from this position in space rather than
    from an object. Note that you can both use -pos (creating a field at a position) and also provide object names.
    - torusSectionRadius : tsr (float) [query,edit]
    - volumeExclusion : vex (bool) [query,edit]
    - volumeOffset : vof (float, float, float) [query,edit]
    - volumeShape : vsh (unicode) [query,edit]
    - volumeSweep : vsw (float) [query,edit]
    Derived from mel command `maya.cmds.vortex`
    """
    pass
def event(*args, **kwargs):
    """
    The event command assigns collision events to a particle object. Collision events are stored in multi-attributes in the
    particle shape. The event command returns the event name.
    Flags:
    - count : ct (int) [query,edit]
    Collision number (for each particle) to which this event applies. Zero (the default) indicates that it applies to all
    collisions.
    - delete : d (bool) [create]
    Delete the specified event.
    - dieAtCollision : die (bool) [query,edit]
    Particle dies at the collision specified by "count." If no count value is given, die at first collision.
    - emit : em (int) [query,edit]
    Emit n additional particles into the assigned target object. The original (colliding) particle survives as well, and
    remains in its original object. The new particles have age zero and mass equal to the emitting particle. They use the
    velocity inheritance parameter of the target object.
    - idNumber : id (int) []
    - list : ls (bool) [create]
    List all events for the chosen shape, like this: event1Name event2Name ... If no shape identified, list all events for
    all shapes, like this: shape1Name event1Name shape2Name event2Name... Returns a string array.
    - name : n (unicode) [create,query,edit]
    Assign a name to an event you are creating, or identify an event you wish to edit, query, or delete. See examples.
    - proc : pr (script) [create,query,edit]
    Specify a MEL proc to be called each time the event occurs. This must be a global proc with arguments as follows: global
    proc procName( string obj, int id, string objHit ); Arguments passed in are the name of the particle object, the id of
    the particle which collided, and the name of the object collided with. You can use particle -id -q to get values of the
    particle's attributes. Flag can appear in Create mode of command. Flag can have multiple arguments, passed either as a
    tuple or a list.
    - random : r (bool) [query,edit]
    Used with -split and -emit flags. If -random is set true and -split or -emit is set to n, then a random number of
    particles uniformly distributed between 1 and n will be created at the event.
    - rename : re (unicode) [query]
    Assign a new name to an event you are editing. See examples.
    - select : s (bool) []
    This flag is obsolete. See the -name flag.
    - split : spl (int) [query,edit]
    Colliding particle splits into specified number of new particles. These new particles become part of the assigned target
    object. If no target has been assigned, they become part of the same object. The new particles inherit the current age
    of the particle that split. They use the velocity inheritance parameter of the target object. If you set both emit and
    split, the event will do both: first emit new particles, then split the original one. This is a change from earlier
    versions where emit and split were mutually exclusive.
    - spread : sp (float) [query,edit]
    Particles created at collision will spread out a random amount from the rebound direction of the colliding particle. The
    spread is specified as a fraction (0-1) of 90 degrees. If spread is set at 0 (the default) all the new particles created
    may coincide.
    - target : t (unicode) [query,edit]
    Target object for emitting or split particles. New particles created through the -emit or -split flags join this object.
    Derived from mel command `maya.cmds.event`
    """
    pass
def addPP(*args, **kwargs):
    """
    Adds per-point (per-cv, per-vertex, or per-particle) attribute capability for an attribute of an emitter or field. The
    -atr flag identifies the attribute. If no attribute is named, addPP returns a warning and does nothing. The command adds
    any other necessary attributes wherever they are needed, and makes all necessary connections. If any of the attributes
    already exist, the command simply connects to them. The command also toggles any relevant attributes in the emitter or
    field to indicate that per-point capability is being used. The command adds a separate per-point attribute to the owning
    object for each emitter/field. For example, for emission rate, there is a separate ratePP for each emitter. These
    attributes are named according to the convention emitter/field nameattr namePP. For example, if a particle shape owned
    an emitter "smoke", that shape would get attribute "smokeRatePP." The name of the object must be the emitter or field
    for which per-point capability is to be added (or the name of its parent transform). The addPP command adds the per-
    point capability for that emitter or field but not for any others owned by the same object. If per-point capability is
    not supported for a named object, the command will issue a warning, but will continue executing for any other objects
    which were valid. If no objects are named, addPP uses any objects in the current selection list for which the specified
    attribute is applicable. (For example, it would add per-point rate for all selected emitters.) If addPP detects that the
    owner object has left-over attributes from a deleted emitter, it will remove those attributes before adding the new
    ones. Thus, you can delete the emitter, make a new one, and run addPP again, and addPP will clean up after the deleted
    emitter. This is most commonly used if you have a geometry emitter and then decide to change the geometry. Likewise, if
    addPP detects that some cvs or vertices have been added to the geometry, then it will expand the corresponding multi-
    attributes as necessary. However, if it detects that some cvs/vertices have been removed, it will not remove any entries
    from the multi. See the user manual for more discussion.
    Modifications:
    - returns a list of PyNode objects
    Flags:
    - attribute : atr (unicode) [create]
    Name of attribute to which you wish to add PP capability. Currently the only attribute supported is rate (for
    emitters). Flag can appear in Create mode of command. Flag can have multiple arguments, passed either as a tuple or a
    list.
    Derived from mel command `maya.cmds.addPP`
    """
    pass
def turbulence(*args, **kwargs):
    """
    A turbulence field causes irregularities (also called 'noise' or 'jitter') in the motion of affected objects. Use
    connectDynamic to cause the field to affect a dynamic object. If fields are created, this command returns the names of
    each of the fields. If a field was queried, the results of the query are returned. If a field was edited, the field name
    is returned. If object names are provided or the active selection list is non-empty, the command creates a field for
    every object in the list and calls addDynamic to add it to the object. If the list is empty, the command defaults to
    -pos 0 0 0. Setting the -pos flag with objects named on the command line is an error.
    Flags:
    - attenuation : att (float) [query,edit]
    Attenuation rate of field
    - frequency : f (float) [query,edit]
    Frequency of turbulence field. This determines how often motion is disrupted.
    - magnitude : m (float) [query,edit]
    Strength of field. As this increases, the affected objects will move faster.
    - maxDistance : mxd (float) [query,edit]
    Maximum distance at which field is exerted.
    - name : n (unicode) [query,edit]
    name of field
    - noiseLevel : nsl (int) [query,edit]
    If the noiseLevel parameter is greater than zero, the field will do multiple lookups in the table. Each additional
    lookup is weighted using noiseRatio (which see). The noiseLevel is the number of additional lookups, so if noiseLevel is
    0, there is just one lookup. A value of 0 (the default) corresponds to the way the field behaved prior to Maya 3.0.
    - noiseRatio : nsr (float) [query,edit]
    If noiseLevel is greater than zero, then noiseRatio is the relative magnitude for each consecutive noise evaluation.
    These are cumulative: for example, if noiseRatio is 0.5, then the first evaluation is weighted 0.5, the second 0.25, and
    so on. Has no effect if noiseLevel is zero. Flag can appear in Create mode of command. Flag can have multiple arguments,
    passed either as a tuple or a list.
    - perVertex : pv (bool) [query,edit]
    Per-vertex application. If this flag is set true, then each individual point (CV, particle, vertex, etc.) of the chosen
    object exerts an identical copy of the force field. If this flag is set to false, then the force is exerted only from
    the geometric center of the set of points.
    - phase : p (float) [query,edit]
    Phase shift of turbulence field. This influences the direction of the disruption. This flag is obsolete and is retained
    only for backward compatibility. It is replaced by -phaseX, -phaseY, and -phaseZ. Setting -phase is identical to setting
    -phaseZ (the phase shift was always in the Z dimension).
    - phaseX : px (float) [query,edit]
    X component of phase shift of turbulence field. This influences the direction of the disruption.
    - phaseY : py (float) [query,edit]
    Y component of phase shift of turbulence field. This influences the direction of the disruption.
    - phaseZ : pz (float) [query,edit]
    Z component of phase shift of turbulence field. This influences the direction of the disruption.
    - position : pos (float, float, float) [query,edit]
    Position in space where you want to place a field. The field then emanates from this position in space rather than from
    an object. Note that you can both use -pos (creating a field at a position) and also provide object names.
    - torusSectionRadius : tsr (float) [query,edit]
    - volumeExclusion : vex (bool) [query,edit]
    - volumeOffset : vof (float, float, float) [query,edit]
    - volumeShape : vsh (unicode) [query,edit]
    - volumeSweep : vsw (float) [query,edit]
    Derived from mel command `maya.cmds.turbulence`
    """
    pass
def constrain(*args, **kwargs):
    """
    This command constrains rigid bodies to the world or other rigid bodies. In query mode, return type is based on queried
    flag.
    Flags:
    - barrier : br (bool) [create,query]
    Creates a barrier constraint. This command requires one rigid body.
    - damping : d (float) [create,query,edit]
    Sets the damping constant. Default value: 0.1 Range: -1000.0 to 1000.0
    - directionalHinge : dhi (bool) [create,query]
    Creates a directional hinge constraint. This command requires two rigid bodies. The directional hinge always maintains
    the initial direction of its axis.
    - hinge : hi (bool) [create,query]
    Creates a hinge constraint. This command requires one or two rigid bodies.
    - interpenetrate : i (bool) [create,query,edit]
    Allows (or disallows) the rigid bodies defined in the constraint to interpenetrate. Flag can appear in Create mode of
    command. Flag can have multiple arguments, passed either as a tuple or a list.
    - nail : na (bool) [create,query]
    Creates a nail constraint. This command requires one rigid body.
    - name : n (unicode) [create,query,edit]
    Names the rigid constraint.
    - orientation : o (float, float, float) [create,query,edit]
    Set initial orientation of the constraint in world space. This command is only valid with hinge and barrier constraints
    Default value: 0.0 0.0 0.0
    - pinConstraint : pin (bool) [create,query]
    Creates a pin constraint. This command requires two rigid bodies.
    - position : p (float, float, float) [create,query,edit]
    Set initial position of the constraint in world space. Default value: 0.0 0.0 0.0 for uni-constraints, midpoint of
    bodies for dual constraint.
    - restLength : rl (float) [create,query,edit]
    Sets the rest length. Default value: 1.0
    - spring : s (bool) [create,query]
    Creates a spring constraint. This command requires one or two rigid bodies.
    - stiffness : st (float) [create,query,edit]
    Sets the spring's stiffness constant. Default value: 5.0
    Derived from mel command `maya.cmds.constrain`
    """
    pass
def particleExists(*args, **kwargs):
    """
    This command is used to query if a particle or soft object with the given name exists. Either the transform or shape
    name can be used, as well as the name of the soft object.
    Derived from mel command `maya.cmds.particleExists`
    """
    pass
def resampleFluid(*args, **kwargs):
    """
    A command to extend the fluid grid, keeping the voxels the same size, and keeping the existing contents of the fluid in
    the same place. Note that the fluid transform is also modified to make this possible. In query mode, return type is
    based on queried flag.
    Flags:
    - resampleDepth : rd (int) [create,query,edit]
    Change depth resolution to this value. Flag can appear in Create mode of command. Flag can have multiple arguments,
    passed either as a tuple or a list.
    - resampleHeight : rh (int) [create,query,edit]
    Change height resolution to this value
    - resampleWidth : rw (int) [create,query,edit]
    Change width resolution to this value
    Derived from mel command `maya.cmds.resampleFluid`
    """
    pass
def rigidSolver(*args, **kwargs):
    """
    This command sets the attributes for the rigid solver. In query mode, return type is based on queried flag.
    Flags:
    - autoTolerances : at (bool) [query,edit]
    Turns the auto tolerance calculation on and off. The auto tolerances calculation will override the default or user
    defined values of the step size and collision tolerance value that is calculated based on the objects in the scene.
    Default: 0 (off)
    - bounciness : b (bool) [query,edit]
    Turns bounciness on and off for all the objects in the simulation. Default value: on
    - cacheData : cd (bool) [query,edit]
    Turns the cache on for all rigid bodies in the system. Default value: off
    - collide : c (bool) [query,edit]
    Disallows the interpenetration of the two rigid bodies listed. Default: Collide is on for all bodies.
    - collisionTolerance : ct (float) [query,edit]
    Sets the collision tolerance. This is the error at which two objects are considered to have collided. Range: 0.0005 -
    1.000 Default: 0.02
    - contactData : ctd (bool) [query,edit]
    Turns the contact data information on/off for all rigid bodies. Default value: off
    - create : cr (bool) [create]
    Creates a new rigid solver.
    - current : cu (bool) [create]
    Sets rigid solver as the current solver.
    - deleteCache : deleteCache (bool) [query,edit]
    Deletes the cache for all rigid bodies in the system.
    - displayCenterOfMass : dcm (bool) [query,edit]
    Displays the center of mass icon. Default value: on
    - displayConstraint : dc (bool) [query,edit]
    Displays the constraint vectors. Default value: on
    - displayVelocity : dv (bool) [query,edit]
    Displays the velocity vectors. Default value: off
    - dynamics : d (bool) [query,edit]
    Turns dynamics on and off for all the objects in the simulation. Default value: on
    - friction : f (bool) [query,edit]
    Turns friction on and off for all the objects in the simulation. Default value: on
    - interpenetrate : i (bool) [query,edit]
    Allows the two rigid bodies listed to interpenetrate. Default: interpenetration is off for all bodies.
    - interpenetrationCheck : ic (bool) [edit]
    Checks for interpenetrating rigid bodies in the scene.
    - name : n (unicode) []
    - rigidBodies : rb (bool) [query]
    Returns a list of rigid bodies in the solver.
    - rigidBodyCount : rbc (bool) [query]
    Returns the number of rigid bodies in the solver. Flag can appear in Create mode of command. Flag can have multiple
    arguments, passed either as a tuple or a list.
    - showCollision : sc (bool) [query,edit]
    Displays the colliding objects in a different color.
    - showInterpenetration : si (bool) [query,edit]
    Displays the interpenetrating objects in a different color.
    - solverMethod : sm (int) [query,edit]
    Sets the solver method. The choices are 0 | 1 | 2. 0 = Euler (fastest/least accurate), 1 = Runge-Kutta (slower/more
    accurate), 2 = adaptive Runge-Kutta (slowest/most accurate). The default is 2 (adaptive Runge-Kutta)
    - startTime : stt (float) [create,query,edit]
    Sets the start time for the solver.
    - state : st (bool) [query,edit]
    Turns the rigid solver on or off.
    - statistics : sta (bool) [query,edit]
    Turns the statistic information on/off for all rigid bodies. Default value: off
    - stepSize : s (float) [query,edit]
    Sets the solver's step size. This is the maximum size of a single step the solver will take at one time. Range: 0.0004 -
    0.100 Default: 0.0333
    - velocityVectorScale : vs (float) [query,edit]
    scales the velocity vector display. Default value: 1.0
    Derived from mel command `maya.cmds.rigidSolver`
    """
    pass
def dynPref(*args, **kwargs):
    """
    This action modifies and queries the current state of "autoCreate rigid bodies", "run up to current time", and "run up
    from" (previous time or start time). In query mode, return type is based on queried flag.
    Flags:
    - autoCreate : ac (bool) [create,query]
    If on, autoCreate rigid bodies.
    - echoCollision : ec (bool) [create,query]
    If on, will cause particle systems to echo to the Script Editor the command that they are running for each particle
    collision event. If off, only the output of the command will be echoed. Flag can appear in Create mode of command. Flag
    can have multiple arguments, passed either as a tuple or a list.
    - runupFrom : rf (int) [create,query]
    If on, run up from previous time; if 2, run up from start time
    - runupToCurrentTime : rt (bool) [create,query]
    If on, run up the scene to current time
    - saveOnQuit : sq (bool) [create,query]
    If on, save the current values of preferences to userPrefs file.
    - saveRuntimeState : sr (bool) [create,query]
    If on, runtime state as well as initial state of all particle objects will be saved to file. If off, only initial state
    will be saved.
    Derived from mel command `maya.cmds.dynPref`
    """
    pass
def getParticleAttr(*args, **kwargs):
    """
    This action will return either an array of values, or the average value and maximum offset, for a specified per-particle
    attribute of a particle object or component. If a particle component is specified on the command line, values are
    returned for that component only. If an object name is given instead, values are returned for all particles in that
    object. If no object name is passed, but a particle object or component is selected, values are returned for the
    selection. If you list components, they must all be from the same particle object; the action ignores all objects after
    the first. Likewise if you list more than one object, the action will return values only for the first one.
    Flags:
    - array : a (bool) [create]
    Tells the action whether you want a full array of data. If set true, the action returns an array of floats containing
    the values for all the specified particles. If set false (the default), the action returns the average value and the
    maximum offset from the average over the component. If the attribute is a vector attribute, the action returns six
    values: Average X, Average Y, Average Z, Maximum offset in X, Y, and Z of component. Flag can appear in Create mode of
    command. Flag can have multiple arguments, passed either as a tuple or a list.
    - attribute : at (unicode) [create]
    Tells the action which attribute you want the value of. Must be a per-particle attribute.
    - object : o (unicode) [create]
    This flag is obsolete. Instead of using it, please pass the name of the object and/or components you want on the command
    line. See the examples.
    Derived from mel command `maya.cmds.getParticleAttr`
    """
    pass
def uniform(*args, **kwargs):
    """
    A uniform field pushes objects in a fixed direction. The field strength, but not the field direction, depends on the
    distance from the object to the field location. The transform is the associated dependency node. Use connectDynamic to
    cause the field to affect a dynamic object. If fields are created, this command returns the names of each of the fields.
    If a field was queried, the results of the query are returned. If a field was edited, the field name is returned. If
    object names are provided or the active selection list is non-empty, the command creates a field for every object in the
    list and calls addDynamic to add it to the object. If the list is empty, the command defaults to -pos 0 0 0. Setting the
    -pos flag with objects named on the command line is an error.
    Flags:
    - attenuation : att (float) [query,edit]
    Attenuation rate of field
    - directionX : dx (float) [query,edit]
    X-component of direction.
    - directionY : dy (float) [query,edit]
    Y-component of direction.
    - directionZ : dz (float) [query,edit]
    Z-component of direction. Flag can appear in Create mode of command. Flag can have multiple arguments, passed either as
    a tuple or a list.
    - magnitude : m (float) [query,edit]
    Strength of field.
    - maxDistance : mxd (float) [query,edit]
    Maximum distance at which field is exerted. -1 indicates that the field has no maximum distance.
    - name : n (unicode) [query,edit]
    name of field
    - perVertex : pv (bool) [query,edit]
    Per-vertex application. If this flag is set true, then each individual point (CV, particle, vertex, etc.) of the chosen
    object exerts an identical copy of the force field. If this flag is set to false, then the force is exerted only from
    the geometric center of the set of points.
    - position : pos (float, float, float) [query,edit]
    Position in space where you want to place a field. The field then emanates from this position in space rather than
    from an object. Note that you can both use -pos (creating a field at a position) and also provide object names.
    - torusSectionRadius : tsr (float) [query,edit]
    - volumeExclusion : vex (bool) [query,edit]
    - volumeOffset : vof (float, float, float) [query,edit]
    - volumeShape : vsh (unicode) [query,edit]
    - volumeSweep : vsw (float) [query,edit]
    Derived from mel command `maya.cmds.uniform`
    """
    pass
def soft(*args, **kwargs):
    """
    Makes a soft body from the object(s) passed on the command line or in the selection list. The geometry can be a NURBS,
    polygonal, lattice object. The resulting soft body is made up of a hierarchy with a particle shape and a geometry shape,
    thus: T / \ T G / P Dynamics are applied to the particle shape and the resulting particle
    positions then drive the locations of the geometry's CVs, vertices, or lattice points. With the convert option, the
    particle shape and its transform are simply inserted below the original shape's hierarchy. With the duplicate option,
    the original geometry's transform and shape are duplicated underneath its parent, and the particle shape is placed under
    the duplicate. Note that any animation on the hierarchy will affect the particle shape as well. If you do not want this,
    then reparent the structure outside the hierarchy. When duplicating, the soft portion (the duplicate) is given the name
    "copyOf" + "original object name". The particle portion is always given the name "original object name" + "Particles."
    None of the flags of the soft command can be queried. The soft -q command is used only to identify when an object is a
    soft body, or when two objects are part of the same soft body. See the examples.
    Flags:
    - convert : c (bool) [create]
    This tells the command that you want the original object to be the actual deformed object. The particle shape portion of
    the soft body will be inserted in the same hierarchy as the original, under the same parent (with one additional
    intervening transform which is initially the identity). If no flags are passed, then this is assumed. The combination -c
    -h 1 is not valid; if you have this in your scripts, remove the -h 1.
    - duplicate : d (bool) [create]
    This tells the command that you want to make a copy of the original object and use the copy as the deforming geometry.
    Input connections to the original object are duplicated. You would do this if you wanted to keep the original object in
    your scene and also have a copy of it that was a soft body. This flag and -dh are mutually exclusive.
    - duplicateHistory : dh (bool) [create]
    This is the same as -d, except that upstream history, is duplicated as well, instead of just input connections. This
    flag and -d are mutually exclusive.
    - goal : g (float) [create]
    This is the same as -d, but in addition it tells the command that you want the resulting soft body to try to follow the
    original geometry, using the set goal weight as the value that controls how strongly it is to follow it. A value of 1.0
    will try to follow exactly, and a value of 0.0 will not follow at all. The default value is 0.5. This value must be from
    0.0 to 1.0. You could use -d with -g, but it is redundant. If you want history to be duplicated, you can use -dh and -g
    together.
    - hideOriginal : h (bool) [create]
    This flag is used only when duplicating (-d, -g, or -dh). If set to true, whichever of the two objects is NOT the soft
    object will be hidden. In other words, with -d -h true, the original object will be hidden; with -d -c -h 1 the
    duplicate object will be hidden. You would typically do this if you want to use the non-dynamic object as a goal for the
    soft one (see -g) but you do not want it visible in the scene. The flags -h 1 and -c are mutually exclusive.
    - name : n (unicode) []
    This flag is obsolete. If you wish to give your new soft object a name, use the rename command (or from the UI, use the
    outliner). Flag can appear in Create mode of command. Flag can have multiple arguments, passed either as a tuple or a
    list.
    Derived from mel command `maya.cmds.soft`
    """
    pass
def nParticle(*args, **kwargs):
    """
    The nParticle command creates a new nParticle object from a list of world space points. If an nParticle object is
    created, the command returns the names of the new particle shape and its associated particle object dependency node. If
    an object was queried, the results of the query are returned. Per particle attributes can be queried using the
    particleId or the order of the particle in the particle array. If an object was edited, nothing is returned.

    Flags:
    - attribute : at (unicode) [query,edit]
    Used in per particle attribute query and edit. Specifies the name of the attribute being queried or edited.
    - cache : ch (bool) [create,query,edit]
    Turns caching on/off for the particle shape.
    - conserve : c (float) [query,edit]
    Conservation of momentum control (between 0 and 1). Specifies the fraction of the particle shape's existing momentum
    which is conserved from frame to frame. A value of 1 (the default) corresponds to true Newtonian physics, in which
    momentum is conserved.
    - count : ct (bool) [query]
    Returns the number of particles in the object.
    - deleteCache : dc (bool) [create]
    Deletes the particle shapes cache. This command is not undoable.Flag can appear in Create mode of commandFlag can have
    multiple arguments, passed either as a tuple or a list.
    - dynamicAttrList : dal (bool) [query]
    Returns a list of the dynamic attributes in the object.
    - floatValue : fv (float) [edit]
    Used only in per particle attribute edit. Specifies that the edit is of a float attribute and must be followed by the
    new float value.
    - gridSpacing : grs (float) [create,query]
    Spacing between particles in the grid.
    - inherit : i (float) [query,edit]
    Inherit this fraction (0-1) of emitting object's velocity.
    - jitterBasePoint : jbp (float, float, float) [create,query]
    Base point (center point) for jitters. The command will create one swatch of jitters for each base point. It will pair
    up other flags with base points in the order they are given in the command line. If not enough instances of the other
    flags are available, the last one on the line will be used for all other instances of -jbp.
    - jitterRadius : jr (float) [create,query]
    Max radius from the center to place the particle instances.
    - lowerLeft : ll (float, float, float) [create,query]
    Lower left point of grid.
    - name : n (unicode) [query,edit]
    name of particle object
    - numJitters : nj (int) [create,query]
    Number of jitters (instances) per particle.
    - order : order (int) [query,edit]
    Used in per particle attribute query and edit. Specifies the zero-based order (index) of the particle whose attribute is
    being queried or edited in the particle array. Querying the value of a per particle attribute requires the -attribute
    and -id or -order flags and their arguments to precede the -q flag.
    - particleId : id (int) [query,edit]
    Used in per particle attribute query and edit. Specifies the id of the particle whose attribute is being queried or
    edited. Querying the value of a per particle attribute requires the -attribute and -id or -order flags and their
    arguments to precede the -q flag.
    - perParticleDouble : ppd (bool) [query]
    Returns a list of the per-particle double attributes, excluding initial-state, cache, and information-only attributes.
    - perParticleVector : ppv (bool) [query]
    Returns a list of the per-particle vector attributes, excluding initial-state, cache, and information-only attributes.
    - position : p (float, float, float) []
    World-space position of each particle.
    - shapeName : sn (unicode) [query,edit]
    Specify the shape name used for geometry instancing. Do not confuse this with the -n flag which names the particle
    object.
    - upperRight : ur (float, float, float) [create,query]
    Upper right point of grid.
    - vectorValue : vv (float, float, float) [edit]
    Used only in per particle attribute edit. Specifies that the edit is of a vector attribute and must be followed by all
    three float values for the vector.

    Derived from mel command `maya.cmds.nParticle`
    """
    pass
def emit(*args, **kwargs):
    """
    The emit action allows users to add particles to an existing particle object without the use of an emitter. At the same
    time, it allows them to set any per-particle attribute for the particles that are created with the action.The particles
    created do not become a part of the initial state for the particle object, and will disappear when the scene is rewound
    unless they are saved into the initial state by the user explicitly. In addition, a particle object will accept
    particles from an emit action ONLY at frames greater than or equal to its start frame. For example, if you want to use
    the emit action to create particles at frame -5, you must set startFrame for that particle shape to -5 or less.Unlike
    many commands or actions, the emit action uses the order of its flags as important information as to how it works. The
    -object and -position flags can appear anywhere in the argument list. The -attribute and the value flags are interpreted
    based on their order. Any value flags after an -attribute flag and before the next -attribute flag will set the values
    for the attribute specified by the closest -attribute flag before them in the argument list. See the Examples section
    below for more detail on how these flags work.Currently, no creation expression is executed for the new particles unless
    they are created from within a particle expression defined with the dynExpressioncommand or the Expression Editor. If
    you want any particular values put into the particles at the time they are created, then those values should be set
    using the -attribute, -vectorValue, and -floatValue flags.

    Flags:
    - attribute : at (unicode) [create]
    Specifies the attribute on the particle object that any value flags following it and before the next -attribute flag
    will be associated with. The same attribute can be specified later in the command to pick up where the first one left
    off. The attributes used must be per-particle attributes. This will accept both long and short names for the attributes.
    Note the per-particle attribute must already exist on the particle object prior to being specified via this command
    flag.
    - floatValue : fv (float) [create]
    Sets the float value to be used for the "current" attribute of the "current" particle. By current attribute, it is meant
    the attribute specified by the most recent -attribute flag. By current particle, it is meant the particle in the list of
    -position flags that corresponds to the number of values that have been set for the "current" attribute. If the current
    attribute is a vector-per-particle attribute, then the float value specified will be used for all three components of
    the vector.Flag can appear in Create mode of commandFlag can have multiple arguments, passed either as a tuple or a
    list.
    - object : o (unicode) [create]
    This flag takes the name of a particleShape or the transform directly above it in the DAG as its parent. It specifies
    which object to add the particles to. This flag must be passed, as the selection list is ignored for this action.
    - position : pos (float, float, float) [create]
    Specifies the positions in the particle object's space (usually world space) where the particles are to be created. One
    particle is created for each occurrence of this flag.
    - vectorValue : vv (float, float, float) [create]
    Sets the vector value to be used for the "current" attribute of the "current" particle. By current attribute, it is
    meant the attribute specified by the most recent -attribute flag. By current particle, it is meant the particle in the
    list of -position flags that corresponds to the number of values that have been set for the "current" attribute. If the
    current attribute is a float-per-particle attribute, then the length of the vector described by this flag will be used.
    The length is computed as sqrt(xVal^2 + yVal^2 + zVal^2).

    Derived from mel command `maya.cmds.emit`
    """
    pass
def paintEffectsDisplay(*args, **kwargs):
    """
    Command to set the global display methods for paint effects items. In query mode, the return type is based on the
    queried flag.

    Flags:
    - meshDrawEnable : me (bool) [create,query]
    Set whether mesh draw is enabled on objectsFlag can appear in Create mode of commandFlag can have multiple arguments,
    passed either as a tuple or a list.

    Derived from mel command `maya.cmds.paintEffectsDisplay`
    """
    pass
def gravity(*args, **kwargs):
    """
    A gravity field simulates the Earth's gravitational force. It pulls objects in a fixed direction (generally downward)
    entirely independent of their position or mass. The transform is the associated dependency node. Use connectDynamic to
    cause the field to affect a dynamic object. If fields are created, this command returns the names of each of the fields.
    If a field was queried, the results of the query are returned. If a field was edited, the field name is returned. If
    object names are provided or the active selection list is non-empty, the command creates a field for every object in the
    list and calls addDynamic to add it to the object. If the list is empty, the command defaults to -pos 0 0 0. Setting the
    -pos flag with objects named on the command line is an error. The default for -dx -dy -dz is always the opposite of the
    current up direction. For example, if the current up direction is (0,1,0) (a standard Maya configuration), then the
    gravity default is -dx 0 -dy -1 -dz 0. The default for -a is 9.8. 9.8 meters per second squared happens to be standard
    Earth gravity, but in fact Maya interprets this value as centimeters per second squared. If we were to use it as meters
    per second squared then with default Maya units, your particles would vanish almost in the wink of an eye. If you want a
    different value, set it in the gravity option box.

    Flags:
    - attenuation : att (float) [query,edit]
    Attenuation rate of field
    - directionX : dx (float) [query,edit]
    X-component of direction.
    - directionY : dy (float) [query,edit]
    Y-component of direction.
    - directionZ : dz (float) [query,edit]
    Z-component of directionFlag can appear in Create mode of commandFlag can have multiple arguments, passed either as a
    tuple or a list.
    - magnitude : m (float) [query,edit]
    Strength of field.
    - maxDistance : mxd (float) [query,edit]
    Maximum distance at which field is exerted. -1 indicates that the field has no maximum distance.
    - name : n (unicode) [query,edit]
    name of field
    - perVertex : pv (bool) [query,edit]
    Per-vertex application. If this flag is set true, then each individual point (CV, particle, vertex, etc.) of the chosen
    object exerts an identical copy of the force field. If this flag is set to false, then the force is exerted only from
    the geometric center of the set of points.
    - position : pos (float, float, float) [query,edit]
    Position in space where you want to place a field. The gravity then emanates from this position in space rather than
    from an object. Note that you can both use -pos (creating a field at a position) and also provide object names.
    - torusSectionRadius : tsr (float) [query,edit]
    - volumeExclusion : vex (bool) [query,edit]
    - volumeOffset : vof (float, float, float) [query,edit]
    - volumeShape : vsh (unicode) [query,edit]
    - volumeSweep : vsw (float) [query,edit]

    Derived from mel command `maya.cmds.gravity`
    """
    pass
def particleFill(*args, **kwargs):
    """
    This command generates an nParticle system that fills the selected object with a grid of particles.

    Flags:
    - closePacking : cp (bool) [create]
    If this is on then the particles are positioned as closely as possible in a hexagonal close packing arrangement.
    Otherwise particles are packed in a uniform grid lattice.Flag can appear in Create mode of commandFlag can have multiple
    arguments, passed either as a tuple or a list.
    - doubleWalled : dw (bool) [create]
    This flag should be used if the thickness of the object to fill has been modeled (for example, a mug). Otherwise the
    particles will be created inside the wall. Note that doubleWalled will not handle some cases very well. For example a
    double walled donut shape may get the center region of the donut filled. In cases like this it may be better to make the
    internal wall a separate mesh then fill that without using doubleWalled.
    - maxX : mxx (float) [create]
    The fill max bounds of the particles in X relative to the X bounds of the object. A value of zero is totally empty and
    one is totally full. The default value is 1, or fully filled.
    - maxY : mxy (float) [create]
    The fill max bounds of the particles in Y relative to the Y bounds of the object. A value of zero is totally empty and
    one is totally full. The default value is 1, or fully filled.
    - maxZ : mxz (float) [create]
    The fill max bounds of the particles in Z relative to the Z bounds of the object. A value of zero is totally empty and
    one is totally full. The default value is 1, or fully filled.
    - minX : mnx (float) [create]
    The fill lower bounds of the particles in X relative to the X bounds of the object. A value of zero is totally full and
    one is totally empty. The default value is 0, or fully filled.
    - minY : mny (float) [create]
    The fill lower bounds of the particles in Y relative to the Y bounds of the object. A value of zero is totally full and
    one is totally empty. The default value is 0, or fully filled.
    - minZ : mnz (float) [create]
    The fill lower bounds of the particles in Z relative to the Z bounds of the object. A value of zero is totally full and
    one is totally empty. The default value is 0, or fully filled.
    - particleDensity : pd (float) [create]
    This controls the size of the particles. At a value of 1.0 the particle size will exactly match the grid spacing
    determined by the resolution parameter and the object bounds. Particles which overlap the surface will be rejected even
    if the center of the particle is inside.
    - resolution : rs (int) [create]
    This determines the total number of particles generated. It represent the resolution along the largest axis of the
    object's bounding box. For a cube shape the total potential particles will be the cube of the resolution. For other
    shapes it will be less. The default value for this flag is 10, so 1000 particles could be generated for a cube shape.

    Derived from mel command `maya.cmds.particleFill`
    """
    pass
def setFluidAttr(*args, **kwargs):
    """
    Sets values of built-in fluid attributes such as density, velocity, etc., for individual grid cells or for all cells in
    the grid.

    Flags:
    - addValue : ad (bool) []
    Add specified value to attribute
    - attribute : at (unicode) [create]
    Specifies the fluid attribute for which to set values. Valid attributes are "velocity", "density", "fuel", "color",
    "falloff", and "temperature".
    - clear : cl (bool) []
    Set this attribute to 0
    - floatRandom : fr (float) []
    If this was a scalar (e.g. density) attribute, use a random value in the range +/-VALUE. If fv is specified, it is used
    as the base value and combined with the random value. If the fv flag is not specified, the base is assumed to be 0.
    - floatValue : fv (float) []
    If this was a scalar (e.g. density) attribute, use this value
    - lowerFace : lf (bool) [create]
    Only valid with "-at velocity". Since velocity values are stored on the edges of each voxel and not at the center, using
    voxel based indices to set velocity necessarily affects neighboring voxels. Use this flag to only set velocity
    components on the lower left three faces of a voxel, rather than all six.Flag can appear in Create mode of commandFlag
    can have multiple arguments, passed either as a tuple or a list.
    - reset : re (bool) []
    Set this attribute to default value
    - vectorRandom : vr (float, float, float) []
    If this was a vector (e.g. velocity) attribute, use a random value in the range +/-VALUE. If vv is specified, it is used
    as the base value and combined with the random value. If the vv flag is not specified, the base is assumed to be 0,0,0.
    - vectorValue : vv (float, float, float) []
    If this was a vector (e.g. velocity) attribute, use this value
    - xIndex : xi (int) [create]
    Only return values for cells with this X index
    - xvalue : x (bool) []
    Only set the first component of the vector-valued attribute specified by the "-at/attribute" flag.
    - yIndex : yi (int) [create]
    Only return values for cells with this Y index
    - yvalue : y (bool) []
    Only set the second component of the vector-valued attribute specified by the "-at/attribute" flag.
    - zIndex : zi (int) [create]
    Only return values for cells with this Z index
    - zvalue : z (bool) []
    Only set the third component of the vector-valued attribute specified by the "-at/attribute" flag.

    Derived from mel command `maya.cmds.setFluidAttr`
    """
    pass
def dynExpression(*args, **kwargs):
    """
    This command describes an expression that belongs to the specified particle shape. The expression is a block of code of
    unlimited length with a C-like syntax that can perform conversions, mathematical operations, and logical decision making
    on any numeric attribute(s) or per-particle attribute(s) in the scene. One expression can read and alter any number of
    these attributes. Every particle shape in your scene has three expressions, one for the runtimeBeforeDynamics, one for
    the runtimeAfterDynamics and one for creation time. The create expression gets executed for every particle in the object
    whose age is 0.0. The runtime expression gets executed for each particle with an age greater than 0.0. Unlike
    expressions created with the expressioncommand, particle expressions always exist and are a part of the owning particle
    object's shape. They default to empty strings, but they are always there. Because of this, there is no need to use the
    '-e' flag. Every call to the dynExpression command is considered an edit by default. Per-particle attributes are those
    attributes of a particle shape that have a potentially different value for each particle in the object. Examples of
    these include positionand velocity. If this command is being sent by the command line or in a script, then the user
    should be sure to embed escaped newlines (\\n), tabs (\\t) for clarity when reading them in the expression editor. Also,
    quotes in an expression must be escaped (\\") so that they are not confused by the system as the end of your string. When
    using the expression editor, these characters are escaped for you unless they are already within quotes. This type of
    expression is executed during the evaluation of the dynamics. If an output of the expression is requested before that,
    then the dynamics will be force to compute at that time. If dynamics is disabled, then these expressions will have no
    effect.

    Flags:
    - creation : c (bool) [create,query,edit]
    Tells the command that the string passed will be a creation expression for the particle shape. This means that this
    expression will be executed when a particle is emitted or at the beginning of the scene for existing particles.
    - name : n (unicode) []
    - runtime : r (bool) []
    - runtimeAfterDynamics : rad (bool) [create,query,edit]
    Tells the command that the string passed will be a runtime expression for the particle shape. This expression will be
    executed after dynamics whenever a particle's age is greater than zero (0).Flag can appear in Create mode of commandFlag
    can have multiple arguments, passed either as a tuple or a list.
    - runtimeBeforeDynamics : rbd (bool) [create,query,edit]
    Tells the command that the string passed will be a runtime expression for the particle shape. This expression will be
    executed before dynamics whenever a particle's age is greater than zero (0).
    - string : s (unicode) [create,edit]
    Set the expression string. This is queriable with the -q/query flag and the -rbd/runtimeBeforeDynamics, the
    -rad/runtimeAfterDynamics or the -c/creation flag.

    Derived from mel command `maya.cmds.dynExpression`
    """
    pass
def connectDynamic(*args, **kwargs):
    """
    Dynamic connection specifies that the force fields, emitters, or collisions of an object affect another dynamic object.
    The dynamic object that is connected to a field, emitter, or collision object is influenced by those fields, emitters
    and collision objects. Connections are made to individual fields, emitters, collisions. So, if an object owns several
    fields, if the user wants some of the fields to affect an object, s/he can specify each field with a "-f" flag; but if
    the user wants to connect all the fields owned by an object, s/he can specify the object name with the "-f" flag. The
    same is true for emitters and collisions. Different connection types (i.e., for fields vs. emitters) between the same
    pair of objects are logically independent. In other words, an object can be influenced by another object's fields
    without being influenced by its emitters or collisions. Each connected object must be a dynamic object. The object
    connected to can be any object that owns fields, emitters, etc.; it need not be dynamic. Objects that can own influences
    are particles, geometry objects (NURBS and polygons) and lattices. You can specify either the shape name or the
    transform name of the object to be influenced.

    Flags:
    - collisions : c (unicode) [create]
    Connects each object to the collision models of the given object.
    - delete : d (bool) [create]
    Deletes existing connections.Flag can appear in Create mode of commandFlag can have multiple arguments, passed either as
    a tuple or a list.
    - emitters : em (unicode) [create]
    Connects each object to the emitters of the given object.
    - fields : f (unicode) [create]
    Connects each object to the fields of the given object.

    Derived from mel command `maya.cmds.connectDynamic`
    """
    pass
def dynGlobals(*args, **kwargs):
    """
    This node edits and queries the attributes of the active dynGlobals node in the scene. There can be only one active node
    of this type. The active dynGlobals node is the first one that was created, either with a "createNode" command or by
    accessing/editing any of the attributes on the node through this command.

    Flags:
    - active : a (bool) [query]
    This flag returns the name of the active dynGlobals node in the scene. Only one dynGlobals node is active. It is the
    first one created. Any additional dynGlobals nodes will be ignored.
    - listAll : la (bool) [query]
    This flag will list all of the dynGlobals nodes in the scene.Flag can appear in Create mode of commandFlag can have
    multiple arguments, passed either as a tuple or a list.
    - overSampling : os (int) [query,edit]
    This flag will set the current overSampling value for all of the particles in the scene.

    Derived from mel command `maya.cmds.dynGlobals`
    """
    pass
def particle(*args, **kwargs):
    """
    The particle command creates a new particle object from a list of world space points. If a particle object is created,
    the command returns the names of the new particle shape and its associated particle object dependency node. If an object
    was queried, the results of the query are returned. Per particle attributes can be queried using the particleId or the
    order of the particle in the particle array. If an object was edited, nothing is returned.

    Flags:
    - attribute : at (unicode) [query,edit]
    Used in per particle attribute query and edit. Specifies the name of the attribute being queried or edited.
    - cache : ch (bool) [create,query,edit]
    Turns caching on/off for the particle shape.
    - conserve : c (float) [query,edit]
    Conservation of momentum control (between 0 and 1). Specifies the fraction of the particle shape's existing momentum
    which is conserved from frame to frame. A value of 1 (the default) corresponds to true Newtonian physics, in which
    momentum is conserved.
    - count : ct (bool) [query]
    Returns the number of particles in the object.
    - deleteCache : dc (bool) [create]
    Deletes the particle shapes cache. This command is not undoable.Flag can appear in Create mode of commandFlag can have
    multiple arguments, passed either as a tuple or a list.
    - dynamicAttrList : dal (bool) [query]
    Returns a list of the dynamic attributes in the object.
    - floatValue : fv (float) [edit]
    Used only in per particle attribute edit. Specifies that the edit is of a float attribute and must be followed by the
    new float value.
    - gridSpacing : grs (float) [create,query]
    Spacing between particles in the grid.
    - inherit : i (float) [query,edit]
    Inherit this fraction (0-1) of emitting object's velocity.
    - jitterBasePoint : jbp (float, float, float) [create,query]
    Base point (center point) for jitters. The command will create one swatch of jitters for each base point. It will pair
    up other flags with base points in the order they are given in the command line. If not enough instances of the other
    flags are available, the last one on the line will be used for all other instances of -jbp.
    - jitterRadius : jr (float) [create,query]
    Max radius from the center to place the particle instances.
    - lowerLeft : ll (float, float, float) [create,query]
    Lower left point of grid.
    - name : n (unicode) [query,edit]
    name of particle object
    - numJitters : nj (int) [create,query]
    Number of jitters (instances) per particle.
    - order : order (int) [query,edit]
    Used in per particle attribute query and edit. Specifies the zero-based order (index) of the particle whose attribute is
    being queried or edited in the particle array. Querying the value of a per particle attribute requires the -attribute
    and -id or -order flags and their arguments to precede the -q flag.
    - particleId : id (int) [query,edit]
    Used in per particle attribute query and edit. Specifies the id of the particle whose attribute is being queried or
    edited. Querying the value of a per particle attribute requires the -attribute and -id or -order flags and their
    arguments to precede the -q flag.
    - perParticleDouble : ppd (bool) [query]
    Returns a list of the per-particle double attributes, excluding initial-state, cache, and information-only attributes.
    - perParticleVector : ppv (bool) [query]
    Returns a list of the per-particle vector attributes, excluding initial-state, cache, and information-only attributes.
    - position : p (float, float, float) []
    World-space position of each particle.
    - shapeName : sn (unicode) [query,edit]
    Specify the shape name used for geometry instancing. Do not confuse this with the -n flag which names the particle
    object.
    - upperRight : ur (float, float, float) [create,query]
    Upper right point of grid.
    - vectorValue : vv (float, float, float) [edit]
    Used only in per particle attribute edit. Specifies that the edit is of a vector attribute and must be followed by all
    three float values for the vector.

    Derived from mel command `maya.cmds.particle`
    """
    pass
def collision(*args, **kwargs):
    """
    The collision command causes particles to collide with geometry. It also allows you to specify values for the surface
    properties (friction and resilience) of the collision. These values are stored in the geoConnector node for the geometry
    object. Unlike earlier versions of Maya, there is no separate "collision node." If a soft object is in the selection
    list, the collision command assumes that you want to make it a collider. In order to make the soft object collide with
    something use, use connectDynamic -c. The collision menu option sorts this out using the lead object rule and issues the
    necessary commands. On creation, this command returns a string array of the geometry names that were setup for particle
    collision.When the command is used to query information, there are several possible return types. These include: If the
    -resilience or -friction flag is passed on the command line and a single collision geometry is either selected or on the
    command line, then resilience or friction value for that collision geometry is returned as a single float value.If the
    -resilience or -friction flag is passed on the command line and a single collision geometry and a single particle object
    are either selected or on the command line, then two results might occur. If the particle object is not set up to
    collide with the geometry, then an error is displayed stating that. If the objects are set up to collide with each
    other, then the resilience or friction value that the particle object is using for collisions with the geometry is
    returned as a single float value. This can be different than the geometry's resilience and friction, because the user may
    break the connection from the geometry's geoConnector node's resilience or friction to the particle, and set a different
    value in the particle's collisionResilience, collisionFriction or collisionOffset attribute that is used for that
    geometry. This allows the user to make each particle respond to the same surface differently.If neither flag is passed on
    the command line and a single geometry and single particle object are either selected or on the command line, then a
    single integer value of 1 is returned if the objects are set up to collide with each other, and 0 is returned if they are
    not.Lastly, if no flags are passed on the command line and a single particle object is either selected or on the command
    line, then a string array with the names of all geometries that the particle object will collide against and the
    multiIndex that the geometries are connected to is returned. The array is formatted as follows: pPlaneShape1:0
    pPlaneShape2:2 nurbsSphereShape1:3...where the number following the ":" is the multiIndex.

    Flags:
    - friction : f (float) [query,edit]
    Friction of the surface. This is the amount of the colliding particle's velocity parallel to the surface which is
    removed when the particle collides. A value of 0 will mean that no tangential velocity is lost, while a value of 1 will
    cause the particle to reflect straight along the normal of the surface.Flag can appear in Create mode of commandFlag can
    have multiple arguments, passed either as a tuple or a list.
    - name : n (unicode) []
    This flag is obsolete. In maya 2.0, there is no longer a separate "collision node," thus there is nothing to name. See
    the collision documentation. This flag is included only to allow scripts written with older versions of Maya to run. It
    will give you a warning message but will not do anything.
    - offset : o (float) []
    - resilience : r (float) [query,edit]
    Resilience of the surface. This is the amount of the colliding particle's velocity reflected along the normal of the
    surface. A value of 1 will give perfect reflection, while a value of 0 will have no reflection along the normal of the
    surface.

    Derived from mel command `maya.cmds.collision`
    """
    pass
def emitter(*args, **kwargs):
    """
    Creates an emitter object. If object names are provided or if objects are selected, applies the emitter to the
    named/selected object(s) in the scene. Particles will then be emitted from each. If no objects are named or selected, or
    if the -pos option is specified, creates a positional emitter. If an emitter was created, the command returns the name
    of the object owning the emitter, and the name of emitter shape. If an emitter was queried, the command returns the
    results of the query. Keyframeable attributes of the emitter node: rate, directionX, directionY, directionZ,
    minDistance, maxDistance, spread.
    Flags:
    - alongAxis : alx (float) [query,edit]
    Initial velocity multiplier in the direction along the central axis of the volume. See the diagrams in the
    documentation. Applies only to volume emitters.
    - aroundAxis : arx (float) [query,edit]
    Initial velocity multiplier in the direction around the central axis of the volume. See the diagrams in the
    documentation. Applies only to volume emitters.
    - awayFromAxis : afx (float) [query,edit]
    Initial velocity multiplier in the direction away from the central axis of the volume. See the diagrams in the
    documentation. Used only with the cylinder, cone, and torus volume emitters.
    - awayFromCenter : afc (float) [query,edit]
    Initial velocity multiplier in the direction away from the center point of a cube or sphere volume emitter. Used only
    with the cube and sphere volume emitters.
    - cycleEmission : cye (unicode) [query,edit]
    Possible values are "none" and "frame." Cycling emission restarts the random number stream after a specified interval.
    This can either be a number of frames or a number of emitted particles. In each case the number is specified by the
    cycleInterval attribute. Setting cycleEmission to "frame" and cycleInterval to 1 will then re-start the random stream
    every frame. Setting cycleInterval to values greater than 1 can be used to generate cycles for games work.
    - cycleInterval : cyi (int) [query,edit]
    Specifies the number of frames or particles between restarts of the random number stream. See cycleEmission. Has no
    effect if cycleEmission is set to None.
    - directionX : dx (float) [query,edit]
    x-component of emission direction. Used for directional emitters, and for volume emitters with directionalSpeed.
    - directionY : dy (float) [query,edit]
    y-component of emission direction. Used for directional emitters, and for volume emitters with directionalSpeed.
    - directionZ : dz (float) [query,edit]
    z-component of emission direction. Used for directional emitters, and for volume emitters with directionalSpeed.
    - directionalSpeed : drs (float) [query,edit]
    For volume emitters only, adds a component of speed in the direction specified by the directionX, Y, and Z attributes.
    Applies only to volume emitters. Does not apply to other types of emitters.
    - maxDistance : mxd (float) [query,edit]
    Maximum distance at which emission ends.
    - minDistance : mnd (float) [query,edit]
    Minimum distance at which emission starts.
    - name : n (unicode) [query,edit]
    - needParentUV : nuv (bool) [query,edit]
    If aNeedParentUV is true, compute parentUV value from each triangle or each line segment, then send out to the target
    particle object. You also need to add parentU and parentV attributes to the particle object to store these values.
    - normalSpeed : nsp (float) [query,edit]
    Normal speed multiple for point emission. For each emitted particle, multiplies the component of the velocity normal to
    the surface or curve by this amount. Surface and curve emitters only.
    - position : pos (float, float, float) [query,edit]
    Positional emitter at world space location (x,y,z).
    - randomDirection : rnd (float) [query,edit]
    Magnitude of a random component of the speed from volume emission.
    - rate : r (float) [query,edit]
    Rate at which particles emitted (can be non-integer). For point emission this is rate per point per unit time. For
    surface emission it is rate per square unit of area per unit time.
    - scaleRateByObjectSize : sro (bool) [query,edit]
    Applies to curve and surface emitters, only. If true, number of particles is determined by object size (area or length)
    times rate value. If false, object size is ignored and the rate value is used without modification. The former is the
    way Maya behaved prior to version 3.0.Flag can appear in Create mode of commandFlag can have multiple arguments, passed
    either as a tuple or a list.
    - scaleSpeedBySize : ssz (bool) [query,edit]
    Indicates whether the scale of a volume emitter affects its velocity.
    - speed : spd (float) [query,edit]
    Speed multiple. Multiplies the velocity of the emitted particles by this amount. Does not apply to volume emitters. For
    that emitter type, use directionalSpeed.
    - speedRandom : srn (float) [query,edit]
    Identifies a range of random variation for the speed of each generated particle. If set to a non-zero value, speed
    becomes the mean value of the generated particles, whose speeds vary by a random amount up to plus or minus
    speedRandom/2. For example, speed 5 and speedRandom 2 will make the speeds vary between 4 and 6.
    - spread : sp (float) [query,edit]
    Random spread (0-1), as a fraction of 90 degrees, along specified direction. Directional emitters only.
    - tangentSpeed : tsp (float) [query,edit]
    Tangent speed multiple for point emission. For each emitted particle, multiplies the component of the velocity tangent
    to the surface or curve by this amount. Surface and curve emitters only.
    - torusSectionRadius : tsr (float) [query,edit]
    Section radius for a torus volume. Applies only to torus. Similar to the section radius in the torus modelling
    primitive.
    - type : typ (unicode) [query,edit]
    Type of emitter. The choices are omni | dir | direction | surf | surface | curve | curv. The default is omni. The full
    definition of these types are: omnidirectional point emitter, directional point emitter, surface emitter, or curve
    emitter.
    - volumeOffset : vof (float, float, float) [query,edit]
    Volume offset of the emitter. Volume offset translates the emission volume by the specified amount from the actual
    emitter location. This is in the emitter's local space.
    - volumeShape : vsh (unicode) [query,edit]
    Volume shape of the emitter. Sets/edits/queries the field's volume shape attribute. If set to any value other than
    "none", determines a 3-D volume within which particles are generated. Values are: "cube," "sphere," "cylinder," "cone,"
    "torus."
    - volumeSweep : vsw (float) [query,edit]
    Volume sweep of the emitter. Applies only to sphere, cone, cylinder, and torus. Similar effect to the sweep attribute in
    modelling.
    Derived from mel command `maya.cmds.emitter`
    """
    pass
def truncateHairCache(*args, **kwargs):
    """
    Set the end time of a hair cache to the current time.

    When the current time falls before the cache's end time, the cache is
    trimmed so that only the portion up to and including the current time
    is kept. In query mode, return type is based on queried flag.

    Derived from mel command `maya.cmds.truncateHairCache`
    """
    return None
def arrayMapper(*args, **kwargs):
    """
    Create an arrayMapper node and connect it to a target object. If the -type flag is used, then this command also creates
    an external node used for computing the output values. If the input attribute does not already exist, it will be
    created. The output attribute must exist. If a flag is omitted, the selection list will be used to supply the needed
    objects. If none are found, that action is omitted.
    Flags:
    - destAttr : da (unicode) [create]
    Specifies the attribute which will be the downstream connection for the output data from the mapper node. The attribute
    type will be used to determine which output attribute to use: float array gets outValuePP, vector array gets outColorPP.
    If the flag is omitted, no output connection is made.
    - inputU : iu (unicode) [create]
    Specifies the upstream attribute to connect to the mapper's uCoordPP attribute. If the flag is omitted, no input
    connection is made.
    - inputV : iv (unicode) [create]
    Specifies the upstream attribute to connect to the mapper's vCoordPP attribute. If the flag is omitted, no input
    connection is made.
    - mapTo : mt (unicode) [create]
    Specifies an existing node to be used to compute the output values. This node must be of the appropriate type.
    Currently, only ramp nodes may be used.Flag can appear in Create mode of commandFlag can have multiple arguments, passed
    either as a tuple or a list.
    - target : t (unicode) [create]
    Specifies the target object to be connected to.
    - type : ty (unicode) [create]
    Specifies the node type to create which will be used to compute the output values. Currently, only ramp is valid. If the
    flag is omitted, no connection is made and the external node is not created.
    Derived from mel command `maya.cmds.arrayMapper`
    """
    pass
def setDynamic(*args, **kwargs):
    """
    setDynamic sets the isDynamic attribute of particle objects on or off. If no objects are specified, it sets the
    attribute for any selected objects. If -all is thrown, it sets the attribute for all particle objects in the scene. By
    default it sets the attribute true (on); if the -off flag is thrown, it sets the attribute false (off). WARNING:
    setDynamic is obsolescent. This is the last version of Maya in which it will be supported.
    Flags:
    - allOnWhenRun : awr (bool) [create]
    Obsolete, no longer supported or necessary.
    - disableAllOnWhenRun : dwr (bool) [create]
    Obsolete, no longer supported or necessary.Flag can appear in Create mode of commandFlag can have multiple arguments,
    passed either as a tuple or a list.
    - setAll : all (bool) [create]
    Set for all objects.
    - setOff : off (bool) [create]
    Sets isDynamic false.
    - setOn : on (bool) [create]
    Sets isDynamic true. This flag is set by default.
    Derived from mel command `maya.cmds.setDynamic`
    """
    pass
def runup(*args, **kwargs):
    """
    runup plays the scene through a range of frames, forcing dynamic objects to evaluate as it does so. If no max frame is
    specified, runup runs up to the current time.
    Flags:
    - cache : cch (bool) [create]
    Cache the state after the runup.
    - fromPreviousFrame : fpf (bool) [create]
    Run up the animation from the previously evaluated frame. If no flag is supplied this is the default.
    - fromStartFrame : fsf (bool) [create]
    Run up the animation from the start frame. If no flag is supplied -fromPreviousFrame is the default.
    - maxFrame : mxf (time) [create]
    Ending time for runup, in current user time units. The runup will always start at the minimum start frame for all
    dynamic objects.
    - state : st (bool) [create]
    Turns runup and cache on/off.Flag can appear in Create mode of commandFlag can have multiple arguments, passed either as
    a tuple or a list.
    Derived from mel command `maya.cmds.runup`
    """
    pass
def rigidBody(*args, **kwargs):
    """
    This command creates a rigid body from a polygonal or nurbs surface.
    Flags:
    - active : act (bool) [create,query,edit]
    Creates a rigid body that is active. An active rigid body accepts and causes collisions and is effected by dynamic
    fields. This is the default.
    - angularVelocity : av (bool) [query]
    Current angular velocity of rigid body.
    - applyForceAt : afa (unicode) [create,query,edit]
    Determines how forces are applied to the rigid body. The choices are centerOfMass | boundingBox | verticesOrCVs.
    Default: boundingBox
    - bounciness : b (float) [create,query,edit]
    Sets the restitution (or bounciness) of the rigid body. Range: 0.0 - 2.0 Default: 0.6
    - cache : c (bool) [create,query,edit]
    Turns caching on (1) or off (0) for the rigid body. Default: off
    - centerOfMass : com (float, float, float) [create,query,edit]
    Sets the center of mass (x,y,z) of the rigid body. Default: actual center of mass.
    - collisions : cl (bool) [create,query,edit]
    Turns collisions on/off for the rigid body. If the collisions are turned off the rigid body will not collide with any
    other rigid body. Default: on.
    - contactCount : cc (bool) [query]
    returns the current contact count for the rigid body.
    - contactName : cn (bool) [query]
    returns all the rigid body names which are in contact with this shape. One name for each contact will be returned.Flag
    can appear in Create mode of commandFlag can have multiple arguments, passed either as a tuple or a list.
    - contactPosition : cp (bool) [query]
    returns all the contact position. One position for each contact will be returned.
    - damping : dp (float) [create,query,edit]
    Sets the damping value of the rigid body. Range: -2.0 - 2.0 Default: 0.0
    - deleteCache : dc (bool) [edit]
    Deletes the cache (if one exists) of the rigid body.
    - dynamicFriction : df (float) [create,query,edit]
    Sets the dynamic friction for the rigid body. Range: 0.0 - 1.0 Default: 0.2
    - force : f (bool) [query]
    Current force on the rigid body.
    - ignore : ig (bool) [create,query,edit]
    Causes the rigid body to be ignored in the rigid solver. Default: off
    - impulse : i (float, float, float) [create,edit]
    Applies an impulse (instantaneous) force on a rigid body. Default: 0.0 0.0 0.0
    - impulsePosition : imp (float, float, float) [create,edit]
    The position at which the impulse is applied. Default: the bodies center of mass.
    - initialAngularVelocity : iav (float, float, float) [create,query,edit]
    Sets the initial angular velocity of the rigid body. Default: 0.0 0.0 0.0
    - initialVelocity : iv (float, float, float) [create,query,edit]
    Sets the initial velocity of the rigid body. Default: 0.0 0.0 0.0
    - layer : l (int) [create,query,edit]
    Sets the collision layer of the rigid body. Only rigid bodies in the same collision layer can collide with each other.
    Range: = 0 Default: 0.
    - lockCenterOfMass : lcm (bool) [create,query,edit]
    Locks the center of mass for the rigid body. Default: off
    - mass : m (float) [create,query,edit]
    Sets the mass of the rigid body. Range: 0 Default: 1.0
    - name : n (unicode) [create,query,edit]
    Assigns the rigid body the given name.
    - orientation : o (float, float, float) [create,query,edit]
    Sets the initial orientation (x,y,z) of the rigid body. Default: current orientation.
    - particleCollision : pc (bool) [create,query,edit]
    Turns the ability for a rigid body to collide with particles on and off. The particles will exert a force on the rigid
    body. Default: off
    - passive : pas (bool) [create,query,edit]
    Creates a rigid body that is passive. A passive rigid body does not react to collisions but active rigid bodies can
    collide with it. Dynamic Fields will not effect a passive rigid body. Only passive rigid bodies can be keyframed.
    - position : p (float, float, float) [create,query,edit]
    Sets the initial position (x,y,z) of the rigid body. Default: current position.
    - removeShape : rs (unicode) []
    - solver : slv (unicode) [create,query,edit]
    The name of the solver which this rigid node is to reside. If the solver does not exist then the rigid body will not
    be created. If the edit flag is thrown and the solver exists, the rigid body will be moved to that solver.
    - spinImpulse : si (float, float, float) [create,edit]
    Applies an spin impulse (instantaneous rotational) force on a rigid body. Default: 0.0 0.0 0.0
    - standInObject : sio (unicode) [create,query,edit]
    Causes the simulator to use a stand in object for the simulation. The choices are none | cube | sphere. The default is
    none. Default: none
    - staticFriction : sf (float) [create,query,edit]
    Sets the static friction for the rigid body. Range: 0.0 - 1.0 Default: 0.2
    - tesselationFactor : tf (int) [create,query]
    Sets the tesselation factor for a rigid body surface. Range: = 10 Default: 200.
    - velocity : vel (bool) [query]
    Current velocity of rigid body.
    Derived from mel command `maya.cmds.rigidBody`
    """
    pass
def air(*args, **kwargs):
    """
    The air field simulates the effects of moving air. The affected objects will be accelerated or decelerated so that their
    velocities match that of the air. With the '-vco true' flag thrown, only accelerations are applied. By parenting an air
    field to a moving part of an object (ie. a foot of a character) and using '-i 1 -m 0 -s .5 -vco true' flags, one can
    simulate the movement of air around the foot as it moves, since the TOTAL velocity vector of the field would be only
    based on the movement of the foot. This can be done while the character walks through leaves or dust on the ground. For
    each listed object, the command creates a new field. The transform is the associated dependency node. Use connectDynamic
    to cause the field to affect a dynamic object. If fields are created, this command returns the field names. If a field
    was queried, the results of the query are returned. If a field was edited, the field name is returned. If the -pos flag
    is specified, a field is created at the position specified. If not, if object names are provided or the active selection
    list is non-empty, the command creates a field for every object in the list and calls addDynamic to add it to the
    object; otherwise the command defaults to -pos 0 0 0. Setting the -pos flag with objects named on the command line is an
    error.
    Flags:
    - attenuation : att (float) [query,edit]
    Attenuation rate of field. The air field attenuates so as to taper the field's magnitude to zero when the maximum
    distance is reached. Thus, attenuation has no effect unless useMaxDistance is true and a positive maximum distance has
    been set.
    - directionX : dx (float) [query,edit]
    - directionY : dy (float) [query,edit]
    - directionZ : dz (float) [query,edit]
    Direction that the air will try to match the affected particles' velocity to. NOTE: This is not the velocity; this is
    only the direction. Use the -s flag to set the speed.
    - enableSpread : es (bool) [query,edit]
    This tells the system whether or not to use the spread angle given by '-sp'. If this is 'false' then all connected
    objects within the maximum distance will be affected. Also, if this is set to 'false', all affected objects are forced to
    match their velocities along the direction vector. If this is set to 'true' and spread is used, then the direction of
    the force is along the direction from the field to the object.
    - fanSetup : fs (bool) [edit]
    Similar to 'windSetup' except that the effects of a fan or a person blowing air are approximated. The user can pass the
    same flags on the command line to adjust them from the defaults. These are the values that get set to approximate a
    'fan': inheritVelocity 1.0 inheritRotation true componentOnly false enableSpread true spread .5 (45 degrees from center
    )
    - inheritRotation : iro (bool) [query,edit]
    If this is set to 'true', then the direction vector described with -dx, -dy, and -dz will be considered local to the
    owning object. Therefore, if the owning object's transform undergoes any rotation (by itself or one of its parents), the
    direction vector of the air field will undergo that same rotation.
    - inheritVelocity : iv (float) [query,edit]
    Amount (from 0 to 1) of the field-owner's velocity added to the vector determined by the direction and speed flags. The
    combination of these two vectors makes up the TOTAL velocity vector for the air field. This allows the air to be
    determined directly by the motion of the owning object.
    - magnitude : m (float) [query,edit]
    The speed along the direction vector that the air is moving. Use this in conjunction with the -dx -dy -dz flags.
    - maxDistance : mxd (float) [query,edit]
    Maximum distance at which field is exerted. -1 indicates that the field has no maximum distance.
    - name : n (unicode) [query,edit]
    name of field
    - perVertex : pv (bool) [query,edit]
    Per-vertex application. If this flag is set true, then each individual point (CV, particle, vertex,etc.) of the chosen
    object exerts an identical copy of the force field. If this flag is set to false, then the force is exerted only from
    the geometric center of the set of points.
    - position : pos (float, float, float) [query,edit]
    Position in space where you want to place a field. The field then emanates from this position in space rather than from
    an object. Note that you can both use -pos (creating a field at a position) and also provide object names.
    - speed : s (float) [query,edit]
    How fast the affected objects' speed reaches the speed (based on the -mag, -dx, -dy, -dz flags) of the air field. This
    value gets clamped internally to be between 0.0 and 1.0. A value of 0.0 will make the air field have no effect. A value
    of 1.0 will try to match the air field's speed much quicker, but not necessarily immediately.
    - spread : sp (float) [query,edit]
    This represents the angle from the direction vector within which objects will be affected. The values are in the range
    of 0 to 1. A value of 0 will result in an effect only exactly in front of the air field along the direction vector. A
    value of 1 will result in any object in front of the owning object, 90 degrees in all direction from the direction
    vector.
    - torusSectionRadius : tsr (float) [query,edit]
    - velocityComponentOnly : vco (bool) [query,edit]
    If this is 'false', the air will accelerate or decelerate the affected objects so that their velocities will eventually
    match the TOTAL velocity vector of the air field. If this is 'true', only ACCELERATION is applied to the affected objects
    so that their velocity component along the TOTAL velocity vector matches or is greater in magnitude than the TOTAL
    velocity vector. This will not slow objects down to match velocities, only speed them up to match components. This is
    most useful when using the -iv flag with a value 0.
    - volumeExclusion : vex (bool) [query,edit]
    - volumeOffset : vof (float, float, float) [query,edit]
    - volumeShape : vsh (unicode) [query,edit]
    - volumeSweep : vsw (float) [query,edit]
    - wakeSetup : wks (bool) [edit]
    Like the 'windSetup' and 'fanSetup', 'wakeSetup' sets certain values in the field to approximate the movement of air
    near a moving object, such as a character's foot or hand. The values that are set are: inheritVelocity 1.0
    inheritRotation false componentOnly true enableSpread false speed 0.0Flag can appear in Create mode of commandFlag can
    have multiple arguments, passed either as a tuple or a list.
    - windSetup : wns (bool) [edit]
    This will set some of the values above in a way that approximates the effects of a basic wind. This allows the user to
    then change certain values as he/she wishes on the same command line. First the preset values get set, and then any
    other flags that were passed get taken into account. These are the values that get set to approximate 'wind':
    inheritVelocity 0.0 inheritRotation true componentOnly false enableSpread false
    Derived from mel command `maya.cmds.air`
    """
    pass
def expressionEditorListen(*args, **kwargs):
    """
    Listen for messages on behalf of the Expression Editor, at its request, and relay them to it.

    This action is for internal use only and should not be called by users;
    it should be invoked only by the Expression Editor itself.

    Flags:
    - listenFile : lf (unicode) [create]
    Listen for changes to the file argument.Flag can appear in Create mode of commandFlag can have multiple arguments,
    passed either as a tuple or a list.
    - listenForAttr : la (unicode) [create]
    Listen for changes to the attributes of the node argument.
    - listenForExpression : le (unicode) [create]
    Listen for changes to the named expression
    - listenForName : ln (unicode) [create]
    Listen for name changes for the node argument.
    - stopListenForAttr : sla (unicode) [create]
    Stop listening for changes to the attributes of the node argument.
    - stopListenForExpression : sle (unicode) [create]
    Stop listening for changes to the named expression
    - stopListenForName : sln (unicode) [create]
    Stop listening for name changes for the node argument.

    Derived from mel command `maya.cmds.expressionEditorListen`
    """
    return None
def setParticleAttr(*args, **kwargs):
    """
    This action will set the value of the chosen attribute for every particle or selected component in the selected or
    passed particle object. Components should not be passed to the command line. For setting the values of components, the
    components must be selected and only the particle object's names should be passed to this action. If the attribute is a
    vector attribute and the -vv flag is passed, then the three floats passed will be used to set the values. If the
    attribute is a vector and the -fv flag is passed and the -vv flag is not passed, then the float will be repeated for each
    of the X, Y, and Z values of the attribute. Similarly, if the attribute is a float attribute and a vector value is
    passed, then the length of the vector passed will be used for the value. Note: The attribute passed must be a Per-
    Particle attribute.
    Flags:
    - attribute : at (unicode) [create]
    Tells the action which attribute you want to set
    - floatValue : fv (float) [create]
    Tells what you want the value to be set to of a float attribute
    - object : o (unicode) [create]
    If this flag is passed and the STRING is the name of a particle object's transform or shape, then ONLY that object will
    be edited, ignoring the selection list or command line, and ALL of its particles' values will be changed for the
    specified attribute.Flag can appear in Create mode of commandFlag can have multiple arguments, passed either as a tuple
    or a list.
    - randomFloat : rf (float) [create]
    Tells the command to add a random value from -FLOAT to +FLOAT to the results of each particle. The default is 0.0.
    - randomVector : rv (float, float, float) [create]
    Tells the command to add a random value from -x,-y,-zto x,y,zto the results of each particle. The default 0 0 0.
    - relative : r (bool) [create]
    If this is set to TRUE (the default is FALSE), then the float or vector value will be added to the current value for
    each particle.
    - vectorValue : vv (float, float, float) [create]
    Tells what you want the value to be set to of a vector attribute
    Derived from mel command `maya.cmds.setParticleAttr`
    """
    pass
def newton(*args, **kwargs):
    """
    A Newton field pulls an object towards the exerting object with force dependent on the exerting object's mass, using
    Newton's universal law of gravitation. The transform is the associated dependency node. Use connectDynamic to cause the
    field to affect a dynamic object. If fields are created, this command returns the names of each of the fields. If a
    field was queried, the results of the query are returned. If a field was edited, the field name is returned. If object
    names are provided or the active selection list is non-empty, the command creates a field for every object in the list
    and calls addDynamic to add it to the object. If the list is empty, the command defaults to -pos 0 0 0. Setting the -pos
    flag with objects named on the command line is an error.
    Flags:
    - attenuation : att (float) [query,edit]
    Attenuation rate of field
    - magnitude : m (float) [query,edit]
    Strength of field.
    - maxDistance : mxd (float) [query,edit]
    Maximum distance at which field is exerted. -1 indicates that the field has no maximum distance.
    - minDistance : mnd (float) [query,edit]
    Minimum distance at which field is exerted. Distance is in the denominator of the field force equation. Setting md to a
    small positive number avoids bizarre behavior when the distance gets extremely small.Flag can appear in Create mode of
    commandFlag can have multiple arguments, passed either as a tuple or a list.
    - name : n (unicode) [query,edit]
    name of field
    - perVertex : pv (bool) [query,edit]
    Per-vertex application. If this flag is set true, then each individual point (CV, particle, vertex,etc.) of the chosen
    object exerts an identical copy of the force field. If this flag is set to false, then the force is exerted only from
    the geometric center of the set of points.
    - position : pos (float, float, float) [query,edit]
    Position in space where you want to place a field. The newton then emanates from this position in space rather than from
    an object. Note that you can both use -pos (creating a field at a position) and also provide object names.
    - torusSectionRadius : tsr (float) []
    - volumeExclusion : vex (bool) []
    - volumeOffset : vof (float, float, float) []
    - volumeShape : vsh (unicode) []
    - volumeSweep : vsw (float) []
    Derived from mel command `maya.cmds.newton`
    """
    pass
def goal(*args, **kwargs):
    """
    Specifies the given objects as being goals for the given particle object. If the goal objects are geometry, each
    particle in the particle object will each try to follow or match its position to that of a certain vertex/CV/lattice
    point of the goal. If the goal object is another particle object, each particle will try to follow a particle of the
    goal. In any other case, all the particles will try to follow the current location of the goal object's transform. You
    can get this latter behavior for a geometry or particle object too by using -utr true. The goal weight can be keyframed.
    It lives on the particle object to which the goal was added and is a multi-attribute.
    Flags:
    - goal : g (unicode) [create,query]
    This flag specifies string to be a goal of the particle object on the command line or the currently selected particle
    object. This flag can be used multiple times to specify multiple goals for a particle object. Query is for use by the
    attribute editor.
    - index : i (bool) [query]
    Returns array of multi-attribute indices for the goals. Intended for use by the Attribute Editor.
    - useTransformAsGoal : utr (bool) [create]
    Use transform of specified object instead of the shape. Meaningful only for particle and geometry objects. Can only be
    passed once, applies to all objects passed with -g.Flag can appear in Create mode of commandFlag can have multiple
    arguments, passed either as a tuple or a list.
    - weight : w (float) [create]
    This specifies the goal weight as a value from 0 to 1. A value of 0 means that the goal's position will have no effect
    on the particle object, while a weight of 1 will make the particle object try to follow the goal object exactly. This
    flag can only be passed once and sets the weight for every goal passed with the -g/-goal flag.
    Derived from mel command `maya.cmds.goal`
    """
    pass
def dynControl(*args, **kwargs):
    """
    Stub signature for `maya.cmds.dynControl`; accepts the flags listed below.

    Flags:
    - autoCreate : ac (bool) []
    - oversample : os (int) []
    - particleCache : pc (bool) []
    - particleLOD : pld (float) []
    - particlesOn : po (bool) []
    - rigidOn : ro (bool) []
    - seed : sd (int) []
    - startTime : st (time) []
    - traceDepth : td (int) []

    Derived from mel command `maya.cmds.dynControl`
    """
    return None
def dynCache(*args, **kwargs):
    """
    Caches the state of every particle shape at the current time.

    Derived from mel command `maya.cmds.dynCache`
    """
    return None
def pfxstrokes(*args, **kwargs):
    """
    This command will loop through all the Paint Effects strokes, including pfxHair nodes, and write the current state of
    all the tubes to a file. For normal stroke nodes tubes must be ON in the brush or there will be no output. For pfxHair
    nodes there will always be output, but the format is different than for stroke nodes(however one can assign a brush with
    tubes = ON to a pfxHair node, in which case it will output the same format as strokes). The general file format is
    ASCII, using commas to separate numerical values and newlines between blocks of data. The format used for pfxHair nodes
    presents the hair curves points in order from root to tip of the hair. The hairs follow sequentially in the following
    fashion: NumCvs pointX,pointY,pointZ, normalX,normalY,normalZ, width, colorR,colorG,colorB, paramU pointX,pointY,pointZ,
    normalX,normalY,normalZ, width, colorR,colorG,colorB, paramU etc... NumCvs pointX,pointY,pointZ,
    normalX,normalY,normalZ, width, colorR,colorG,colorB, paramU etc.. The format used to output files for brushes with
    tubes=ON is more complex. The tubes can branch and the order the segments are written is the same order they are drawn
    in. Slowly drawing a tall grass brush in the paint effects panel can help to illustrate the order the segments will
    appear in the file. New tubes can start "growing" before others are finished. There is no line for "NumCvs". Instead all
    data for each segment appears on each line. The data on each line is the same as passed into the paint effects runtime
    function. See the argument list of paintRuntimeFunc.mel for the order and a description of these parameters. The
    parameters match up exactly in the order they appear on a line of the output file with the order of arguments to this
    function. If one wishes to parse the output file and connect the segments together into curves the branchId, parentId
    and siblingCnt parameters can help when sorting which segment connects to which line. Using the -postCallback option
    will write out the tubes data after it has been processed by the runTime callback.
    Flags:
    - filename : fn (unicode) [create]
    The output file.Flag can appear in Create mode of commandFlag can have multiple arguments, passed either as a tuple or a
    list.
    - postCallback : pc (bool) [create]
    Output information to the file after the Runtime Callback MEL function has been invoked. The default is to output the
    information prior to the callback.
    - selected : sl (bool) [create]
    Only loop through the selected strokes.
    Derived from mel command `maya.cmds.pfxstrokes`
    """
    # Stub: generated signature only; body is intentionally empty.
    pass
def addDynamic(*args, **kwargs):
    """
    Makes the "object" specified as second argument the source of an existing field or emitter specified as the first
    argument. In practical terms, what this means is that a field will emanate its force from its owner object, and an
    emitter will emit from its owner object. addDynamic makes the specified field or emitter a child of the owner's
    transform (adding it to the model if it was not already there), and makes the necessary attribute connections. If either
    of the arguments is omitted, addDynamic searches the selection list for objects to use instead. If more than one
    possible owner or field/emitter is selected, addDynamic will do nothing. If the specified field/emitter already has a
    source, addDynamic will remove the current source and replace it with the newly specified source. If a subset of the
    owner object's cvs/particles/vertices is selected, addDynamic will add the field/emitter to that subset only.
    Modifications:
    - returns a list of PyNode objects
    Derived from mel command `maya.cmds.addDynamic`
    """
    # Stub: generated signature only; body is intentionally empty.
    pass
def dynExport(*args, **kwargs):
    """
    Export particle data to disk files. For cache export (-format cache), dynExport also sets three attributes of the
    current dynGlobals node. It sets the useParticleRenderCache attribute to true, and the
    min/maxFrameOfLastParticleRenderCache attributes to correspond to the min and max frames. Exported .pda or .pdb files
    are assigned a name of form object name.frame.extension, where extensionis "pda" or "pdb." The naming convention for
    .pdc files is similar but does not use frame numbers, it uses a more precise representation of the time instead. By
    default, the pda and pdb formats export all per-particle attributes, and all integer or float type attributes except
    those which are hidden or not storable. (Exception: level of detail is not exported, by default) The pdc format exports
    all attributes which the particle object needs for its state cache. To specify only selected attributes, use the -atr
    flag (which is multi-use). In general, it is recommended not to use this flag with pdc type, since you need all the
    attributes in order for the cache to be useful. dynExport exports data for the current frame, or for a range of frames
    specified with -mnf and -mxf. If you are not already at the start frame, dynExport will run up the scene for you. VERY
    VERY IMPORTANT NOTE:If you use dynExport in -prompt mode, it does NOT automatically force evaluation of your objects.
    You must do this yourself from your script. The easiest way is to request each particle object's "count" attribute each
    frame. You must request the count attribute for each object you want to export, because their solvers run independently
    of one another. In interactive mode, objects WILL get evaluated automatically and you don't have to worry about any of
    this. When exporting a particle object whose particles are created from collisions involving particles in another
    particle object(s), you must make sure you simultaneously export all the particle objects involved in the dependency
    chain otherwise you will get an empty cache file. For non-per-particle attributes, pda and pdb formats write the
    identical value once for each particle. The following types of non-per-particle attributes can be exported: float,
    double, doubleLinear, doubleAngle, byte, short, long, enum. The first four are exported as "Real" (in PDB parlance), and
    the last four as "Integer." In the pda and pdb formats, "particleId" and "particleId0" are exported as Integer, and are
    exported under the names "id" and "id0" respectively. All other attributes are exported under their long names.
    Flags:
    - allObjects : all (bool) [create]
    Ignore the selection list and export all particle objects. If you also specify an object name, the -all flag will be
    ignored.
    - attribute : atr (unicode) [create]
    Name of attribute to be exported. If any specified object does not have one of the specified attributes, dynExport will
    issue an error and not do the export.
    - format : f (unicode) [create]
    Desired format: "binary" ("pdb"), "ascii" ("pda"), or "cache" ("pdc").The pdc format is for use by the Maya particle
    system to cache particle data. The pda and pdb format options are intended for pipelines involving other software (for
    example, sending the data to some program written in-house); Maya cannot read pda or pdb files.There is no formal
    description of the PDB format, but the ExploreMe/particles/readpdb directory contains the source and Makefile for a
    small, simple C program called "readpdb" which reads it. Note that you must compile and run readpdb on the platform
    which you exported the files on.Flag can appear in Create mode of commandFlag can have multiple arguments, passed either
    as a tuple or a list.
    - maxFrame : mxf (time) [create]
    Ending frame to be exported.
    - minFrame : mnf (time) [create]
    Starting frame to be exported. The export operation will play the scene through from min frame to max frame as it
    exports.
    - onlyUpdateParticles : oup (bool) []
    - overSampling : os (int) [create]
    OverSampling to be used during export.
    - path : p (unicode) [create]
    This option allows you to specify a subdirectory of the workspace "particles" directory where you want the exported
    files to be stored. By default, files are stored in the workspace particles directory, i.e., -path is relative to that
    directory.Please Read This:This is a change from previous versions of Maya in which the path was relative to the
    workspace root directory.) You can set the "particles" directory anywhere you want using the project window or workspace
    -fr command. (In this way, you can use an absolute path for export).The -path flag cannot handle strings which include
    "/" or "\", in other words, it lets you go down only one level in the directory hierarchy. If you specify a path which
    doesn't exist, the action will create it if possible; if it can't create the path it will warn you and fail. If you are
    using a project for which a particle data directory is not defined, dynExport will create a default one called
    "particles" and add it to your workspace.
    Derived from mel command `maya.cmds.dynExport`
    """
    # Stub: generated signature only; body is intentionally empty.
    pass
def spring(*args, **kwargs):
    """
    The spring command can do any of the following:\* create a new spring object (shape plus transform). The shape contains
    springs between the points (particles, cvs, etc.) of the objects selected or listed on the command line.\* create new
    springs and add them to an existing spring object\* edit or query certain attributes of an existing spring objectOne
    "spring object" may have hundreds or even thousands of individual springs. Certain attributes of the spring object
    specify exactly where the springs are attached to which other objects.Springs may be attached to the following:
    particles, vertices of soft bodies, CVs or edit points of curves or surfaces, vertices of polygonal objects, and points
    of lattices. In the case where one endpoint of a spring is non-dynamic (a CV, edit point, etc.), the spring does not
    affect its motion, but the motion of the point affects the spring. A spring will be created only if at least one of the
    endpoints is dynamic: for example, a spring will never be created between two CVs. A single spring object can hold
    springs which are incident to any number of other objects.The spring has creation-only flags and editable flags.
    Creation-only flags (minDistance, maxDistance, add, exclusive, all, wireframe, walklength, checkExisting) can be used
    only when creating new springs (including adding springs to existing spring object). Editable flags modify attributes of
    an existing spring object.If a spring object is created, this command returns the names of the shape and transform. If a
    spring object is queried, the command returns the results of the query.
    Flags:
    - addSprings : add (bool) [create]
    If specified, springs will be added to the existing selected set of springs. (Default is to create a new spring object.)
    - allPoints : all (bool) [create,edit]
    If True, sets the mode of spring application to All. This will add springs between all points selected. (Default is
    False.)
    - count : ct (bool) [query]
    Return the number of springs in the shape. Query-only. We maintain this flag only for compatibility with earlier
    versions of Maya. To get the count of springs, it is much faster and simpler to use the spring shape's count attribute:
    getAttr shapeName.count.
    - damp : dmp (float) []
    - damping : d (float) [create,query,edit]
    Damping factor for the springs created in the spring object. (Default = 0.2 )
    - dampingPS : dPS (float) [create,query,edit]
    Damping factor for the springs created in the spring object. This will initialize all the entries in dampingPS to the
    specified value. In both the flag and the attribute name, "PS" stands for "per-spring." (Default = 0.2 )
    - endForceWeight : efw (float) [create,query,edit]
    Amount of the force of the spring that gets applied to the point to which the spring ends. Valid range is from 0.0 to
    1.0. (Default = 1.0 )
    - exclusive : exc (bool) [create]
    If true, tells the command to create springs only between pairs of points which are not in the same object. (Default is
    False.)
    - length : l (float) [create,query,edit]
    Vestigial form of "restLength." Please use "restLength" instead.
    - maxDistance : mxd (float) [create,edit]
    Maximum distance between two points that a spring would be considered.
    - minDistance : mnd (float) [create]
    Minimum distance between two points that a spring would be considered. (Default = 0.0. See Defaults for more information
    on this flag's default.)
    - minMax : mm (bool) [create]
    If True, sets the mode of the spring application to Min/Max. This will add springs between all points from the specified
    point groups that are between the minimum and maximum distance values set with min and max. (Default is False.) Note:
    This gets automatically set if either the min or max flags are used.
    - name : n (unicode) [create,query]
    Name of spring object.
    - noDuplicate : nd (bool) [create]
    Check for existing springs and don't add a new spring between two points already connected by a spring in the same
    object. Only the object the command is working on is checked. This flag is relevant only when using -add. (Default =
    false)Flag can appear in Create mode of commandFlag can have multiple arguments, passed either as a tuple or a list.
    - restLength : rl (float) [create,query,edit]
    Per-object rest length for the new springs. Springs can use either their per-object or per-spring rest length. See the
    -lPS and -ulp flags.
    - restLengthPS : rPS (float) [create,query,edit]
    Per-spring rest length for the new springs. This will initialize all the entries in restLengthPS to the specified value.
    If this flag is not thrown, each rest length will be initialized to the distance between the two points at the time the
    spring is created (i.e., the initial length of the spring). When playing back, springs can use either their per-spring
    or per-object rest length. See the -rl and -urp flags. In both the flag and the attribute name, "PS" stands for "per-
    spring."
    - startForceWeight : sfw (float) [create,query,edit]
    Amount of the force of the spring that gets applied to the point from which the spring starts. Valid range is from 0.0
    to 1.0. (Default = 1.0 )
    - stiffness : s (float) [create,query,edit]
    Stiffness of the springs created in the spring object. (Default = 1.0 ) -damp float Vestigial form of "damping." Please
    use "damping" instead.
    - stiffnessPS : sPS (float) [create,query,edit]
    Stiffness of the springs created in the spring object. This will initialize all the entries in stiffnessPS to the
    specified value. In both the flag and the attribute name, "PS" stands for "per-spring." (Default = 1.0 )
    - strength : str (float) []
    - useDampingPS : udp (bool) [create,query,edit]
    Specifies whether to use dampingPS (per spring damping). If set to false, the per object damping attribute value will be
    used. This flag simply sets the useDampingPS attribute of the spring shape. In both the flag and the attribute name,
    "PS" stands for "per-spring." (Default = false )
    - useRestLengthPS : urp (bool) [create,query,edit]
    Specifies whether to use restLengthPS (per spring restLength). If set to false, the per object restLength attribute
    value will be used. This flag simply sets the useRestLengthPS attribute of the spring shape. In both the flag and the
    attribute name, "PS" stands for "per-spring." (Default = false )
    - useStiffnessPS : usp (bool) [create,query,edit]
    Specifies whether to use stiffnessPS (per spring stiffness). If set to false, the per object stiffness attribute value
    will be used. This flag simply sets the useStiffnessPS attribute of the spring shape. In both the flag and the attribute
    name, "PS" stands for "per-spring." (Default = false )
    - walkLength : wl (int) [create]
    This flag is valid only when doing wireframe creation. It will create springs between pairs of points connected by the
    specified number of edges. For example, if walk length is 2, each pair of points separated by no more than 2 edges will
    get a spring. Walk length measures the distance between pairs of vertices just like the number of blocks measures the
    distance between two intersections in a city.
    - wireframe : wf (bool) [create]
    If True, sets the mode of the spring application to Wireframe. This is valid only for springs created on a soft body. It
    will add springs along all edges connecting the adjacent points (vertices or CV's) of curves and surfaces. (Default is
    False.)
    Derived from mel command `maya.cmds.spring`
    """
    # Stub: generated signature only; body is intentionally empty.
    pass
def particleRenderInfo(*args, **kwargs):
    """
    This action provides information access to the particle render subclasses. These are derived from TdynRenderBase. This
    action is used primarily by the Attribute Editor to gather information about attributes used for rendering. In query
    mode, return type is based on queried flag.
    Flags:
    - attrList : al (int) [query]
    Return the list of attributes used by this render type.
    - attrListAll : ala (bool) [query]
    Return a complete list of all render attributes used by the particle object. This also includes the per particle
    attributes.
    - name : n (int) [query]
    Return the name of the render subclass using the render type.
    - renderTypeCount : rtc (bool) [query]
    Return the count of registered render classes for particle.Flag can appear in Create mode of commandFlag can have
    multiple arguments, passed either as a tuple or a list.
    Derived from mel command `maya.cmds.particleRenderInfo`
    """
    # Stub: generated signature only; body is intentionally empty.
    pass
def getFluidAttr(*args, **kwargs):
    """
    Returns values of built-in fluid attributes such as density, velocity, etc., for individual grid cells or for all cells
    in the grid.
    Flags:
    - attribute : at (unicode) [create]
    Specifies the fluid attribute for which to display values. Valid attributes are "force", "velocity", "density",
    "falloff", "fuel", "color", and "temperature". (Note that getting force values is an alternate way of getting velocity
    values at one time step.)
    - lowerFace : lf (bool) [create]
    Only valid with "-at velocity". Since velocity values are stored on the edges of each voxel and not at the center, using
    voxel based indices to set velocity necessarily affects neighboring voxels. Use this flag to only set velocity
    components on the lower left three faces of a voxel, rather than all six.Flag can appear in Create mode of commandFlag
    can have multiple arguments, passed either as a tuple or a list.
    - xIndex : xi (int) [create]
    Only return values for cells with this X index
    - xvalue : x (bool) []
    Only get the first component of the vector-valued attribute specified by the "-at/attribute" flag.
    - yIndex : yi (int) [create]
    Only return values for cells with this Y index
    - yvalue : y (bool) []
    Only get the second component of the vector-valued attribute specified by the "-at/attribute" flag.
    - zIndex : zi (int) [create]
    Only return values for cells with this Z index
    - zvalue : z (bool) []
    Only get the third component of the vector-valued attribute specified by the "-at/attribute" flag.
    Derived from mel command `maya.cmds.getFluidAttr`
    """
    # Stub: generated signature only; body is intentionally empty.
    pass
def radial(*args, **kwargs):
    """
    A radial field pushes objects directly towards or directly away from it, like a magnet. The transform is the associated
    dependency node. Use connectDynamic to cause the field to affect a dynamic object. If fields are created, this command
    returns the names of each of the fields. If a field was queried, the results of the query are returned. If a field was
    edited, the field name is returned. If object names are provided or the active selection list is non-empty, the command
    creates a field for every object in the list and calls addDynamic to add it to the object. If the list is empty, the
    command defaults to -pos 0 0 0. Setting the -pos flag with objects named on the command line is an error.
    Flags:
    - attenuation : att (float) [query,edit]
    Attenuation rate of field
    - magnitude : m (float) [query,edit]
    Strength of field.
    - maxDistance : mxd (float) [query,edit]
    Maximum distance at which field is exerted. -1 indicates that the field has no maximum distance.
    - name : n (unicode) [query,edit]
    name of field
    - perVertex : pv (bool) [query,edit]
    Per-vertex application. If this flag is set true, then each individual point (CV, particle, vertex,etc.) of the chosen
    object exerts an identical copy of the force field. If this flag is set to false, then the force is exerted only from
    the geometric center of the set of points.
    - position : pos (float, float, float) [query,edit]
    Position in space where you want to place a field. The field then emanates from this position in space rather than from
    an object. Note that you can both use -pos (creating a field at a position) and also provide object names.
    - torusSectionRadius : tsr (float) []
    - type : typ (float) [query,edit]
    Type of radial field (0 - 1). This controls the algorithm by which the field is attenuated. Type 1, provided for
    backward compatibility, specifies the same algorithm as Alias | Wavefront Dynamation. A value between 0 and 1 yields a
    linear blend.Flag can appear in Create mode of commandFlag can have multiple arguments, passed either as a tuple or a
    list.
    - volumeExclusion : vex (bool) []
    - volumeOffset : vof (float, float, float) []
    - volumeShape : vsh (unicode) []
    - volumeSweep : vsw (float) []
    Derived from mel command `maya.cmds.radial`
    """
    # Stub: generated signature only; body is intentionally empty.
    pass
def volumeAxis(*args, **kwargs):
    """
    A volume axis field can push particles in four directions, defined with respect to a volume: along the axis, away from
    the axis or center, around the axis, and in a user-specified direction. These are analogous to the emission speed
    controls of volume emitters. The volume axis field also contains a wind turbulence model (different from the turbulence
    field) that simulates an evolving flow of liquid or gas. The turbulence has a built-in animation that is driven by a
    connection to a time node. The transform is the associated dependency node. Use connectDynamic to cause the field to
    affect a dynamic object. If fields are created, this command returns the names of each of the fields. If a field was
    queried, the results of the query are returned. If a field was edited, the field name is returned. If object names are
    provided or the active selection list is non-empty, the command creates a field for every object in the list and calls
    addDynamic to add it to the object. If the list is empty, the command defaults to -pos 0 0 0. Setting the -pos flag with
    objects named on the command line is an error.
    Flags:
    - alongAxis : alx (float) [query,edit]
    Initial velocity multiplier in the direction along the central axis of the volume. See the diagrams in the
    documentation.
    - aroundAxis : arx (float) [query,edit]
    Initial velocity multiplier in the direction around the central axis of the volume. See the diagrams in the
    documentation.
    - attenuation : att (float) [query,edit]
    Attenuation rate of field with distance. For sphere volumes, distance is computed from the center of the sphere. For
    cone, cylinder, and cube volumes, it is computed from the vertical axis of the volume. For torus volumes, it is computed
    from the ring in the middle of the solid portion of the torus.
    - awayFromAxis : afx (float) [query,edit]
    Initial velocity multiplier in the direction away from the central axis of the volume. See the diagrams in the
    documentation. Used only with the cylinder, cone, and torus volumes.
    - awayFromCenter : afc (float) [query,edit]
    Initial velocity multiplier in the direction away from the center point of a cube or sphere volume. Used only with the
    cube and sphere volumes.
    - detailTurbulence : dtr (float) [query,edit]
    The relative intensity of a second higher frequency turbulence. This can be used to create fine features in large scale
    flows. Both the speed and the frequency on this second turbulence are higher than the primary turbulence. When the
    detailTurbulence is non-zero the simulation may run a bit slower, due to the computation of a second turbulence.Flag can
    appear in Create mode of commandFlag can have multiple arguments, passed either as a tuple or a list.
    - directionX : dx (float) [query,edit]
    x-component of force direction. Used with directional speed.
    - directionY : dy (float) [query,edit]
    y-component of force direction. Used with directional speed.
    - directionZ : dz (float) [query,edit]
    z-component of force direction. Used with directional speed.
    - directionalSpeed : drs (float) [query,edit]
    Adds a component of speed in the direction specified by the directionX, Y, and Z attributes.
    - invertAttenuation : ia (bool) [query,edit]
    If this attribute is FALSE, the default, then the attenuation makes the field's effect decrease as the affected point is
    further from the volume's axis and closer to its edge. If the is set to TRUE, then the effect of the field increases in
    this case, making the full effect of the field felt at the volume's edge.
    - magnitude : m (float) [query,edit]
    Strength of field.
    - maxDistance : mxd (float) [query,edit]
    Maximum distance at which field is exerted. A zero or negative value will turn off the field effect completely. For
    sphere volumes, distance is computed from the center of the sphere. For cone, cylinder, and cube volumes, it is computed
    from the vertical axis of the volume. For torus volumes, it is computed from the ring in the middle of the solid portion
    of the torus.
    - name : n (unicode) [query,edit]
    name of field
    - perVertex : pv (bool) [query,edit]
    No effect for this type of field.
    - position : pos (float, float, float) [query,edit]
    Position in space where you want to place the volume.
    - torusSectionRadius : tsr (float) [query,edit]
    - turbulence : trb (float) [query,edit]
    Adds a force simulating a turbulent wind that evolves over time.
    - turbulenceFrequencyX : tfx (float) [query,edit]
    The repeats of the turbulence function in X.
    - turbulenceFrequencyY : tfy (float) [query,edit]
    The repeats of the turbulence function in Y.
    - turbulenceFrequencyZ : tfz (float) [query,edit]
    The repeats of the turbulence function in Z.
    - turbulenceOffsetX : tox (float) [query,edit]
    The translation of the turbulence function in X.
    - turbulenceOffsetY : toy (float) [query,edit]
    The translation of the turbulence function in Y.
    - turbulenceOffsetZ : toz (float) [query,edit]
    The translation of the turbulence function in Z.
    - turbulenceSpeed : trs (float) [query,edit]
    The rate of change of the turbulence over time. The turbulence loops seamlessly every 1.0/turbulenceSpeed seconds. To
    animate this rate attach a new time node to the time input on the volumeAxisNode then animate the time value on the time
    node.
    - volumeExclusion : vex (bool) [query,edit]
    - volumeOffset : vof (float, float, float) [query,edit]
    - volumeShape : vsh (unicode) [query,edit]
    - volumeSweep : vsw (float) [query,edit]
    Derived from mel command `maya.cmds.volumeAxis`
    """
    # Stub: generated signature only; body is intentionally empty.
    pass
def nBase(*args, **kwargs):
    """
    Edits one or more nBase objects. Note that nBase objects include nCloth, nRigid and nParticle objects, but the options
    on this command do not currently apply to nParticle objects.
    Flags:
    - clearCachedTextureMap : cct (unicode) [create,edit]
    Clear the cached texture map for the specified attribute from the nBase.Flag can appear in Create mode of commandFlag
    can have multiple arguments, passed either as a tuple or a list.
    - clearStart : cs (bool) [create,edit]
    Indicates that start state should be cleared
    - stuffStart : ss (bool) [create,edit]
    Indicates that current state should be stuffed into the start state
    - textureToVertex : ttv (unicode) [create,edit]
    Transfer the texture map data for the specified attribute into the related per-vertex attribute.
    Derived from mel command `maya.cmds.nBase`
    """
    # Stub: generated signature only; body is intentionally empty.
    pass
def dynPaintEditor(*args, **kwargs):
"""
Create a editor window that can be painted into
Flags:
- activeOnly : ao (bool) [query,edit]
For Scene mode, this determines if only the active strokes will be refreshed.
- autoSave : autoSave (bool) [query,edit]
For Canvas mode, this determines if the buffer will be saved to a disk file after every stroke. Good for painting
textures and viewing the results in shaded display in the model view.
- camera : cam (unicode) [query,edit]
Sets the name of the camera which the Paint Effects panel looks through.
- canvasMode : cm (bool) [query,edit]
Sets the Paint Effects panel into Canvas mode if true.
- canvasUndo : cu (bool) [edit]
Does a fast undo in Canvas mode. This is a special undo because we are not using any history when we paint in Canvas
mode so we provide a single level undo for the Canvas.
- changeCommand : cc (unicode, unicode, unicode, unicode) [create,query,edit]
Parameters: First string: commandSecond string: editorNameThird string: editorCmdFourth string: updateFuncCall the
command when something changes in the editor The command should have this prototype :command(string $editor, string
$editorCmd, string $updateFunc, int $reason)The possible reasons could be : 0: no particular reason1: scale color2:
buffer (single/double)3: axis4: image displayed5: image saved in memory
- clear : cl (float, float, float) [edit]
Clears the buffer (if in Canvas mode) to the floating point values (R,G,B).
- control : ctl (bool) [query]
Query only. Returns the top level control for this editor. Usually used for getting a parent to attach popup menus.
Caution: It is possible, at times, for an editor to exist without a control. This flag returns "NONE" if no control is
present.
- currentCanvasSize : ccs (bool) [query]
In Query mode, this returns the (X,Y) resolution of the current canvas.Flag can appear in Create mode of commandFlag can
have multiple arguments, passed either as a tuple or a list.
- defineTemplate : dt (unicode) [create]
Puts a command in a mode where any other flags and args are parsed and added to the command template specified in the
argument. They will be used as default arguments in any subsequent invocations of the command when templateName is set
as the current template.
- displayAppearance : dsa (unicode) [query,edit]
Sets the display appearance of the model panel. Possible values are "wireframe", "points", "boundingBox",
"smoothShaded", "flatShaded". This flag may be used with the -interactive and -default flags. Note that only
"wireframe", "points", and "boundingBox" are valid for the interactive mode.
- displayFog : dfg (bool) [query,edit]
For Scene mode, this determines if fog will be displayed in the Paint Effects panel when refreshing the scene. If fog is
on, but this is off, fog will only be drawn on the strokes, not the rest of the scene.
- displayImage : di (int) [query,edit]
Set a particular image in the Editor Image Stack as the current Editor Image. Images are added to the Editor Image Stack
using the "si/saveImage" flag.
- displayLights : dsl (unicode) [query,edit]
Sets the lighting for shaded mode. Possible values are "selected", "active", "all", "default".
- displayStyle : dst (unicode) [create,query,edit]
Set the mode to display the image. Valid values are: "color" to display the basic RGB image"mask" to display the mask
channel"lum" to display the luminance of the image
- displayTextures : dtx (bool) [query,edit]
Turns on or off display of textures in shaded mode
- docTag : dtg (unicode) [create,query,edit]
Attaches a tag to the Maya editor.
- doubleBuffer : dbf (bool) [create,query,edit]
Set the display in double buffer mode
- drawAxis : da (bool) []
- drawContext : drc (bool) [query]
Returns the name of the context.
- exists : ex (bool) [create]
Returns true|false depending upon whether the specified object exists. Other flags are ignored.
- fastUpdate : fu (int) []
- fileName : fil (unicode) [query,edit]
This sets the file to which the canvas will be saved.
- filter : f (unicode) [create,query,edit]
Specifies the name of an itemFilter object to be placed on this editor. This filters the information coming onto the
main list of the editor.
- forceMainConnection : fmc (unicode) [create,query,edit]
Specifies the name of a selectionConnection object which the editor will use as its source of content. The editor will
only display items contained in the selectionConnection object. This is a variant of the -mainListConnection flag in
that it will force a change even when the connection is locked. This flag is used to reduce the overhead when using the
-unlockMainConnection , -mainListConnection, -lockMainConnection flags in immediate succession.
- highlightConnection : hlc (unicode) [create,query,edit]
Specifies the name of a selectionConnection object which the editor will synchronize with its highlight list. Not all
editors have a highlight list. For those that do, it is a secondary selection list.
- iconGrab : ig (bool) [edit]
This puts the Paint Effects panel into Grab Icon mode where the user is expected to drag out a section of the screen to
be made into an icon.
- loadImage : li (unicode) [edit]
load an image from disk and set it as the current Editor Image
- lockMainConnection : lck (bool) [create,edit]
Locks the current list of objects within the mainConnection, so that only those objects are displayed within the editor.
Further changes to the original mainConnection are ignored.
- mainListConnection : mlc (unicode) [create,query,edit]
Specifies the name of a selectionConnection object which the editor will use as its source of content. The editor will
only display items contained in the selectionConnection object.
- menu : mn (unicode) [create]
Sets the name of the script used to build a menu in the editor. The script takes the editor name as an argument.
- nbImages : nim (bool) [query]
returns the number of images
- newImage : ni (int, int, float, float, float) [query,edit]
Starts a new image in edit mode, setting the resolution to the integer values (X,Y) and clearing the buffer to the
floating point values (R,G,B). In Query mode, this returns the (X,Y) resolution of the current Image.
- paintAll : pa (float) [edit]
Redraws the buffer in current refresh mode.
- panel : pnl (unicode) [create,query]
Specifies the panel that the editor belongs to. By default if an editor is created in the create callback of a scripted
panel it will belong to that panel. If an editor doesn't belong to a panel it will be deleted when the window that it is
in is deleted.
- parent : p (unicode) [create,query,edit]
Specifies the parent layout for this editor. This flag will only have an effect if the editor is currently un-parented.
- redrawLast : rl (bool) [edit]
Redraws the last stroke again. Useful when it's brush has just changed. This feature does a fast undo and redraws the
stroke again.
- refreshMode : rmd (int) [query,edit]
Sets the refresh mode to the specified value. 0 - Do not draw strokes on refresh, 1 - Redraw strokes in wireframe mode,
2 - Redraw strokes in final rendered mode.
- removeAllImages : ra (bool) [edit]
remove all the Editor Images from the Editor Image Stack
- removeImage : ri (bool) [edit]
remove the current Editor Image from the Editor Image Stack
- rollImage : rig (float, float) [edit]
In Canvas mode, this rolls the image by the floating point values (X,Y). X and Y are between 0 (no roll) and 1 (full
roll). A value of .5 rolls the image 50% (ie. the border moves to the center of the screen.
- saveAlpha : sa (bool) [query,edit]
For Canvas mode, this determines if the alpha will be saved when storing the canvas to a disk file.
- saveBumpmap : sbm (unicode) [query,edit]
Saves the current buffer as a bump map to the specified file.
- saveImage : si (bool) [edit]
save the current Editor Image to memory. Saved Editor Images are stored in an Editor Image Stack. The most recently
saved image is stored in position 0, the second most recently saved image in position 1, and so on... To set the current
Editor Image to a previously saved image use the "di/displayImage" flag.
- scaleBlue : sb (float) [create,query,edit]
Define the scaling factor for the blue component in the View. The default value is 1 and can be between -1000 to +1000
- scaleGreen : sg (float) [create,query,edit]
Define the scaling factor for the green component in the View. The default value is 1 and can be between -1000 to +1000
- scaleRed : sr (float) [create,query,edit]
Define the scaling factor for the red component in the View. The default value is 1 and can be between -1000 to +1000
- selectionConnection : slc (unicode) [create,query,edit]
Specifies the name of a selectionConnection object which the editor will synchronize with its own selection list. As the
user selects things in this editor, they will be selected in the selectionConnection object. If the object undergoes
changes, the editor updates to show the change.
- singleBuffer : sbf (bool) [create,query,edit]
Set the display in single buffer mode
- snapShot : snp (bool) [edit]
Takes a snapshot of the current camera view.
- stateString : sts (bool) [query]
Query only flag. Returns the MEL command that will edit an editor to match the current editor state. The returned
command string uses the string variable $editorName in place of a specific name.
- swap : swp (int) []
- tileSize : ts (int) [edit]
Sets the size of the tile for the hardware texture redraw of the display buffer.
- unParent : up (bool) [create,edit]
Specifies that the editor should be removed from its layout. This cannot be used with query.
- undoCache : uc (bool) [edit]
By default the last image is cached for undo. If this is set false, then undoing will be disabled in canvas mode and
undo in scene mode will force a full refresh. Less memory will be used if this is set false before the first clear or
refresh of the current scene.
- unlockMainConnection : ulk (bool) [create,edit]
Unlocks the mainConnection, effectively restoring the original mainConnection (if it is still available), and dynamic
updates.
- updateMainConnection : upd (bool) [create,edit]
Causes a locked mainConnection to be updated from the orginal mainConnection, but preserves the lock state.
- useTemplate : ut (unicode) [create]
Force the command to use a command template other than the current one.
- wrap : wr (bool, bool) [query,edit]
For Canvas mode, should the buffer wrap in U, and V (respectively) when painting.
- writeImage : wi (unicode) [edit]
write the current Editor Image to disk
- zoom : zm (float) [query,edit]
Zooms the Canvas image by the specified value.
Derived from mel command `maya.cmds.dynPaintEditor`
"""
pass
def colorAtPoint(*args, **kwargs):
    """Query a texture (or ocean shader) at given UV coordinates.

    Samples may be given either as individual coordinates (repeated
    ``coordU``/``coordV`` (``u``/``v``) pairs) or as a regular grid
    (``samplesU``/``samplesV`` counts within the ``minU``/``minV`` ..
    ``maxU``/``maxV`` bounds -- the grid form may not be combined with
    individual coordinates). For ocean shaders, UV means world-space x/z.
    The ``output`` (``o``) flag selects the per-sample result: ``'A'``
    (alpha only), ``'RGB'`` or ``'RGBA'``. The return value is a flat
    float array: the i-th sample lives at index ``i * 3`` (RGB),
    ``i * 4`` (RGBA) or ``i`` (alpha only).

    Derived from mel command `maya.cmds.colorAtPoint`
    """
    pass
def saveInitialState(*args, **kwargs):
    """Save the current state of dynamics objects as their initial state.

    Operates on particle shapes, rigid bodies, rigid constraints and
    rigid solvers; with no objects specified it uses the current
    selection. Returns the names of the objects whose initial state was
    saved.

    Flags: ``attribute`` (``atr``, multi-use) restricts the save to one
    attribute; ``saveall`` (``all``) saves the initial state of every
    dynamics object in the scene.

    Derived from mel command `maya.cmds.saveInitialState`
    """
    pass
def getDefaultBrush(*args, **kwargs):
    """Return the name of the default Paint Effects brush.

    Derived from mel command `maya.cmds.getDefaultBrush`
    """
    pass
def truncateFluidCache(*args, **kwargs):
    """Truncate a fluid cache at the current time.

    Sets the end time of the cache to the current time; when the current
    time is earlier than the cache's end, only the portion up to and
    including the current time is preserved. In query mode, the return
    type depends on the queried flag.

    Derived from mel command `maya.cmds.truncateFluidCache`
    """
    pass
def stroke(*args, **kwargs):
    """Create a new Paint Effects stroke node.

    Flags: ``name`` (``n``) names the stroke; ``pressure`` (``pr``)
    copies the pressure-mapping settings from the Paint Effects Tool on
    creation (default False); ``seed`` (``s``) sets the stroke's random
    seed.

    Derived from mel command `maya.cmds.stroke`
    """
    pass
def fluidVoxelInfo(*args, **kwargs):
    """Query the mapping of a fluid voxel grid into object/world space.

    Useful for finding the voxel containing a point (``voxel``/``v``,
    optionally widened to a neighborhood via ``radius``/``r``), testing
    index validity (``inBounds``/``ib``, with ``checkBounds``/``cb`` to
    suppress results for out-of-bounds points), or getting voxel center
    positions (``voxelCenter``/``vc`` combined with the
    ``xIndex``/``yIndex``/``zIndex`` filters). ``objectSpace``/``os``
    selects object-space (True) versus world-space (False, the default)
    results. For 2D fluids pass 0 for the z index.

    Derived from mel command `maya.cmds.fluidVoxelInfo`
    """
    pass
def loadFluid(*args, **kwargs):
    """Set built-in fluid attributes (Density, Velocity, ...) for all
    cells in the grid from the initial-state cache.

    Flags: ``initialConditions`` (``ic``) loads the initial-conditions
    cache; ``currentTime`` (``ct``) and ``frame`` (``f``) are obsolete
    (move the cache clip in the Trax editor instead). In query mode the
    return type depends on the queried flag.

    Derived from mel command `maya.cmds.loadFluid`
    """
    pass
|
11532707
|
import os
from pathlib import Path
import zipfile
DATA_FOLDER = os.path.join(os.path.dirname(__file__), "../../data/raw")

def main():
    """Extract the raw XBRL archive into DATA_FOLDER, then delete it.

    Safe to run repeatedly: extraction is skipped when the target folder
    already exists or the archive has already been removed.
    """
    data_file = Path(DATA_FOLDER) / "2016-annual-report-xbrls.zip"
    # Folder the archive unpacks into -- assumes the zip's contents land in
    # a directory named after the archive; TODO confirm against the zip.
    target_folder = data_file.with_suffix("")
    # Bug fix: the old check joined DATA_FOLDER onto a path that already
    # contained it, so it never matched the extracted folder, and a second
    # run crashed in ZipFile after the archive had been deleted.
    if not target_folder.exists() and data_file.exists():
        with zipfile.ZipFile(data_file) as z:
            z.extractall(path=DATA_FOLDER)
    if data_file.exists():
        os.remove(data_file)

if __name__ == "__main__":
    main()
|
11532711
|
import boto3
import sys
def new_redis_cluster(clusterID, instanceSize, cacheName):
    """Create a single-node, single-AZ ElastiCache Redis cluster.

    Parameters:
        clusterID    -- CacheClusterId for the new cluster.
        instanceSize -- cache node instance type, e.g. 'cache.t3.micro'.
        cacheName    -- value for the 'Name' tag applied to the cluster.

    Uses boto3's default credential/region resolution chain. The call
    returns once the creation request is accepted, not when the cluster
    is available.
    """
    new_cluster = boto3.client('elasticache')
    new_cluster.create_cache_cluster(
        CacheClusterId = clusterID,
        AZMode='single-az',
        NumCacheNodes = 1,
        CacheNodeType = instanceSize,
        Engine='redis',
        Tags = [
            {
                'Key': 'Name',
                'Value': cacheName,
            },
        ]
    )
    print(cacheName + ' cluster: Created')
clusterID = sys.argv[1]
instanceSize = sys.argv[2]
cacheName = sys.argv[3]
if __name__ == '__main__':
new_redis_cluster(clusterID, instanceSize, cacheName)
|
11532735
|
import numpy as np
from gensim import matutils
SLICE_SIZE = 5000  # Rows per similarity slice; lower this in case of memory problems

class ClosestIndexes:
    """For every row vector, find the indices of its most similar rows.

    Similarity is cosine similarity: the matrix is L2-normalized row-wise
    in __init__, so a plain dot product between rows yields cosine values.
    The full (n x n) similarity matrix is never materialized; it is
    computed one horizontal slice of ``slice_size`` rows at a time.
    """

    def __init__(self, similarities):
        # NOTE(review): normalization below happens in place, so the
        # caller's array is mutated. A zero-norm row would divide by zero
        # here -- presumably inputs are nonzero embeddings; confirm.
        self.vectors = similarities
        # Normalize just in case
        norms = np.sqrt((self.vectors * self.vectors).sum(axis=1)).reshape(
            self.vectors.shape[0], 1)
        self.vectors /= norms

    def get_closest_indexes(self, top_k, slice_size=SLICE_SIZE):
        """Return, per row, a list of (index, score) pairs for the top_k
        most similar rows.

        Scores are cosine similarities rescaled from [-1, 1] to [0, 1]
        via (1 + sim) / 2. Since the rows are normalized, each row's own
        index appears in its result with score 1.0 (self-similarity is
        the maximum).
        """
        closest_indexes = []
        similarity_temp = None
        for i in range(self.vectors.shape[0]):
            # Recompute the similarity slice only when crossing a slice
            # boundary; rows i .. i+slice_size-1 share one dot product.
            if i % slice_size == 0:
                similarity_temp = np.dot(self.vectors[i:i + slice_size],
                                         self.vectors.T)
            # Indices of the top_k highest-similarity columns, descending.
            idx_closest = matutils.argsort(similarity_temp[i % slice_size],
                                           topn=top_k,
                                           reverse=True)
            closest_indexes.append(
                [(j, (1.0 + similarity_temp[i % slice_size][j]) / 2.0)
                 for j in idx_closest])
        return closest_indexes
|
11532736
|
class ApiConfig(object):
    """Lazily resolved, process-wide connection settings for splunkd.

    Both values are cached on the class after the first lookup, so the
    splunk configuration files are consulted at most once per process.
    """

    host_and_port = None  # cached (host, port) tuple
    scheme = None         # cached 'http' or 'https'

    @classmethod
    def splunkd_host_port(cls):
        """Return (host, port) of the splunkd management interface."""
        if cls.host_and_port:
            return cls.host_and_port
        import splunk.clilib.cli_common as comm
        host, port = comm.getWebConfKeyValue('mgmtHostPort').split(':')
        cls.host_and_port = (host, int(port))
        return cls.host_and_port

    @classmethod
    def splunkd_scheme(cls):
        """Return 'https' when splunkd SSL is enabled, otherwise 'http'."""
        if cls.scheme:
            return cls.scheme
        import splunk.clilib.cli_common as comm
        import splunk.util as splutil
        ssl_enabled = splutil.normalizeBoolean(
            comm.getConfKeyValue('server', 'sslConfig', 'enableSplunkdSSL'))
        cls.scheme = 'https' if ssl_enabled else 'http'
        return cls.scheme
|
11532781
|
import cPickle
from thoonk.exceptions import *
from thoonk.feeds import Queue
class PythonQueue(Queue):
    """Queue variant that transparently pickles/unpickles stored items.

    A Thoonk.py addition: behaves exactly like a regular Thoonk queue
    except that arbitrary Python objects may be stored -- they are
    serialized with cPickle on put() and deserialized on get().

    Thoonk.py Implementation API:
        put -- Add a Python object to the queue.
        get -- Retrieve a Python object from the queue.
    """

    def put(self, item, priority=None):
        """Pickle *item* and add it to the queue (same as self.publish()).

        Arguments:
            item     -- Any picklable Python object.
            priority -- Optional; when equal to self.HIGH the item is
                        inserted at the head of the queue instead of
                        the end.
        """
        pickled = cPickle.dumps(item)
        return Queue.put(self, pickled, priority)

    def get(self, timeout=0):
        """Pop the next item from the queue and unpickle it.

        Arguments:
            timeout -- Optional time in seconds to wait before raising
                       an exception.

        Raises a self.Empty exception when the request times out.
        """
        raw = Queue.get(self, timeout)
        return cPickle.loads(raw)
|
11532784
|
from argparse import ArgumentParser
import torch
from model import get_openqa, add_additional_documents
from transformers.models.realm.modeling_realm import logger
from transformers.utils import logging
# Surface the REALM model's info-level logs and print tensors at high
# precision to make retrieval scores easier to inspect.
logger.setLevel(logging.INFO)
torch.set_printoptions(precision=8)
def get_arg_parser():
    """Build the CLI parser for the REALM open-QA demo script."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument(
        "--question", type=str, required=True,
        help="Input question.",
    )
    arg_parser.add_argument(
        "--checkpoint_pretrained_name", type=str,
        default=r"google/realm-orqa-nq-openqa",
        help="Checkpoint name or path.",
    )
    arg_parser.add_argument(
        "--additional_documents_path", type=str, default=None,
        help="Additional document entries for retrieval. Must be .npy format.",
    )
    return arg_parser
def main(args):
    """Answer ``args.question`` with the REALM open-QA pipeline.

    Prints the question/answer pair and returns the decoded answer
    string.
    """
    openqa = get_openqa(args)
    tokenizer = openqa.retriever.tokenizer
    # Optionally extend the retrieval corpus before answering.
    if args.additional_documents_path is not None:
        add_additional_documents(openqa, args.additional_documents_path)
    question_ids = tokenizer(args.question, return_tensors="pt").input_ids
    # Inference only -- no gradients needed.
    with torch.no_grad():
        outputs = openqa(input_ids=question_ids, return_dict=True)
    predicted_answer = tokenizer.decode(outputs.predicted_answer_ids)
    print(f"Question: {args.question}\nAnswer: {predicted_answer}")
    return predicted_answer
if __name__ == "__main__":
    # Parse CLI arguments and run the demo end to end.
    main(get_arg_parser().parse_args())
|
11532786
|
import typing
import torch
from . import module
class _GlobalPool(torch.nn.Module):
def forward(self, inputs):
return self._pooling(inputs).reshape(inputs.shape[0], -1)
class GlobalMaxPool1d(_GlobalPool):
    """1D global max pooling over the last dimension.

    Typically placed after the final `Conv1d` layer to keep only the
    maximum feature value per channel. Implemented as
    `torch.nn.AdaptiveMaxPool1d(1)` with the trailing `1` dimension
    flattened.

    Returns
    -------
    `torch.Tensor`
        `2D` tensor `(batch, features)`
    """

    def __init__(self):
        super().__init__()
        self._pooling = torch.nn.AdaptiveMaxPool1d(1)
class GlobalMaxPool2d(_GlobalPool):
    """2D global max pooling over the trailing spatial dimensions.

    Typically placed after the final `Conv2d` layer to keep only the
    maximum feature value per channel; accepts `3D` or `4D` input (the
    latter is the common batched case). Implemented as
    `torch.nn.AdaptiveMaxPool2d(1)` with the trailing `1` dimensions
    flattened.

    Returns
    -------
    `torch.Tensor`
        `2D` tensor `(batch, features)`
    """

    def __init__(self):
        super().__init__()
        self._pooling = torch.nn.AdaptiveMaxPool2d(1)
class GlobalMaxPool3d(_GlobalPool):
    """3D global max pooling over the trailing spatial dimensions.

    Typically placed after the final `Conv3d` layer to keep only the
    maximum feature value per channel; accepts `4D` or `5D` input (the
    latter is the common batched case). Implemented as
    `torch.nn.AdaptiveMaxPool3d(1)` with the trailing `1` dimensions
    flattened.

    Returns
    -------
    `torch.Tensor`
        `2D` tensor `(batch, features)`
    """

    def __init__(self):
        super().__init__()
        self._pooling = torch.nn.AdaptiveMaxPool3d(1)
class GlobalAvgPool1d(_GlobalPool):
    """1D global average pooling over the last dimension.

    Typically placed after the final `Conv1d` layer to keep the mean
    feature value per channel. Implemented as
    `torch.nn.AdaptiveAvgPool1d(1)` with the trailing `1` dimension
    flattened.

    Returns
    -------
    `torch.Tensor`
        `2D` tensor `(batch, features)`
    """

    def __init__(self):
        super().__init__()
        self._pooling = torch.nn.AdaptiveAvgPool1d(1)
class GlobalAvgPool2d(_GlobalPool):
    """2D global average pooling over the trailing spatial dimensions.

    Typically placed after the final `Conv2d` layer to keep the mean
    feature value per channel; accepts `3D` or `4D` input (the latter is
    the common batched case). Implemented as
    `torch.nn.AdaptiveAvgPool2d(1)` with the trailing `1` dimensions
    flattened.

    Returns
    -------
    `torch.Tensor`
        `2D` tensor `(batch, features)`
    """

    def __init__(self):
        super().__init__()
        self._pooling = torch.nn.AdaptiveAvgPool2d(1)
class GlobalAvgPool3d(_GlobalPool):
    """3D global average pooling over the trailing spatial dimensions.

    Typically placed after the final `Conv3d` layer to keep the mean
    feature value per channel; accepts `4D` or `5D` input (the latter is
    the common batched case). Implemented as
    `torch.nn.AdaptiveAvgPool3d(1)` with the trailing `1` dimensions
    flattened.

    Returns
    -------
    `torch.Tensor`
        `2D` tensor `(batch, features)`
    """

    def __init__(self):
        super().__init__()
        self._pooling = torch.nn.AdaptiveAvgPool3d(1)
class GlobalMaxPool(module.InferDimension):
    """Dimension-inferring global max pooling.

    Dispatches on the rank of the input tensor: `3D`, `4D` and `5D`
    inputs (batch included) are routed to the `1D`, `2D` and `3D`
    global-max-pool layers respectively. Usually placed after the last
    convolution layer (`torchlayers.Conv`) to keep the maximum value
    per channel.

    Returns
    -------
    `torch.Tensor`
        `2D` tensor `(batch, features)`
    """

    def __init__(self):
        super().__init__(
            dispatcher={
                3: GlobalMaxPool1d,
                4: GlobalMaxPool2d,
                5: GlobalMaxPool3d,
            }
        )
class GlobalAvgPool(module.InferDimension):
    """Dimension-inferring global average pooling.

    Dispatches on the rank of the input tensor: `3D`, `4D` and `5D`
    inputs (batch included) are routed to the `1D`, `2D` and `3D`
    global-average-pool layers respectively. Usually placed after the
    last convolution layer (`torchlayers.Conv`) to keep the mean value
    per channel.

    Returns
    -------
    `torch.Tensor`
        `2D` tensor `(batch, features)`
    """

    def __init__(self):
        super().__init__(
            dispatcher={
                3: GlobalAvgPool1d,
                4: GlobalAvgPool2d,
                5: GlobalAvgPool3d,
            }
        )
class MaxPool(module.InferDimension):
    """Dimension-inferring max pooling.

    `3D`, `4D` and `5D` inputs (batch included) are dispatched to
    `torch.nn.MaxPool1d`, `torch.nn.MaxPool2d` and `torch.nn.MaxPool3d`
    respectively. Unlike the raw `torch.nn` layers, `kernel_size`
    defaults to `2`.

    Parameters
    ----------
    kernel_size : int, optional
        Size of the window to take a max over. Default: `2`
    stride : int, optional
        Stride of the window. Defaults to `kernel_size`.
    padding : int, optional
        Implicit zero padding added on both sides. Default: `0`
    dilation : int, optional
        Stride between elements within the window. Default: `1`
    return_indices : bool, optional
        If ``True``, also return the max indices along with the outputs
        (useful for `torch.nn.MaxUnpool` later). Default: `False`
    ceil_mode : bool, optional
        Use `ceil` instead of `floor` when computing the output shape.
        Default: `False`

    Returns
    -------
    `torch.Tensor`
        Same rank as `input` with values pooled.
    """

    def __init__(
        self,
        kernel_size: int = 2,
        stride: int = None,
        padding: int = 0,
        dilation: int = 1,
        return_indices: bool = False,
        ceil_mode: bool = False,
    ):
        super().__init__(
            dispatcher={
                3: torch.nn.MaxPool1d,
                4: torch.nn.MaxPool2d,
                5: torch.nn.MaxPool3d,
            },
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            return_indices=return_indices,
            ceil_mode=ceil_mode,
        )
class AvgPool(module.InferDimension):
    """Dimension-inferring average pooling.

    `3D`, `4D` and `5D` inputs (batch included) are dispatched to
    `torch.nn.AvgPool1d`, `torch.nn.AvgPool2d` and `torch.nn.AvgPool3d`
    respectively. Unlike the raw `torch.nn` layers, `kernel_size`
    defaults to `2`.

    Parameters
    ----------
    kernel_size : int, optional
        Size of the pooling window. Default: `2`
    stride : int, optional
        Stride of the window. Defaults to `kernel_size`.
    padding : int, optional
        Implicit zero padding added on both sides. Default: `0`
    ceil_mode : bool, optional
        Use `ceil` instead of `floor` when computing the output shape.
        Default: `False`
    count_include_pad : bool, optional
        When True, include the zero-padding in the averaging.
        Default: `True`

    Returns
    -------
    `torch.Tensor`
        Same rank as `input` with values pooled.
    """

    def __init__(
        self,
        kernel_size: int = 2,
        stride: int = None,
        padding: int = 0,
        ceil_mode: bool = False,
        count_include_pad: bool = True,
    ):
        super().__init__(
            dispatcher={
                3: torch.nn.AvgPool1d,
                4: torch.nn.AvgPool2d,
                5: torch.nn.AvgPool3d,
            },
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            ceil_mode=ceil_mode,
            count_include_pad=count_include_pad,
        )
|
11532805
|
# Dataset identifiers used to select the segmentation dataset.
CAMVID = 'camvid'
KITTI = 'kitti'
# CamVid semantic-segmentation palette: class name -> RGB triple (0-255)
# used to paint that class in label images.
camvid_color_map = {'Animal': (64, 128, 64), 'Archway': (192, 0, 128),
                    'Bicyclist': (0, 128, 192), 'Bridge': (0, 128, 64),
                    'Building': (128, 0, 0), 'Car': (64, 0, 128),
                    'CartLuggagePram': (64, 0, 192), 'Child': (192, 128, 64),
                    'Column_Pole': (192, 192, 128), 'Fence': (64, 64, 128),
                    'LaneMkgsDriv': (128, 0, 192), 'LaneMkgsNonDriv': (192, 0, 64),
                    'Misc_Text': (128, 128, 64), 'MotorcycleScooter': (192, 0, 192),
                    'OtherMoving': (128, 64, 64), 'ParkingBlock': (64, 192, 128),
                    'Pedestrian': (64, 64, 0), 'Road': (128, 64, 128),
                    'RoadShoulder': (128, 128, 192), 'Sidewalk': (0, 0, 192),
                    'SignSymbol': (192, 128, 128), 'Sky': (128, 128, 128),
                    'SUVPickupTruck': (64, 128, 192), 'TrafficCone': (0, 0, 64),
                    'TrafficLight': (0, 64, 64), 'Train': (192, 64, 128),
                    'Tree': (128, 128, 0), 'Truck_Bus': (192, 128, 192),
                    'Tunnel': (64, 0, 64), 'VegetationMisc': (192, 192, 0),
                    'Void': (0, 0, 0), 'Wall': (64, 192, 0)}
|
11532831
|
from .common import InfoExtractor
class MaoriTVIE(InfoExtractor):
    """Extractor for maoritelevision.com show pages.

    The pages embed a Brightcove player; this extractor scrapes the
    video id from the page markup and delegates to the BrightcoveNew
    extractor.
    """
    _VALID_URL = r'https?://(?:www\.)?maoritelevision\.com/shows/(?:[^/]+/)+(?P<id>[^/?&#]+)'
    _TEST = {
        'url': 'https://www.maoritelevision.com/shows/korero-mai/S01E054/korero-mai-series-1-episode-54',
        'md5': '5ade8ef53851b6a132c051b1cd858899',
        'info_dict': {
            'id': '4774724855001',
            'ext': 'mp4',
            'title': 'Kōrero Mai, Series 1 Episode 54',
            'upload_date': '20160226',
            'timestamp': 1456455018,
            'description': 'md5:59bde32fd066d637a1a55794c56d8dcb',
            'uploader_id': '1614493167001',
        },
    }
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1614493167001/HJlhIQhQf_default/index.html?videoId=%s'

    def _real_extract(self, url):
        video_slug = self._match_id(url)
        page = self._download_webpage(url, video_slug)
        # The Brightcove id is embedded as a data attribute on the player.
        brightcove_id = self._search_regex(
            r'data-main-video-id=["\'](\d+)', page, 'brightcove id')
        brightcove_url = self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id
        return self.url_result(brightcove_url, 'BrightcoveNew', brightcove_id)
|
11532874
|
from django.utils import simplejson
from django.http import HttpResponse
class JsonResponse(HttpResponse):
    """HttpResponse subclass that serializes ``data`` to a JSON body.

    NOTE(review): ``django.utils.simplejson`` was removed in Django 1.5;
    on modern Django use the stdlib ``json`` module (or the built-in
    ``django.http.JsonResponse``) instead.
    """

    def __init__(self, data):
        # ``data`` must be JSON-serializable (dict, list, str, int, ...).
        HttpResponse.__init__(self,
                              content=simplejson.dumps(data),
                              content_type='application/json')
|
11532886
|
from dataclasses import dataclass, field as datafield
from typing import List
from pal.model.logical_operand import LogicalOperand
from pal.model.register_operand import RegisterOperand
@dataclass
class ExecutionContext():
    """ Models the context under which an instruction may be executed """
    execution_state: str = ""
    """ The name of the execution state an instruction is executable in """
    # Idiom fix: `default_factory=list` replaces the redundant
    # `lambda: []` wrappers -- identical behavior, one fewer indirection.
    logical_inputs: List[LogicalOperand] = datafield(default_factory=list)
    """ List of logical values this instruction takes as inputs """
    logical_outputs: List[LogicalOperand] = datafield(default_factory=list)
    """ List of logical values this instruction produces as outputs """
    register_operands: List[RegisterOperand] = datafield(default_factory=list)
    """ List of registers this instruction operates on """
|
11532928
|
from ..app import app
from ..tools import ErrorResponse, OKResponse
from ..model.user import User
@app.http_get("/api2/user/{username}")
@app.authenticated
async def get_user_byname(request):
    """
    Return a user by its username.
    ---
    description: Return a user by its name.
    tags:
        - Users
    parameters:
        - name: username
          description: User name
          in: path
          required: true
          type: string
    responses:
        "200":
            description: Return a dict with results
            schema:
                type: object
                properties:
                    username:
                        type: string
                    email:
                        type: string
                    user_id:
                        type: integer
                    is_admin:
                        type: boolean
    """
    # NOTE(review): the swagger schema above advertises "user_id", but the
    # payload below uses the key "id" -- confirm which one clients expect.
    username = request.match_info["username"]
    # Exact-match lookup; .first() yields None when no row matches.
    user = (
        request.cirrina.db_session.query(User)
        .filter(User.username == username)
        .first()
    )
    if not user:
        return ErrorResponse(404, "User not found")
    data = {"username": user.username,
            "email": user.email,
            "id": user.id,
            "is_admin": user.is_admin}
    return OKResponse(data)
|
11532970
|
import setuptools
# The PyPI long description is taken verbatim from the README.
with open("README.md", encoding='utf-8') as f:
    long_description = f.read()
# Read the package version from alkymi/version.py without importing the
# package (avoids pulling in package dependencies at build time).
version = {}
with open('alkymi/version.py') as fp:
    exec(fp.read(), version)
setuptools.setup(
    name="alkymi",
    description="alkymi - Pythonic task automation",
    version=version['__version__'],
    license="MIT",
    author="<NAME>",
    author_email="<EMAIL>",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/MathiasStokholm/alkymi",
    packages=["alkymi"],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "License :: OSI Approved :: MIT License",
        "Environment :: Console",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Intended Audience :: Developers",
        "Intended Audience :: Information Technology",
        "Intended Audience :: Science/Research",
        "Intended Audience :: System Administrators",
        "Topic :: Software Development :: Build Tools",
        "Topic :: Software Development :: Testing",
        "Topic :: Software Development :: Quality Assurance",
        "Topic :: Scientific/Engineering",
    ],
    keywords=["automation", "pipeline", "validation", "preprocessing", "make", "build", "task"],
    project_urls={
        "Source": "https://github.com/MathiasStokholm/alkymi/",
        "Tracker": "https://github.com/MathiasStokholm/alkymi/issues",
        "Documentation": "https://alkymi.readthedocs.io/en/latest/",
    },
    python_requires=">=3.5",
)
|
11532972
|
import torch
import torch.nn.functional as F
from combinet.src.dataset.camvid import CLASS_WEIGHT_CAMVID
from combinet.src.dataset.bacteria import CLASS_WEIGHT_BACTERIA
from combinet.src.utils import nanmean
class StreamSegMetrics():
    """
    Stream Metrics for Semantic Segmentation Task

    Accumulates a running confusion matrix together with averages of
    negative log-likelihood (NLL), expected calibration error (ECE) and
    predictive entropy over successive batches of per-pixel class
    probabilities.
    """

    def __init__(self, args):
        # `args` must provide `n_classes` and `dataset`.  Per-class NLL
        # weights are dataset specific; "random" uses uniform weights.
        self.args = args
        self.n_classes = args.n_classes
        self.confusion_matrix = torch.zeros((self.n_classes, self.n_classes))
        self.entropy = AverageMeter()
        self.nll = AverageMeter()
        self.ece = AverageMeter()
        if self.args.dataset == "camvid":
            self.class_weight = CLASS_WEIGHT_CAMVID
        elif self.args.dataset == "bacteria":
            self.class_weight = CLASS_WEIGHT_BACTERIA
        elif self.args.dataset == "random":
            self.class_weight = torch.ones((args.n_classes,))

    @staticmethod
    def get_predictions(output):
        """Return the argmax class map of shape (bs, h, w) for a
        (bs, c, h, w) score/probability tensor."""
        bs, c, h, w = output.size()
        tensor = output.data
        _, indices = tensor.max(1)
        indices = indices.view(bs, h, w)
        return indices

    @staticmethod
    def ec_error(output, target):
        """Expected calibration error over 10 equal-width confidence bins.

        Parameters
        ----------
        output : (N, C) tensor of per-sample class probabilities.
        target : (N,) tensor of ground-truth class indices.

        Returns the ECE as a plain float.
        """
        _ece = 0.0
        confidences, predictions = torch.max(output, 1)
        accuracies = predictions.eq(target)
        bin_boundaries = torch.linspace(0, 1, 10 + 1)
        bin_lowers = bin_boundaries[:-1]
        bin_uppers = bin_boundaries[1:]
        for bin_lower, bin_upper in zip(bin_lowers, bin_uppers):
            # Samples whose top-1 confidence falls into (lower, upper].
            in_bin = confidences.gt(bin_lower.item()) * \
                confidences.le(bin_upper.item())
            prop_in_bin = in_bin.float().mean()
            if prop_in_bin.item() > 0:
                accuracy_in_bin = accuracies[in_bin].float().mean()
                avg_confidence_in_bin = confidences[in_bin].mean()
                _ece += torch.abs(avg_confidence_in_bin -
                                  accuracy_in_bin) * prop_in_bin
        _ece = _ece if isinstance(_ece, float) else _ece.item()
        return _ece

    def update(self, labels, output_mean):
        """Accumulate all metrics for one batch.

        Parameters
        ----------
        labels : (bs, h, w) LongTensor of ground-truth class ids.
        output_mean : (bs, c, h, w) tensor of per-pixel class probabilities
            (already normalised -- the NLL below takes their log directly).
        """
        with torch.no_grad():
            bs, c, h, w = output_mean.size()
            predictions = StreamSegMetrics.get_predictions(output_mean)
            # Fix: the original condition read
            #   labels.is_cuda or predictions.is_cuda and not cm.is_cuda
            # where `and` binds tighter than `or`, so the matrix was moved
            # (redundantly, inside the per-sample loop) whenever `labels`
            # was on the GPU.  Parenthesise the intent and hoist the
            # batch-invariant device move out of the loop.
            if (labels.is_cuda or predictions.is_cuda) \
                    and not self.confusion_matrix.is_cuda:
                self.confusion_matrix = self.confusion_matrix.to(predictions.device)
            for lt, lp in zip(labels, predictions):
                self.confusion_matrix += self._fast_hist(lt.view(-1), lp.view(-1))
            nll = F.nll_loss(torch.log(output_mean + 1e-8), labels,
                             weight=self.class_weight.to(predictions.device)
                             if predictions.is_cuda else self.class_weight,
                             reduction='mean').item()
            # Fix: `output_mean.view(-1, n_classes)` does NOT yield per-pixel
            # probability rows for a channel-first (bs, c, h, w) tensor --
            # the class axis is not last in memory, so rows were arbitrary
            # slices of the flattened volume.  Move the class axis last
            # before flattening so each row lines up with its label.
            ece = self.ec_error(
                output_mean.permute(0, 2, 3, 1).reshape(-1, self.n_classes),
                labels.view(-1)) * 100
            entropy = -(torch.sum(torch.log(output_mean + 1e-8) * output_mean)
                        / (bs * h * w)).item()
            self.nll.update(nll, bs * h * w)
            self.ece.update(ece, bs * h * w)
            self.entropy.update(entropy, bs * h * w)

    def _fast_hist(self, label_true, label_pred):
        """Confusion-matrix contribution of one flattened (pred, true) pair.

        Labels outside [0, n_classes) are ignored via the mask.
        """
        mask = (label_true >= 0) & (label_true < self.n_classes)
        hist = torch.bincount(
            self.n_classes * label_true[mask].long() + label_pred[mask],
            minlength=self.n_classes ** 2,
        ).reshape(self.n_classes, self.n_classes)
        return hist

    def get_results(self):
        """Return (err%, class-err%, mIoU%, mDice, ECE avg, entropy avg, NLL avg)."""
        with torch.no_grad():
            hist = self.confusion_matrix
            # This is to avoid computation with respect to the background
            # class for CamVid only.
            if self.args.dataset == "camvid":
                hist = hist[:-1, :-1]
            acc = torch.diag(hist).sum() / hist.sum()
            acc_cls = torch.diag(hist) / hist.sum(dim=1)
            acc_cls = nanmean(acc_cls)
            iu = torch.diag(hist) / (hist.sum(dim=1) + hist.sum(dim=0) - torch.diag(hist))
            mean_iu = nanmean(iu)
            dice = 2 * torch.diag(hist) / (hist.sum(dim=1) + hist.sum(dim=0))
            mean_dice = nanmean(dice)
            return (1. - acc.item()) * 100, (1. - acc_cls.item()) * 100, mean_iu.item() * 100, mean_dice.item(), self.ece.avg, self.entropy.avg, self.nll.avg

    def reset(self):
        """Clear the confusion matrix and all running averages."""
        self.confusion_matrix = torch.zeros((self.n_classes, self.n_classes))
        self.entropy.reset()
        self.nll.reset()
        self.ece.reset()
class AverageMeter(object):
    """Track the running sum, count and mean of a scalar quantity."""

    def __init__(self):
        # Start from an empty accumulator.
        self.reset()

    def reset(self):
        """Discard all accumulated statistics."""
        self.avg = 0.0
        self.sum = 0.0
        self.cnt = 0.0

    def update(self, val, n=1):
        """Fold in `val` (a mean over `n` samples) and refresh the average."""
        increment = val * n
        self.sum = self.sum + increment
        self.cnt = self.cnt + n
        self.avg = self.sum / self.cnt
|
11532987
|
import os
import subprocess
import networkx as nx
from attacksurfacemeter import utilities
from attacksurfacemeter.call import Call
from attacksurfacemeter.granularity import Granularity
from attacksurfacemeter.loaders.base_loader import BaseLoader
from attacksurfacemeter.loaders.stack import Stack
class CflowLoader(BaseLoader):
    """Loads a call graph produced by GNU cflow into a networkx DiGraph.

    The source may be an existing cflow output file or a source directory,
    in which case cflow is invoked via a helper shell script.
    """
    def __init__(self, source, reverse=False, defenses=None,
                 vulnerabilities=None):
        """Constructor for CflowLoader.
        Parameters
        ----------
        source : str
            The absolute path to a text file containing the call graph
            generated using cflow or the absolute path to a directory
            containing the source files for which a call graph must be
            generated using cflow.
        reverse : bool
            If true, the call graph is assumed to have been created using the
            cflow's -r option.
        defenses : list, optional
            A list of Call objects, each representing a designed defense in the
            system.
        vulnerabilities : list, optional
            A list of Call objects, each representing a vulnerable function in
            the system.
        """
        super(CflowLoader, self).__init__(
            source, reverse, defenses, vulnerabilities
        )
    def load_call_graph(self, granularity=Granularity.FUNC):
        """Load a call graph generated by cflow.
        If necessary, the static call graph generation utility (cflow) is
        invoked to generate the call graph before attempting to load it.
        Parameters
        ----------
        granularity : str
            The granularity at which the call graph must be loaded. See
            attacksurfacemeter.granularity.Granularity for available choices.
        Returns
        -------
        call_graph : networkx.DiGraph
            An object representing the call graph.
        """
        call_graph = nx.DiGraph()
        # Stack of ancestor calls: the top is the parent of the line being
        # parsed.  cflow encodes nesting depth, exposed here via Call.level.
        parent = Stack()
        raw_call_graph = None
        if os.path.isfile(self.source):
            raw_call_graph = open(self.source)
        elif os.path.isdir(self.source):
            # Generate the call graph on the fly from the source directory.
            raw_call_graph = self._exec_cflow()
        try:
            # Seed the traversal with the first line.  Call.from_cflow
            # presumably parses a single cflow output line -- confirm in Call.
            previous = Call.from_cflow(raw_call_graph.readline(), granularity)
            for line in raw_call_graph:
                current = Call.from_cflow(line, granularity)
                if current.level > previous.level:
                    # Went one level deeper: previous call is the new parent.
                    parent.push(previous)
                elif current.level < previous.level:
                    # Came back up: unwind one parent per level climbed.
                    for t in range(previous.level - current.level):
                        parent.pop()
                if parent.top:
                    caller = callee = None
                    # NOTE(review): these four flags appear unused below --
                    # possibly a leftover from an earlier revision.
                    entry = exit = dangerous = defense = False
                    if self.is_reverse:
                        # With cflow -r the listing is callee-first, so the
                        # roles are swapped.
                        caller = current
                        callee = parent.top
                    else:
                        caller = parent.top
                        callee = current
                    (caller_attrs, callee_attrs) = utilities.get_node_attrs(
                        'cflow', caller, callee, self.defenses,
                        self.vulnerabilities
                    )
                    # NOTE(review): positional attribute dicts are the
                    # networkx 1.x API; networkx >= 2.0 removed them in
                    # favour of **attrs -- confirm the pinned nx version.
                    call_graph.add_node(caller, caller_attrs)
                    if callee_attrs is not None:
                        call_graph.add_node(callee, callee_attrs)
                    # Adding the edge caller -- callee
                    attrs = {'cflow': None, 'call': None}
                    call_graph.add_edge(caller, callee, attrs)
                    # Adding the edge callee -- caller with the assumption
                    # that every call must return
                    attrs = {'cflow': None, 'return': None}
                    call_graph.add_edge(callee, caller, attrs)
                previous = current
        finally:
            # Close the file / pipe even when parsing fails part-way.
            if raw_call_graph:
                raw_call_graph.close()
        return call_graph
    def _exec_cflow(self):
        """Execute cflow as a subprocess and return its output.
        Parameters
        ----------
        None
        Returns
        -------
        stdout : file
            An instance of a file object representing the output from cflow.
        """
        # The forward and reverse call graphs are produced by two wrapper
        # scripts shipped next to this module.
        cflow_exe = 'run_cflow.sh'
        if self.is_reverse:
            cflow_exe = 'run_cflow_r.sh'
        dirname = os.path.dirname(os.path.realpath(__file__))
        proc = subprocess.Popen(
            '{0} {1}'.format(os.path.join(dirname, cflow_exe), self.source),
            stdout=subprocess.PIPE,
            shell=True,
            universal_newlines=True
        )
        return proc.stdout
|
11533023
|
from datetime import datetime
from json import JSONDecodeError
from rest_framework import status
from openbook_moderation.permissions import IsNotSuspended
from openbook_posts.models import Post
from rest_framework.views import APIView
from rest_framework.response import Response
from django.core.files.images import ImageFile
from django.utils.dateparse import parse_datetime
from rest_framework.permissions import IsAuthenticated
from django.utils.translation import ugettext_lazy as _
from openbook_importer.serializers import ZipfileSerializer
from openbook_importer.socialmedia_archive_parser.fb_parser import zip_parser
class ImportItem(APIView):
    """API view that accepts an uploaded social-media archive (zip) and
    imports the posts it contains as public posts of the requesting user."""
    permission_classes = (IsAuthenticated, IsNotSuspended)
    def post(self, request):
        """Validate the uploaded zipfile, parse it, and import its posts.

        Returns HTTP 400 for unreadable or invalid archives and HTTP 200
        once any contained posts have been saved.
        """
        serializer = ZipfileSerializer(data=request.FILES)
        serializer.is_valid(raise_exception=True)
        zipfile = request.FILES['file']
        try:
            p = zip_parser(zipfile)
        except FileNotFoundError:
            return self._return_invalid()
        except JSONDecodeError:
            return self._return_invalid()
        except TypeError:
            # zip_parser raises TypeError for archives it considers
            # malicious -- confirm the exact trigger in fb_parser.
            return self._return_malicious()
        if p.profile.posts:
            self.save_posts(p.profile.posts, request.user)
        return Response({
            'message': _('done')
        }, status=status.HTTP_200_OK)
    def save_posts(self, posts, user):
        """Create a public post for each parsed archive entry.

        Duplicate detection is by (creator, text, created) -- an existing
        identical post is skipped.
        """
        for post in posts:
            image = None
            images = None
            text = None
            timestamp = post['timestamp']
            # NOTE(review): fromtimestamp() yields *local* time but the
            # string is then labelled +00:00 -- looks like a timezone
            # mismatch; confirm the intended interpretation.
            created = datetime.fromtimestamp(timestamp)
            created = parse_datetime(created.strftime('%Y-%m-%d %T+00:00'))
            if 'attachments' in post.keys():
                images = self._get_media_content(post)
            if 'data' in post.keys() and len(post['data']) != 0:
                text = post['data'][0]['post']
            if images:
                # Only the first attachment is imported; its description,
                # if any, overrides the post text.
                image = images[0]
                if 'text' in image.keys():
                    text = image['text']
                image = ImageFile(image['text']) if False else ImageFile(image['file'])
            if not Post.objects.filter(creator=user.pk, text=text, created=created).exists():
                user.create_public_post(text=text, image=image, created=created)
    def _get_media_content(self, post):
        """Collect {'file': ..., 'text': ...} dicts for every media
        attachment of a post."""
        images = []
        image = {}
        for attachment in post['attachments']:
            for data in attachment['data']:
                # NOTE(review): indexes element 1 of the uri value --
                # presumably uri is a (name, file-object) pair produced by
                # the zip parser; verify against fb_parser.
                image['file'] = data['media']['uri'][1]
                if 'description' in data['media'].keys():
                    image['text'] = data['media']['description']
                images.append(image)
                image = {}
        return images
    def _return_invalid(self):
        """HTTP 400 response for archives that cannot be parsed."""
        return Response({
            'message':_('invalid archive')
        }, status=status.HTTP_400_BAD_REQUEST)
    def _return_malicious(self):
        """HTTP 400 response for archives flagged as malicious."""
        # TODO LOG MALICIOUS ATTEMPT
        print('---- POTENTIALLY MALICIOUS UPLOAD!!!')
        return Response({
            'message':_('invalid archive')
        }, status=status.HTTP_400_BAD_REQUEST)
|
11533033
|
import pytest
import sqlalchemy
import sqlalchemy.event
import sqlalchemy_fsm
from tests.conftest import Base
class EventModel(Base):
    """Minimal FSM-backed model used to exercise state-change events."""
    __tablename__ = 'event_model'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    # FSM-managed state column; the transitions below move it around.
    state = sqlalchemy.Column(sqlalchemy_fsm.FSMField)
    def __init__(self, *args, **kwargs):
        # Every instance starts in the 'new' state.
        self.state = 'new'
        super(EventModel, self).__init__(*args, **kwargs)
    @sqlalchemy_fsm.transition(source='*', target='state_a')
    def stateA(self):
        """Transition to 'state_a' from any state."""
        pass
    @sqlalchemy_fsm.transition(source='*', target='state_b')
    def stateB(self):
        """Transition to 'state_b' from any state."""
        pass
class TestEventListener(object):
    """Tests for the before/after_state_change events fired by sqlalchemy_fsm."""
    @pytest.fixture
    def model(self):
        # Fresh, unsaved model for each test.
        return EventModel()
    @pytest.mark.parametrize('event_name', [
        'before_state_change',
        'after_state_change',
    ])
    def test_events(self, model, event_name):
        """Listener receives (source, target) for every transition, including
        transitions that re-enter the current state, and stops firing once
        removed."""
        listener_result = []
        def on_update(instance, source, target):
            listener_result.append((source, target))
        sqlalchemy.event.listen(EventModel, event_name, on_update)
        expected_result = []
        assert listener_result == expected_result
        for handle_name in (
            'state_a', 'state_b', 'state_a',
            'state_a', 'state_b'
        ):
            # The event carries the state *before* the transition as source.
            expected_result.append((model.state, handle_name))
            if handle_name == 'state_a':
                handle = model.stateA
            else:
                handle = model.stateB
            handle.set()
            assert listener_result == expected_result
        # Remove the listener & check that it had an effect
        sqlalchemy.event.remove(EventModel, event_name, on_update)
        # Call the state handle & ensure that listener had not been called.
        model.stateA.set()
        assert listener_result == expected_result
    def test_standard_sqlalchemy_events_still_work(self, model, session):
        """FSM events must not break stock SQLAlchemy ORM events such as
        before_insert (which fires on flush, not on transition)."""
        state_log = []
        insert_log = []
        @sqlalchemy.event.listens_for(EventModel, 'after_state_change')
        def after_state_change(instance, source, target):
            state_log.append(target)
        @sqlalchemy.event.listens_for(EventModel, 'before_insert')
        def before_insert(mapper, connection, target):
            insert_log.append(42)
        assert not state_log
        assert not insert_log
        model.stateA.set()
        assert len(state_log) == 1
        assert len(insert_log) == 0
        model.stateB.set()
        assert len(state_log) == 2
        assert len(insert_log) == 0
        # Flushing the session triggers before_insert exactly once.
        session.add(model)
        session.flush()
        assert len(state_log) == 2
        assert len(insert_log) == 1
        model.stateB.set()
        assert len(state_log) == 3
        assert len(insert_log) == 1
class TransitionClassEventModel(Base):
    """FSM model whose 'state_class' target is declared via a transition
    *class* with per-source handlers (fromA / fromB)."""
    __tablename__ = 'transition_class_event_model'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    state = sqlalchemy.Column(sqlalchemy_fsm.FSMField)
    # Records which per-source handler ran last.
    side_effect = sqlalchemy.Column(sqlalchemy.String)
    def __init__(self, *args, **kwargs):
        # Every instance starts in the 'new' state.
        self.state = 'new'
        super(TransitionClassEventModel, self).__init__(*args, **kwargs)
    @sqlalchemy_fsm.transition(source='*', target='state_a')
    def stateA(self):
        """Transition to 'state_a' from any state."""
        pass
    @sqlalchemy_fsm.transition(source='*', target='state_b')
    def stateB(self):
        """Transition to 'state_b' from any state."""
        pass
    @sqlalchemy_fsm.transition(target='state_class')
    class stateClass(object):
        """Grouped transition to 'state_class'; the handler chosen depends
        on the source state."""
        @sqlalchemy_fsm.transition(source='state_a')
        def fromA(self, instance):
            instance.side_effect = 'from_a'
        @sqlalchemy_fsm.transition(source='state_b')
        def fromB(self, instance):
            instance.side_effect = 'from_b'
class TestTransitionClassEvents(object):
    """Events must also fire for transitions declared as classes, and the
    correct per-source handler must run its side effect."""
    @pytest.fixture
    def model(self):
        # Fresh, unsaved model for each test.
        return TransitionClassEventModel()
    @pytest.mark.parametrize('event_name', [
        'before_state_change',
        'after_state_change',
    ])
    def test_events(self, model, event_name):
        """Each plain or class-based transition appends its target state to
        the listener log; the class transition additionally sets side_effect
        according to the source state."""
        listener_result = []
        @sqlalchemy.event.listens_for(TransitionClassEventModel, event_name)
        def on_update(instance, source, target):
            listener_result.append(target)
        expected_result = []
        assert listener_result == expected_result
        for handle_name in (
            'state_a', 'state_b', 'state_a',
            'state_a', 'state_b'
        ):
            expected_result.append(handle_name)
            if handle_name == 'state_a':
                handle = model.stateA
            else:
                handle = model.stateB
            handle.set()
            assert listener_result == expected_result
            # The class-based transition fires the same event and runs the
            # handler matching the current source state.
            model.stateClass.set()
            if handle_name == 'state_a':
                expected_side = 'from_a'
            else:
                expected_side = 'from_b'
            expected_result.append('state_class')
            assert model.side_effect == expected_side
            assert listener_result == expected_result
        # Remove the listener & check that it had an effect
        sqlalchemy.event.remove(
            TransitionClassEventModel, event_name, on_update)
        # Call the state handle & ensure that listener had not been called.
        model.stateA.set()
        assert listener_result == expected_result
class TestEventsLeakage(object):
    """Ensure that multiple FSM models do not mix their events up."""
    @pytest.mark.parametrize('event_name', [
        'before_state_change',
        'after_state_change',
    ])
    def test_leakage(self, event_name):
        """A listener bound to one model class must never fire for another,
        while a listener bound to both fires for each."""
        event_model = EventModel()
        tr_cls_model = TransitionClassEventModel()
        event_result = []
        tr_cls_result = []
        joint_result = []
        @sqlalchemy.event.listens_for(EventModel, event_name)
        def on_evt_update(instance, source, target):
            event_result.append(target)
        @sqlalchemy.event.listens_for(TransitionClassEventModel, event_name)
        def on_tr_update(instance, source, target):
            tr_cls_result.append(target)
        # Stacked decorators register the same listener for both models.
        @sqlalchemy.event.listens_for(TransitionClassEventModel, event_name)
        @sqlalchemy.event.listens_for(EventModel, event_name)
        def on_both_update(instance, source, target):
            joint_result.append(target)
        assert len(event_result) == 0
        assert len(tr_cls_result) == 0
        assert len(joint_result) == 0
        event_model.stateA.set()
        assert len(event_result) == 1
        assert len(tr_cls_result) == 0
        assert len(joint_result) == 1
        event_model.stateB.set()
        assert len(event_result) == 2
        assert len(tr_cls_result) == 0
        assert len(joint_result) == 2
        tr_cls_model.stateA.set()
        assert len(event_result) == 2
        assert len(tr_cls_result) == 1
        assert len(joint_result) == 3
        # Re-entering the same state still counts as a transition.
        tr_cls_model.stateA.set()
        assert len(event_result) == 2
        assert len(tr_cls_result) == 2
        assert len(joint_result) == 4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.