text stringlengths 0 1.05M | meta dict |
|---|---|
"""Activates a project"""
from baseCmd import *
from baseResponse import *
class activateProjectCmd (baseCmd):
    """Request wrapper for the asynchronous activateProject CloudStack API call."""
    typeInfo = {}

    def __init__(self):
        # Async command: the server replies with a job id that must be polled.
        self.isAsync = "true"
        # id (uuid, required): id of the project to be modified.
        self.id = None
        self.typeInfo['id'] = 'uuid'
        self.required = ["id", ]
class activateProjectResponse (baseResponse):
    """Response wrapper for activateProject.

    Every field starts as None and is filled in from the API reply;
    ``typeInfo`` maps each field name to its wire type.
    """
    typeInfo = {}

    # (field name, wire type) pairs in the order the generated code declared them.
    _FIELDS = (
        ('id', 'string'), ('account', 'string'), ('cpuavailable', 'string'),
        ('cpulimit', 'string'), ('cputotal', 'long'), ('displaytext', 'string'),
        ('domain', 'string'), ('domainid', 'string'), ('ipavailable', 'string'),
        ('iplimit', 'string'), ('iptotal', 'long'), ('memoryavailable', 'string'),
        ('memorylimit', 'string'), ('memorytotal', 'long'), ('name', 'string'),
        ('networkavailable', 'string'), ('networklimit', 'string'),
        ('networktotal', 'long'), ('primarystorageavailable', 'string'),
        ('primarystoragelimit', 'string'), ('primarystoragetotal', 'long'),
        ('secondarystorageavailable', 'string'),
        ('secondarystoragelimit', 'string'), ('secondarystoragetotal', 'long'),
        ('snapshotavailable', 'string'), ('snapshotlimit', 'string'),
        ('snapshottotal', 'long'), ('state', 'string'),
        ('templateavailable', 'string'), ('templatelimit', 'string'),
        ('templatetotal', 'long'), ('vmavailable', 'string'),
        ('vmlimit', 'string'), ('vmrunning', 'integer'),
        ('vmstopped', 'integer'), ('vmtotal', 'long'),
        ('volumeavailable', 'string'), ('volumelimit', 'string'),
        ('volumetotal', 'long'), ('vpcavailable', 'string'),
        ('vpclimit', 'string'), ('vpctotal', 'long'),
    )

    def __init__(self):
        for field, wire_type in self._FIELDS:
            setattr(self, field, None)
            self.typeInfo[field] = wire_type
        # the list of resource tags associated with the vm (no typeInfo entry,
        # matching the generated original)
        self.tags = []
class tags:
    """A resource tag attached to a project: key/value plus ownership info.

    All fields are unset (None) until populated from an API response.
    """

    _ATTRS = ('account', 'customer', 'domain', 'domainid', 'key',
              'project', 'projectid', 'resourceid', 'resourcetype', 'value')

    def __init__(self):
        for attr in self._ATTRS:
            setattr(self, attr, None)
| {
"repo_name": "MissionCriticalCloud/marvin",
"path": "marvin/cloudstackAPI/activateProject.py",
"copies": "1",
"size": "7598",
"license": "apache-2.0",
"hash": -8098021154133229000,
"line_mean": 42.9190751445,
"line_max": 94,
"alpha_frac": 0.6138457489,
"autogenerated": false,
"ratio": 4.386836027713626,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5500681776613626,
"avg_score": null,
"num_lines": null
} |
"""Activates arm mirror, changes eyes and says a short sentence every few seconds"""
from time import sleep
from random import randint
import subprocess
import threading
from Movements import Movements
from Audio import Audio
from Eyes import Eyes
MOVEMENTS = Movements()
AUDIO = Audio()
EYES = Eyes()
def arm_mirror():
    """Mirror beo's right-arm joint angles onto the matching left-arm joints."""
    MOVEMENTS.disable_all_joints()
    while True:
        # Even joint ids are the right arm; the next odd id is its left twin.
        for joint in range(3):
            right_id = joint * 2
            MOVEMENTS.set_raw_angle(right_id + 1, MOVEMENTS.get_raw_angle(right_id))
        sleep(0.01)
def eye_change():
    """Cycle beo's eye expression every 20 seconds, forever."""
    while True:
        for expression in ('wink', 'shut', 'sad', 'mad', 'default'):
            EYES.set_expression(expression)
            sleep(20)
def speak():
    """Say a randomly chosen short sentence every 15 seconds, forever."""
    sentences = ['DESTROY ALL HU- I MEAN GREETINGS MEAT BAG',
                 'She sells sea shells by the sea shore', 'Other sentence']
    while True:
        # Derive the index bound from the list so adding or removing a
        # sentence cannot desync it (was hard-coded as randint(0, 2)).
        AUDIO.speak(sentences[randint(0, len(sentences) - 1)])
        sleep(15)
def nod():
    """Rock beo's head (joint 7) back and forth every two seconds, forever."""
    while True:
        for angle in (52, 0):
            MOVEMENTS.set_raw_angle(7, angle)
            sleep(2)
def camera():
    """Capture one webcam frame to image.jpg every minute, forever."""
    while True:
        # fswebcam writes the snapshot; check_output raises if it fails.
        subprocess.check_output(['fswebcam', 'image.jpg'])
        sleep(60)
def main():
    """Run every behaviour on its own thread and wait on them all."""
    workers = [threading.Thread(target=job)
               for job in (arm_mirror, eye_change, speak, nod, camera)]
    # Start everything first, then block on each (the loops never return,
    # so main effectively runs until the process is killed).
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()


if __name__ == "__main__":
    main()
| {
"repo_name": "CruyeEblon/Programming_Classes",
"path": "beo_threads.py",
"copies": "1",
"size": "2041",
"license": "mit",
"hash": 4597570212427876400,
"line_mean": 26.2133333333,
"line_max": 84,
"alpha_frac": 0.6291033807,
"autogenerated": false,
"ratio": 3.4770017035775127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46061050842775125,
"avg_score": null,
"num_lines": null
} |
# Shell template (rendered with {{ venv_path }}, {{ pyrun_version }} and
# {{ venv_name }}) that is written into a virtualenv as bin/activate.
# It mirrors the stock virtualenv activate script but additionally saves and
# restores PYTHONPATH so the venv's bundled pip directory can be injected.
ACTIVATE_SCRIPT = """# This file must be used with "source bin/activate" *from bash*
# you cannot run it directly
deactivate () {
# reset old environment variables
if [ -n "$_OLD_VIRTUAL_PATH" ] ; then
PATH="$_OLD_VIRTUAL_PATH"
export PATH
unset _OLD_VIRTUAL_PATH
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "$BASH" -o -n "$ZSH_VERSION" ] ; then
hash -r
fi
PYTHONPATH="$_OLD_PYTHONPATH"
export PYTHONPATH
unset _OLD_PYTHONPATH
if [ -n "$_OLD_VIRTUAL_PS1" ] ; then
PS1="$_OLD_VIRTUAL_PS1"
export PS1
unset _OLD_VIRTUAL_PS1
fi
unset VIRTUAL_ENV
if [ ! "$1" = "nondestructive" ] ; then
# Self destruct!
unset -f deactivate
fi
}
# unset irrelavent variables
deactivate nondestructive
VIRTUAL_ENV="{{ venv_path }}"
export VIRTUAL_ENV
VIRTUAL_ENV_PYRUN_VERSION="{{ pyrun_version }}"
export VIRTUAL_ENV_PYRUN_VERSION
_OLD_PYTHONPATH="$PYTHONPATH"
PYTHONPATH="$VIRTUAL_ENV/pip:$PYTHONPATH"
export PYTHONPATH
_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/bin:$PATH"
export PATH
if [ -z "$VIRTUAL_ENV_DISABLE_PROMPT" ] ; then
_OLD_VIRTUAL_PS1="$PS1"
if [ "x({{ venv_name }}) " != x ] ; then
PS1="({{ venv_name }}) $PS1"
else
if [ "`basename \"$VIRTUAL_ENV\"`" = "__" ] ; then
# special case for Aspen magic directories
# see http://www.zetadev.com/software/aspen/
PS1="[`basename \`dirname \"$VIRTUAL_ENV\"\``] $PS1"
else
PS1="(`basename \"$VIRTUAL_ENV\"`)$PS1"
fi
fi
export PS1
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "$BASH" -o -n "$ZSH_VERSION" ] ; then
hash -r
fi
"""
# Stub installed as bin/pip: removes the venv's pip egg directory from both
# pkg_resources working sets (the top-level one and pip's vendored copy) so
# pip resolves its own packages cleanly, then defers to pip.main().
PIP_SCRIPT = """#!/usr/bin/env python
import os
import sys
import pkg_resources
import pip
import pip._vendor.pkg_resources
eggdir = os.path.join(os.environ['VIRTUAL_ENV'], 'pip')
try:
pkg_resources.working_set.entries.remove(eggdir)
except ValueError:
pass
try:
pip._vendor.pkg_resources.working_set.entries.remove(eggdir)
except ValueError:
pass
for p in ('setuptools', 'pip'):
pkg_resources.working_set.by_key.pop(p, None)
pip._vendor.pkg_resources.working_set.by_key.pop(p, None)
sys.exit(pip.main())
"""
| {
"repo_name": "mbachry/exxo",
"path": "exxo/venv.py",
"copies": "1",
"size": "2558",
"license": "isc",
"hash": 6727654097634589000,
"line_mean": 24.8383838384,
"line_max": 84,
"alpha_frac": 0.6426896013,
"autogenerated": false,
"ratio": 3.1658415841584158,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9306281105098403,
"avg_score": 0.0004500160720025715,
"num_lines": 99
} |
# Two-component Gaussian mixture fit with PyMC 2 (Bayesian Methods for
# Hackers, ch. 3).  NOTE(review): execfile is Python 2 only — this script
# cannot run under Python 3 as-is.
activate_this = "/Users/stefano.romano/DataScience/bin/activate_this.py"
execfile(activate_this, dict(__file__ = activate_this))
import numpy as np
import pymc as pm
from matplotlib import pyplot as plt

data = np.loadtxt("data/mixture_data.csv")
plt.clf()
plt.hist(data, histtype = "stepfilled", color = "g", alpha = 0.4, bins = 20)
plt.ylim(0, 35)

## Data generation model ------------------------------
# p: prior probability of cluster 0; each point gets a latent assignment.
p = pm.Uniform("p", 0, 1, value = 0.9)
assignment = pm.Categorical("assignment", [p, 1 - p], size = len(data))
sigmas = pm.Uniform("sigmas", 0, 100, size = 2)
taus = 1.0/(sigmas**2)  # PyMC parameterizes Normals by precision
mus = pm.Normal("mus", [120, 190], [0.01, 0.01])

@pm.deterministic
def tau_i(assignment = assignment, taus = taus):
    # Per-observation precision, selected by cluster assignment.
    return taus[assignment]

@pm.deterministic
def mu_i(assignment = assignment, mus = mus):
    # Per-observation mean, selected by cluster assignment.
    return mus[assignment]

observations = pm.Normal("obs", mu_i, tau_i, value = data, observed = True)
model = pm.Model([p, assignment, sigmas, mus, observations])

# MAP fit first to start the chain near a mode, then sample.
map_ = pm.MAP(model)
map_.fit()
mcmc = pm.MCMC(model)
mcmc.sample(50000)

mus_samples = mcmc.trace("mus")[:]
sigmas_samples = mcmc.trace("sigmas")[:]
assignment_samples = mcmc.trace("assignment")[:]

plt.clf()
colors = ["#348ABD", "#A60628"]
for i in range(2):
    plt.plot(mus_samples[:, i], color = colors[i], label = r"Trace of $\mu_%d$" %i)
plt.legend()
plt.clf()
for i in range(2):
    plt.plot(sigmas_samples[:, i], color = colors[i], label = r"Trace of $\sigma_%d$" %i)
plt.legend()
for i in range(2):
    plt.hist(mus_samples[:, i], histtype = "stepfilled", color = colors[i],
             label = r"Posterior samples of $\mu_%d$" %i, alpha = 0.7)
plt.legend()
plt.clf()
for i in range(2):
    # BUG FIX: the original referenced undefined `stds_samples` here,
    # raising a NameError; the trace is stored in `sigmas_samples`.
    plt.hist(sigmas_samples[:, i], histtype = "stepfilled", color = colors[i],
             label = r"Posterior samples of $\sigma_%d$" %i, alpha = 0.7)
plt.legend()

# Fraction of points assigned to cluster 1 at each MCMC iteration.
cluster1_freq = assignment_samples.sum(axis = 1)/float(assignment_samples.shape[1])
plt.clf()
plt.plot(cluster1_freq, color = "g", lw = 3)
plt.ylim(0, 1)

# Continue sampling
mcmc.sample(100000)
mus_samples = mcmc.trace("mus", chain = 1)[:]
prev_mus_samples = mcmc.trace("mus", chain = 0)[:]
# NOTE(review): uses the pre-extension assignment_samples (chain 0 only) —
# presumably intentional, but confirm against the book's text.
cluster1_probs = assignment_samples.mean(axis = 0)
from pymc.Matplot import plot as mcplot
mcplot(mcmc.trace("mus"), common_scale = False)
| {
"repo_name": "steromano/BayesianMethodsForHackers",
"path": "chapter3.py",
"copies": "1",
"size": "2276",
"license": "mit",
"hash": 72851226892756300,
"line_mean": 26.756097561,
"line_max": 89,
"alpha_frac": 0.6458699473,
"autogenerated": false,
"ratio": 2.69988137603796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.384575132333796,
"avg_score": null,
"num_lines": null
} |
""" Activation Factory
Hacked together by / Copyright 2020 Ross Wightman
"""
from typing import Union, Callable, Type
from .activations import *
from .activations_jit import *
from .activations_me import *
from .config import is_exportable, is_scriptable, is_no_jit
# PyTorch has an optimized, native 'silu' (aka 'swish') operator as of PyTorch 1.7.
# Also hardsigmoid, hardswish, and soon mish. This code will use native version if present.
# Eventually, the custom SiLU, Mish, Hard*, layers will be removed and only native variants will be used.
# Feature probes: prefer the native torch.nn.functional implementation when
# this PyTorch build ships one, otherwise fall back to the local definitions.
_has_silu = 'silu' in dir(torch.nn.functional)
_has_hardswish = 'hardswish' in dir(torch.nn.functional)
_has_hardsigmoid = 'hardsigmoid' in dir(torch.nn.functional)
_has_mish = 'mish' in dir(torch.nn.functional)

# name -> activation *function*, default (plain autograd) variants.
_ACT_FN_DEFAULT = dict(
    silu=F.silu if _has_silu else swish,
    swish=F.silu if _has_silu else swish,
    mish=F.mish if _has_mish else mish,
    relu=F.relu,
    relu6=F.relu6,
    leaky_relu=F.leaky_relu,
    elu=F.elu,
    celu=F.celu,
    selu=F.selu,
    gelu=gelu,
    sigmoid=sigmoid,
    tanh=tanh,
    hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid,
    hard_swish=F.hardswish if _has_hardswish else hard_swish,
    hard_mish=hard_mish,
)

# name -> torch.jit.script'ed function variants (subset with jit versions).
_ACT_FN_JIT = dict(
    silu=F.silu if _has_silu else swish_jit,
    swish=F.silu if _has_silu else swish_jit,
    mish=F.mish if _has_mish else mish_jit,
    hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_jit,
    hard_swish=F.hardswish if _has_hardswish else hard_swish_jit,
    hard_mish=hard_mish_jit
)

# name -> memory-efficient (custom autograd) function variants.
_ACT_FN_ME = dict(
    silu=F.silu if _has_silu else swish_me,
    swish=F.silu if _has_silu else swish_me,
    mish=F.mish if _has_mish else mish_me,
    hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_me,
    hard_swish=F.hardswish if _has_hardswish else hard_swish_me,
    hard_mish=hard_mish_me,
)

_ACT_FNS = (_ACT_FN_ME, _ACT_FN_JIT, _ACT_FN_DEFAULT)
# Register the no-underscore aliases ('hardswish', 'hardsigmoid') in every table.
for a in _ACT_FNS:
    a.setdefault('hardsigmoid', a.get('hard_sigmoid'))
    a.setdefault('hardswish', a.get('hard_swish'))

# name -> activation *layer class*, default variants.
_ACT_LAYER_DEFAULT = dict(
    silu=nn.SiLU if _has_silu else Swish,
    swish=nn.SiLU if _has_silu else Swish,
    mish=nn.Mish if _has_mish else Mish,
    relu=nn.ReLU,
    relu6=nn.ReLU6,
    leaky_relu=nn.LeakyReLU,
    elu=nn.ELU,
    prelu=PReLU,
    celu=nn.CELU,
    selu=nn.SELU,
    gelu=GELU,
    sigmoid=Sigmoid,
    tanh=Tanh,
    hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoid,
    hard_swish=nn.Hardswish if _has_hardswish else HardSwish,
    hard_mish=HardMish,
)

# name -> jit-scripted layer class variants.
_ACT_LAYER_JIT = dict(
    silu=nn.SiLU if _has_silu else SwishJit,
    swish=nn.SiLU if _has_silu else SwishJit,
    mish=nn.Mish if _has_mish else MishJit,
    hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidJit,
    hard_swish=nn.Hardswish if _has_hardswish else HardSwishJit,
    hard_mish=HardMishJit
)

# name -> memory-efficient layer class variants.
_ACT_LAYER_ME = dict(
    silu=nn.SiLU if _has_silu else SwishMe,
    swish=nn.SiLU if _has_silu else SwishMe,
    mish=nn.Mish if _has_mish else MishMe,
    hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidMe,
    hard_swish=nn.Hardswish if _has_hardswish else HardSwishMe,
    hard_mish=HardMishMe,
)

_ACT_LAYERS = (_ACT_LAYER_ME, _ACT_LAYER_JIT, _ACT_LAYER_DEFAULT)
# Register the no-underscore aliases in every layer table as well.
for a in _ACT_LAYERS:
    a.setdefault('hardsigmoid', a.get('hard_sigmoid'))
    a.setdefault('hardswish', a.get('hard_swish'))
def get_act_fn(name: Union[Callable, str] = 'relu'):
    """ Activation Function Factory

    Resolve an activation function by name, preferring (in order) the
    memory-efficient, jit-scripted, then default variant, subject to the
    current export / scripting / no-jit config flags.  Passing a callable
    returns it unchanged; a falsy name returns None.
    """
    if not name:
        return None
    if isinstance(name, Callable):
        return name
    if not (is_no_jit() or is_exportable() or is_scriptable()):
        # Not exporting or scripting: prefer the custom-autograd,
        # memory-efficient variant when one exists for this name.
        fn = _ACT_FN_ME.get(name)
        if fn is not None:
            return fn
    if is_exportable() and name in ('silu', 'swish'):
        # FIXME PyTorch SiLU doesn't ONNX export, this is a temp hack
        return swish
    if not (is_no_jit() or is_exportable()):
        fn = _ACT_FN_JIT.get(name)
        if fn is not None:
            return fn
    return _ACT_FN_DEFAULT[name]
def get_act_layer(name: Union[Type[nn.Module], str] = 'relu'):
    """ Activation Layer Factory

    Resolve an activation layer class by name, preferring (in order) the
    memory-efficient, jit-scripted, then default variant, subject to the
    current export / scripting / no-jit config flags.  Passing a class
    returns it unchanged; a falsy name returns None.
    """
    if not name:
        return None
    if isinstance(name, type):
        return name
    if not (is_no_jit() or is_exportable() or is_scriptable()):
        layer = _ACT_LAYER_ME.get(name)
        if layer is not None:
            return layer
    if is_exportable() and name in ('silu', 'swish'):
        # FIXME PyTorch SiLU doesn't ONNX export, this is a temp hack
        return Swish
    if not (is_no_jit() or is_exportable()):
        layer = _ACT_LAYER_JIT.get(name)
        if layer is not None:
            return layer
    return _ACT_LAYER_DEFAULT[name]
def create_act_layer(name: Union[nn.Module, str], inplace=None, **kwargs):
    """Instantiate an activation layer resolved by :func:`get_act_layer`.

    ``inplace`` is only forwarded when explicitly given, so layers whose
    constructors lack an ``inplace`` argument still work with the default.
    Returns None for a falsy ``name``.
    """
    act_cls = get_act_layer(name)
    if act_cls is None:
        return None
    if inplace is None:
        return act_cls(**kwargs)
    return act_cls(inplace=inplace, **kwargs)
| {
"repo_name": "rwightman/pytorch-image-models",
"path": "timm/models/layers/create_act.py",
"copies": "1",
"size": "5359",
"license": "apache-2.0",
"hash": -6020964667966505000,
"line_mean": 34.0261437908,
"line_max": 105,
"alpha_frac": 0.6762455682,
"autogenerated": false,
"ratio": 2.8673087212413053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4043554289441305,
"avg_score": null,
"num_lines": null
} |
""" Activation functions implemented in NumPy
"""
# Sebastian Raschka 2016-2017
#
# ann is a supporting package for the book
# "Introduction to Artificial Neural Networks and Deep Learning:
# A Practical Guide with Applications in Python"
#
# Author: Sebastian Raschka <sebastianraschka.com>
#
# License: MIT
import numpy as np
def linear_activation(x):
    """Identity activation: returns the net input unchanged."""
    return x


def linear_derivative(x):
    """Derivative of the identity activation: the scalar 1.

    Note: returns a plain scalar (not an array); it broadcasts correctly
    in elementwise gradient expressions.
    """
    return 1
def logistic_activation(x):
    """ Element-wise logistic sigmoid, 1 / (1 + exp(-x)).

    The input is clipped to [-250, 250] before exponentiation so extreme
    values cannot overflow.

    Parameters
    ----------
    x : numpy array, shape=(n_samples, )
        Input values (e.g., x.dot(weights) + bias)

    Returns
    ----------
    numpy array of sigmoid values in (0, 1)

    Examples
    ----------
    >>> logistic_activation(np.array([-1, 0, 1]))
    array([ 0.26894142,  0.5       ,  0.73105858])
    >>>
    """
    z = np.clip(x, -250, 250)
    return 1. / (1. + np.exp(-z))
def logistic_derivative(x):
    """ Derivative of the logistic sigmoid: sigmoid(x) * (1 - sigmoid(x)).

    Parameters
    ----------
    x : numpy array, shape=(n_samples, )
        Input values

    Returns
    ----------
    numpy array of elementwise derivatives

    Examples
    ----------
    >>> logistic_derivative(np.array([-1, 0, 1]))
    array([ 0.19661193,  0.25      ,  0.19661193])
    >>>
    """
    # Inline sigmoid with the same clipping as logistic_activation.
    sig = 1. / (1. + np.exp(-np.clip(x, -250, 250)))
    return sig * (1. - sig)
def logistic_derivative_from_logistic(x_logistic):
    """ Sigmoid derivative from an already-computed sigmoid output.

    Saves re-evaluating the sigmoid when its value is available.

    Parameters
    ----------
    x_logistic : numpy array, shape=(n_samples, )
        Precomputed logistic activation values.

    Returns
    ----------
    numpy array: x_logistic * (1 - x_logistic)

    Examples
    ----------
    >>> logistic_derivative_from_logistic(np.array([0.26894142,
    ...                                             0.5, 0.73105858]))
    array([ 0.19661193,  0.25      ,  0.19661193])
    >>>
    """
    complement = 1. - x_logistic
    return x_logistic * complement
def tanh_activation(x):
    """ Hyperbolic tangent activation, mapping inputs to (-1, 1).

    Parameters
    ----------
    x : numpy array, shape=(n_samples, )
        Input values (e.g., x.dot(weights) + bias)

    Returns
    ----------
    numpy array: (exp(x) - exp(-x)) / (exp(x) + exp(-x))

    Examples
    ----------
    >>> tanh_activation(np.array([-10, 0, 10]))
    array([-1.,  0.,  1.])
    >>>
    """
    return np.tanh(x)
def tanh_derivative(x):
    """ Derivative of the hyperbolic tangent: 1 - tanh(x)**2.

    Parameters
    ----------
    x : numpy array, shape=(n_samples, )
        Input values

    Returns
    ----------
    numpy array of elementwise derivatives

    Examples
    ----------
    >>> tanh_derivative(np.array([-10, 0, 10]))
    array([  8.24461455e-09,   1.00000000e+00,   8.24461455e-09])
    >>>
    """
    t = np.tanh(x)
    return 1. - t**2
def tanh_derivative_from_tanh(x_tanh):
    """ tanh derivative from an already-computed tanh output.

    Saves re-evaluating tanh when its value is available; note the
    input must genuinely be tanh output (in [-1, 1]) for the result to
    be a valid derivative.

    Parameters
    ----------
    x_tanh : numpy array, shape=(n_samples, )
        Precomputed tanh values.

    Returns
    ----------
    numpy array: 1 - x_tanh**2

    Examples
    ----------
    >>> tanh_derivative_from_tanh(np.array([-10, 0, 10]))
    array([-99.,   1., -99.])
    >>>
    """
    return 1. - np.square(x_tanh)
def relu_activation(x):
    """ REctified Linear Unit: max(0, x) elementwise.

    Implemented as x * (x > 0) so negative inputs map to (negative) zero,
    matching the original's output exactly.

    Parameters
    ----------
    x : numpy array, shape=(n_samples, )
        Input values (e.g., x.dot(weights) + bias)

    Returns
    ----------
    numpy array with negatives zeroed

    Examples
    ----------
    >>> relu_activation(np.array([-1., 0., 2.]))
    array([-0.,  0.,  2.])
    >>>
    """
    positive = x > 0
    return x * positive
def relu_derivative(x):
    """ Derivative of ReLU: 1 where x > 0, else 0.

    Parameters
    ----------
    x : numpy array, shape=(n_samples, )
        Input values

    Returns
    ----------
    numpy float array of 0s and 1s

    Examples
    ----------
    >>> relu_derivative(np.array([-1., 0., 2.]))
    array([ 0.,  0.,  1.])
    >>>
    """
    return (x > 0) * 1.
def softplus_activation(x):
    """ Softplus activation, a smooth approximation of ReLU.

    Parameters
    ----------
    x : numpy array, shape=(n_samples, )
        Input values (e.g., x.dot(weights) + bias)

    Returns
    ----------
    numpy array: log(1 + exp(x))

    Examples
    ----------
    >>> softplus_activation(np.array([-5., -1., 0., 2.]))
    array([ 0.00671535,  0.31326169,  0.69314718,  2.12692801])
    >>>
    """
    return np.log(np.exp(x) + 1.)
def softplus_derivative(x):
    """ Derivative of softplus, which is the logistic sigmoid.

    Parameters
    ----------
    x : numpy array, shape=(n_samples, )
        Input values

    Returns
    ----------
    numpy array: 1 / (1 + exp(-x))

    Examples
    ----------
    >>> softplus_derivative(np.array([-1., 0., 1.]))
    array([ 0.26894142,  0.5       ,  0.73105858])
    >>>
    """
    # Inlined logistic_activation (same clipping, same values).
    return 1. / (1. + np.exp(-np.clip(x, -250, 250)))
def softmax_activation(x):
    """ Row-wise softmax: exp(x) / sum(exp(x)) per sample.

    The row maximum is subtracted before exponentiation for numerical
    stability; a 1-D input is treated as a single sample.

    Parameters
    ----------
    x : numpy array, shape=(n_samples, n_classes)
        Input values

    Returns
    ----------
    array, shape=(n_samples, n_classes), rows summing to 1

    Examples
    ----------
    >>> softmax_activation(np.array([2.0, 1.0, 0.1]))
    array([[ 0.65900114,  0.24243297,  0.09856589]])
    >>> softmax_activation(np.array([[2.0, 1.0, 0.1],\
                                     [1.0, 2.0, 0.1],\
                                     [0.1, 1.0, 2.0],\
                                     [2.0, 1.0, 0.1]]))
    array([[ 0.65900114,  0.24243297,  0.09856589],
           [ 0.24243297,  0.65900114,  0.09856589],
           [ 0.09856589,  0.24243297,  0.65900114],
           [ 0.65900114,  0.24243297,  0.09856589]])
    """
    if x.ndim == 1:
        x = x.reshape([1, x.size])
    stabilized = x - np.max(x, 1).reshape([x.shape[0], 1])
    numer = np.exp(stabilized)
    return numer / np.sum(numer, axis=1).reshape([numer.shape[0], 1])
def softmax_derivative(x):
    """ Derivative of the softmax activation function

    Parameters
    ----------
    x : numpy array, shape=(n_samples, n_classes)
        Input values

    Returns
    ----------
    numpy array, shape=(n_samples, n_classes)

    Examples
    ----------
    >>> softmax_derivative(np.array([[1., 2., 3.],\
                                     [4., 5., 6.]]))
    array([[ -0.08192507,  -2.18483645,  -6.22269543],
           [-12.08192507, -20.18483645, -30.22269543]])
    >>>
    """
    x_softmax = softmax_activation(x)
    # Off-diagonal Jacobian entries: -s_i * s_j, per sample.
    jacobian = - x_softmax[:, :, np.newaxis] * x_softmax[:, np.newaxis, :]
    v_idx, h_idx = np.diag_indices(jacobian[1].shape[0])
    # NOTE(review): the diagonal of a softmax Jacobian should be
    # s * (1 - s); this line uses the raw input x instead, and the doctest
    # above encodes that behavior.  Looks like a bug -- confirm against the
    # book before changing (a "corrected" diagonal makes the row-summed
    # Jacobian below identically zero).
    jacobian[:, v_idx, h_idx] = x * (1. - x)
    return jacobian.sum(axis=1)
def softmax_logloss_derivative(predictions, targets):
    """ Gradient of log loss w.r.t. softmax inputs: predictions - targets.

    Parameters
    ----------
    predictions : numpy array, shape=(n_samples, n_classes)
        Softmax output probabilities.
    targets : numpy array, shape=(n_samples, n_classes)
        One-hot (or soft) target distribution.

    Returns
    ----------
    array, shape=(n_samples, n_classes)
    """
    return np.subtract(predictions, targets)
if __name__ == '__main__':
    # Run the embedded doctests when this module is executed directly.
    import doctest
    doctest.testmod()
| {
"repo_name": "othersite/document",
"path": "machinelearning/deep-learning-book/ann/np/activations.py",
"copies": "1",
"size": "7369",
"license": "apache-2.0",
"hash": 1284922839227823900,
"line_mean": 21.7438271605,
"line_max": 79,
"alpha_frac": 0.5215090243,
"autogenerated": false,
"ratio": 3.46939736346516,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.449090638776516,
"avg_score": null,
"num_lines": null
} |
""" Activations (memory-efficient w/ custom autograd)
A collection of activations fn and modules with a common interface so that they can
easily be swapped. All have an `inplace` arg even if not used.
These activations are not compatible with jit scripting or ONNX export of the model, please use either
the JIT or basic versions of the activations.
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch import nn as nn
from torch.nn import functional as F
@torch.jit.script
def swish_jit_fwd(x):
    # swish(x) = x * sigmoid(x)
    return x * torch.sigmoid(x)


@torch.jit.script
def swish_jit_bwd(x, grad_output):
    # d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x)))
    sig = torch.sigmoid(x)
    return grad_output * (sig * (1 + x * (1 - sig)))
class SwishJitAutoFn(torch.autograd.Function):
    """ torch.jit.script optimised Swish w/ memory-efficient checkpoint
    Inspired by conversation btw Jeremy Howard & Adam Pazske
    https://twitter.com/jeremyphoward/status/1188251041835315200
    """
    @staticmethod
    def symbolic(g, x):
        # ONNX export graph: Mul(x, Sigmoid(x)).
        return g.op("Mul", x, g.op("Sigmoid", x))

    @staticmethod
    def forward(ctx, x):
        # Save only the raw input; the sigmoid is recomputed in backward,
        # trading a little compute for activation memory.
        ctx.save_for_backward(x)
        return swish_jit_fwd(x)

    @staticmethod
    def backward(ctx, grad_output):
        x = ctx.saved_tensors[0]
        return swish_jit_bwd(x, grad_output)
def swish_me(x, inplace=False):
    """Memory-efficient swish; `inplace` is accepted only for API parity."""
    return SwishJitAutoFn.apply(x)


class SwishMe(nn.Module):
    """nn.Module wrapper over the memory-efficient swish autograd function."""
    def __init__(self, inplace: bool = False):
        super(SwishMe, self).__init__()

    def forward(self, x):
        return swish_me(x)
@torch.jit.script
def mish_jit_fwd(x):
    # mish(x) = x * tanh(softplus(x))
    return x * torch.tanh(F.softplus(x))


@torch.jit.script
def mish_jit_bwd(x, grad_output):
    sig = torch.sigmoid(x)
    tanh_sp = torch.tanh(F.softplus(x))
    # Analytic mish gradient expressed via sigmoid and tanh(softplus).
    return grad_output * (tanh_sp + x * sig * (1 - tanh_sp * tanh_sp))
class MishJitAutoFn(torch.autograd.Function):
    """ Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
    A memory efficient, jit scripted variant of Mish
    """
    @staticmethod
    def forward(ctx, x):
        # Save only the raw input; intermediates are recomputed in backward.
        ctx.save_for_backward(x)
        return mish_jit_fwd(x)

    @staticmethod
    def backward(ctx, grad_output):
        x = ctx.saved_tensors[0]
        return mish_jit_bwd(x, grad_output)
def mish_me(x, inplace=False):
    """Memory-efficient mish; `inplace` is accepted only for API parity."""
    return MishJitAutoFn.apply(x)


class MishMe(nn.Module):
    """nn.Module wrapper over the memory-efficient mish autograd function."""
    def __init__(self, inplace: bool = False):
        super(MishMe, self).__init__()

    def forward(self, x):
        return mish_me(x)
@torch.jit.script
def hard_sigmoid_jit_fwd(x, inplace: bool = False):
    # relu6(x + 3) / 6; `inplace` kept for interface parity, unused.
    return torch.clamp(x + 3, 0., 6.) / 6.


@torch.jit.script
def hard_sigmoid_jit_bwd(x, grad_output):
    # Gradient is 1/6 inside [-3, 3] and zero elsewhere.
    inside = (x >= -3.) & (x <= 3.)
    slope = torch.ones_like(x) * inside / 6.
    return grad_output * slope
class HardSigmoidJitAutoFn(torch.autograd.Function):
    # Memory-efficient hard-sigmoid: saves only the input and recomputes the
    # piecewise-linear gradient in backward.
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return hard_sigmoid_jit_fwd(x)

    @staticmethod
    def backward(ctx, grad_output):
        x = ctx.saved_tensors[0]
        return hard_sigmoid_jit_bwd(x, grad_output)
def hard_sigmoid_me(x, inplace: bool = False):
    """Memory-efficient hard-sigmoid; `inplace` accepted only for API parity."""
    return HardSigmoidJitAutoFn.apply(x)


class HardSigmoidMe(nn.Module):
    """nn.Module wrapper over the memory-efficient hard-sigmoid."""
    def __init__(self, inplace: bool = False):
        super(HardSigmoidMe, self).__init__()

    def forward(self, x):
        return hard_sigmoid_me(x)
@torch.jit.script
def hard_swish_jit_fwd(x):
    # x * relu6(x + 3) / 6
    return x * (torch.clamp(x + 3, 0., 6.) / 6.)


@torch.jit.script
def hard_swish_jit_bwd(x, grad_output):
    # Gradient: 1 for x >= 3, x/3 + 0.5 on [-3, 3], 0 below -3.
    grad = torch.ones_like(x) * (x >= 3.)
    grad = torch.where((x >= -3.) & (x <= 3.), x / 3. + .5, grad)
    return grad_output * grad
class HardSwishJitAutoFn(torch.autograd.Function):
    """A memory efficient, jit-scripted HardSwish activation"""
    @staticmethod
    def forward(ctx, x):
        # Save only the input; the gate is recomputed in backward.
        ctx.save_for_backward(x)
        return hard_swish_jit_fwd(x)

    @staticmethod
    def backward(ctx, grad_output):
        x = ctx.saved_tensors[0]
        return hard_swish_jit_bwd(x, grad_output)

    @staticmethod
    def symbolic(g, self):
        # ONNX export graph: Mul(x, Clip(x + 3, 0, 6) / 6).
        input = g.op("Add", self, g.op('Constant', value_t=torch.tensor(3, dtype=torch.float)))
        hardtanh_ = g.op("Clip", input, g.op('Constant', value_t=torch.tensor(0, dtype=torch.float)), g.op('Constant', value_t=torch.tensor(6, dtype=torch.float)))
        hardtanh_ = g.op("Div", hardtanh_, g.op('Constant', value_t=torch.tensor(6, dtype=torch.float)))
        return g.op("Mul", self, hardtanh_)
def hard_swish_me(x, inplace=False):
    """Memory-efficient hard-swish; `inplace` accepted only for API parity."""
    return HardSwishJitAutoFn.apply(x)


class HardSwishMe(nn.Module):
    """nn.Module wrapper over the memory-efficient hard-swish."""
    def __init__(self, inplace: bool = False):
        super(HardSwishMe, self).__init__()

    def forward(self, x):
        return hard_swish_me(x)
@torch.jit.script
def hard_mish_jit_fwd(x):
    # hard_mish(x) = 0.5 * x * clamp(x + 2, 0, 2)
    return 0.5 * x * torch.clamp(x + 2, 0., 2.)


@torch.jit.script
def hard_mish_jit_bwd(x, grad_output):
    # Gradient: 1 for x >= 0, x + 1 on [-2, 0], 0 below -2.
    grad = torch.ones_like(x) * (x >= -2.)
    grad = torch.where((x >= -2.) & (x <= 0.), x + 1., grad)
    return grad_output * grad
class HardMishJitAutoFn(torch.autograd.Function):
    """ A memory efficient, jit scripted variant of Hard Mish
    Experimental, based on notes by Mish author Diganta Misra at
    https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
    """
    @staticmethod
    def forward(ctx, x):
        # Save only the input; the piecewise gradient is recomputed in backward.
        ctx.save_for_backward(x)
        return hard_mish_jit_fwd(x)

    @staticmethod
    def backward(ctx, grad_output):
        x = ctx.saved_tensors[0]
        return hard_mish_jit_bwd(x, grad_output)
def hard_mish_me(x, inplace: bool = False):
    """Memory-efficient hard-mish; `inplace` accepted only for API parity."""
    return HardMishJitAutoFn.apply(x)


class HardMishMe(nn.Module):
    """nn.Module wrapper over the memory-efficient hard-mish."""
    def __init__(self, inplace: bool = False):
        super(HardMishMe, self).__init__()

    def forward(self, x):
        return hard_mish_me(x)
| {
"repo_name": "rwightman/pytorch-image-models",
"path": "timm/models/layers/activations_me.py",
"copies": "1",
"size": "5886",
"license": "apache-2.0",
"hash": 2708939029053903400,
"line_mean": 26,
"line_max": 163,
"alpha_frac": 0.6493374108,
"autogenerated": false,
"ratio": 2.983274201723264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.910809944255156,
"avg_score": 0.004902433994340794,
"num_lines": 218
} |
""" Activations
A collection of activations fn and modules with a common interface so that they can
easily be swapped. All have an `inplace` arg even if not used.
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch import nn as nn
from torch.nn import functional as F
def swish(x, inplace: bool = False):
    """Swish - Described in: https://arxiv.org/abs/1710.05941
    """
    # compute the sigmoid gate once, then multiply (in place if requested)
    gate = x.sigmoid()
    return x.mul_(gate) if inplace else x.mul(gate)
class Swish(nn.Module):
    """Module form of :func:`swish`, honoring the ``inplace`` flag."""

    def __init__(self, inplace: bool = False):
        super(Swish, self).__init__()
        # whether forward mutates its input
        self.inplace = inplace

    def forward(self, x):
        return swish(x, self.inplace)
def mish(x, inplace: bool = False):
    """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
    NOTE: I don't have a working inplace variant
    """
    # inplace is accepted for interface compatibility only
    soft = F.softplus(x)
    return x.mul(soft.tanh())
class Mish(nn.Module):
    """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
    """

    def __init__(self, inplace: bool = False):
        # inplace is ignored; mish has no working in-place variant
        super(Mish, self).__init__()

    def forward(self, x):
        return mish(x)
def sigmoid(x, inplace: bool = False):
    """Sigmoid with a consistent ``inplace`` argument."""
    if inplace:
        return x.sigmoid_()
    return x.sigmoid()
# PyTorch has this, but not with a consistent inplace argument interface
class Sigmoid(nn.Module):
    """Sigmoid module with a consistent ``inplace`` constructor argument."""

    def __init__(self, inplace: bool = False):
        super(Sigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        if self.inplace:
            return x.sigmoid_()
        return x.sigmoid()
def tanh(x, inplace: bool = False):
    """Tanh with a consistent ``inplace`` argument."""
    if inplace:
        return x.tanh_()
    return x.tanh()
# PyTorch has this, but not with a consistent inplace argument interface
class Tanh(nn.Module):
    """Tanh module with a consistent ``inplace`` constructor argument."""

    def __init__(self, inplace: bool = False):
        super(Tanh, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        if self.inplace:
            return x.tanh_()
        return x.tanh()
def hard_swish(x, inplace: bool = False):
    """Hard swish: ``x * relu6(x + 3) / 6``."""
    # div_ here mutates only the freshly allocated relu6 result, never x
    gate = F.relu6(x + 3.).div_(6.)
    return x.mul_(gate) if inplace else x.mul(gate)
class HardSwish(nn.Module):
    """Module form of :func:`hard_swish`, honoring the ``inplace`` flag."""

    def __init__(self, inplace: bool = False):
        super(HardSwish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return hard_swish(x, self.inplace)
def hard_sigmoid(x, inplace: bool = False):
    """Hard sigmoid: ``relu6(x + 3) / 6``."""
    if inplace:
        # chain of in-place ops mutates x directly
        return x.add_(3.).clamp_(0., 6.).div_(6.)
    return F.relu6(x + 3.) / 6.
class HardSigmoid(nn.Module):
    """Module form of :func:`hard_sigmoid`, honoring the ``inplace`` flag."""

    def __init__(self, inplace: bool = False):
        super(HardSigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return hard_sigmoid(x, self.inplace)
def hard_mish(x, inplace: bool = False):
    """ Hard Mish
    Experimental, based on notes by Mish author Diganta Misra at
    https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
    """
    # gate = 0.5 * clamp(x + 2, 0, 2); halving is exact, so the grouping
    # x * gate matches the original 0.5 * x * clamp(...) bit-for-bit
    gate = 0.5 * (x + 2).clamp(min=0, max=2)
    return x.mul_(gate) if inplace else x * gate
class HardMish(nn.Module):
    """Module form of :func:`hard_mish`, honoring the ``inplace`` flag."""

    def __init__(self, inplace: bool = False):
        super(HardMish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return hard_mish(x, self.inplace)
class PReLU(nn.PReLU):
    """Applies PReLU (w/ dummy inplace arg)
    """

    def __init__(self, num_parameters: int = 1, init: float = 0.25, inplace: bool = False) -> None:
        # inplace is accepted for interface parity but has no effect
        super(PReLU, self).__init__(num_parameters=num_parameters, init=init)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return F.prelu(input, self.weight)
def gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor:
    """GELU with a dummy ``inplace`` argument for interface compatibility."""
    return F.gelu(x)
class GELU(nn.Module):
    """Applies the Gaussian Error Linear Units function (w/ dummy inplace arg)
    """

    def __init__(self, inplace: bool = False):
        super(GELU, self).__init__()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return F.gelu(input)
| {
"repo_name": "rwightman/pytorch-image-models",
"path": "timm/models/layers/activations.py",
"copies": "1",
"size": "4040",
"license": "apache-2.0",
"hash": -4735379133499598000,
"line_mean": 26.8620689655,
"line_max": 107,
"alpha_frac": 0.629950495,
"autogenerated": false,
"ratio": 3.2899022801302933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44198527751302935,
"avg_score": null,
"num_lines": null
} |
""" Activations
A collection of jit-scripted activations fn and modules with a common interface so that they can
easily be swapped. All have an `inplace` arg even if not used.
All jit scripted activations are lacking in-place variations on purpose, scripted kernel fusion does not
currently work across in-place op boundaries, thus performance is equal to or less than the non-scripted
versions if they contain in-place ops.
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from torch import nn as nn
from torch.nn import functional as F
@torch.jit.script
def swish_jit(x, inplace: bool = False):
    """Swish - Described in: https://arxiv.org/abs/1710.05941
    """
    # inplace is ignored: scripted kernels stay out-of-place so fusion works
    return x.mul(x.sigmoid())
@torch.jit.script
def mish_jit(x, _inplace: bool = False):
    """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
    """
    # _inplace is accepted for interface compatibility only
    return x.mul(F.softplus(x).tanh())
class SwishJit(nn.Module):
    """Module wrapper for the scripted swish (``inplace`` is ignored)."""

    def __init__(self, inplace: bool = False):
        super(SwishJit, self).__init__()

    def forward(self, x):
        return swish_jit(x)
class MishJit(nn.Module):
    """Module wrapper for the scripted mish (``inplace`` is ignored)."""

    def __init__(self, inplace: bool = False):
        super(MishJit, self).__init__()

    def forward(self, x):
        return mish_jit(x)
@torch.jit.script
def hard_sigmoid_jit(x, inplace: bool = False):
    # equivalent to F.relu6(x + 3.) / 6.; clamp seems ever so slightly faster
    return (x + 3).clamp(min=0, max=6).div(6.)
class HardSigmoidJit(nn.Module):
    """Module wrapper for the scripted hard sigmoid (``inplace`` is ignored)."""

    def __init__(self, inplace: bool = False):
        super(HardSigmoidJit, self).__init__()

    def forward(self, x):
        return hard_sigmoid_jit(x)
@torch.jit.script
def hard_swish_jit(x, inplace: bool = False):
    # x * hard_sigmoid(x); equivalent to x * (F.relu6(x + 3.) / 6)
    gate = (x + 3).clamp(min=0, max=6).div(6.)
    return x * gate
class HardSwishJit(nn.Module):
    """Module wrapper for the scripted hard swish (``inplace`` is ignored)."""

    def __init__(self, inplace: bool = False):
        super(HardSwishJit, self).__init__()

    def forward(self, x):
        return hard_swish_jit(x)
@torch.jit.script
def hard_mish_jit(x, inplace: bool = False):
    """ Hard Mish
    Experimental, based on notes by Mish author Diganta Misra at
    https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
    """
    return x * 0.5 * (x + 2).clamp(min=0, max=2)
class HardMishJit(nn.Module):
    """Module wrapper for the scripted hard mish (``inplace`` is ignored)."""

    def __init__(self, inplace: bool = False):
        super(HardMishJit, self).__init__()

    def forward(self, x):
        return hard_mish_jit(x)
| {
"repo_name": "rwightman/pytorch-image-models",
"path": "timm/models/layers/activations_jit.py",
"copies": "1",
"size": "2529",
"license": "apache-2.0",
"hash": 2376385783565350000,
"line_mean": 27.1,
"line_max": 107,
"alpha_frac": 0.6611308818,
"autogenerated": false,
"ratio": 3.0691747572815533,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9226911226830443,
"avg_score": 0.0006788824502218664,
"num_lines": 90
} |
"""Active inference project code
Oswald Berthold, 2016-2017
This file contains the learners which can be used as adaptive models of
sensorimotor contexts. For forward models there are
- nearest neighbour
- sparse online gaussian process models powered by Harold Soh's OTL library
- gaussian mixture model
- hebbian connected SOM
TODO: common calling convention for all model types
- including 'predict_naive' and 'predict_full' methods that would capture
returning confidences about the current prediction
- other variables that might be used by the context to modulate
exploration, learning and behaviour
- disambiguate static and dynamic (conditional inference types) idim/odim
TODO: consistency problem when sampling from probabilistic models (gmm, hebbsom, ...)
issues:
- som track residual error from map training
- som use residual for adjusting rbf width
- som extend sampling to sample actual prediction from gaussian with
unit's mu and sigma
- plot current / final som configuration
- plot densities
"""
from __future__ import print_function
import numpy as np
import scipy.sparse as sparse
import pylab as pl
import matplotlib.gridspec as gridspec
import cPickle
from functools import partial
# KNN
from sklearn.neighbors import KNeighborsRegressor
# Online Gaussian Processes
try:
    from otl_oesgp import OESGP
    from otl_storkgp import STORKGP
    HAVE_SOESGP = True
except ImportError as e:  # "as" form is portable: valid on Python 2.6+ and Python 3
    print("couldn't import online GP models:", e)
    HAVE_SOESGP = False
# Gaussian mixtures
try:
    import pypr.clustering.gmm as gmm
except ImportError as e:
    print("Couldn't import pypr.clustering.gmm", e)
# hebbsom
try:
    from kohonen.kohonen import Map, Parameters, ExponentialTimeseries, ConstantTimeseries
    from kohonen.kohonen import Gas, GrowingGas, GrowingGasParameters, Filter
except ImportError as e:
    print("Couldn't import lmjohns3's kohonon SOM lib", e)

# registry of model class name strings usable by callers to select a model
model_classes = ["KNN", "SOESGP", "STORKGP", "GMM", "HebbSOM", "all"]
class ActInfModel(object):
    """Base class for active inference function approximators / regressors"""

    def __init__(self, idim = 1, odim = 1, numepisodes = 10):
        # idim/odim: input/output dimensionality; numepisodes: training episodes
        self.model = None
        self.idim = idim
        self.odim = odim
        self.numepisodes = numepisodes

    def bootstrap(self):
        """Initialize the underlying model; default is a no-op for subclasses to override."""
        # original body was the bare expression `None` -- `pass` states the intent
        pass

    def predict(self, X):
        """Predict output for input X; stub returns zeros of shape (1, odim)."""
        if self.model is None:
            print("%s.predict: implement me" % (self.__class__.__name__))
        return np.zeros((1, self.odim))

    def fit(self, X, Y):
        """Fit the model to data (X, Y); stub only reports itself."""
        if self.model is None:
            print("%s.fit: implement me" % (self.__class__.__name__))

    def save(self, filename):
        """Pickle the whole wrapper object to `filename`."""
        cPickle.dump(self, open(filename, "wb"))

    @classmethod
    def load(cls, filename):
        """Unpickle a wrapper object from `filename`."""
        return cPickle.load(open(filename, "rb"))
class ActInfKNN(ActInfModel):
    """k-NN function approximator for active inference"""

    def __init__(self, idim = 1, odim = 1):
        self.fwd = KNeighborsRegressor(n_neighbors=5)
        ActInfModel.__init__(self, idim, odim)
        # training data accumulated as plain python lists of vectors
        self.X_ = []
        self.y_ = []
        self.bootstrap()

    def bootstrap(self):
        """Seed the regressor with a few synthetic points so predict works immediately."""
        print("%s.bootstrap'ping" % (self.__class__.__name__))
        for i in range(10):
            if self.idim == self.odim:
                # matched dims: deterministic ramp along the diagonal
                self.X_.append(np.ones((self.idim, )) * i * 0.1)
                self.y_.append(np.ones((self.odim, )) * i * 0.1)
            else:
                # mismatched dims: small random points
                self.X_.append(np.random.uniform(-0.1, 0.1, (self.idim,)))
                self.y_.append(np.random.uniform(-0.1, 0.1, (self.odim,)))
        self.fwd.fit(self.X_, self.y_)

    def predict(self, X):
        """Nearest-neighbour regression prediction for X."""
        return self.fwd.predict(X)

    def fit(self, X, y):
        """Single-step fit: append one (X, y) pair and refit; batches are delegated."""
        if X.shape[0] > 1:  # batch of data
            return self.fit_batch(X, y)
        self.X_.append(X[0, :])
        self.y_.append(y[0, :])
        self.fwd.fit(self.X_, self.y_)

    def fit_batch(self, X, y):
        """Batch fit: replace stored data with the batch and refit."""
        self.X_ = X.tolist()
        self.y_ = y.tolist()
        self.fwd.fit(self.X_, self.y_)
################################################################################
# ActiveInference OTL library based model, base class implementing predict,
# predict_step (otl can't handle batches), fit, save and load methods
class ActInfOTLModel(ActInfModel):
    """sparse online echo state gaussian process function approximator
    for active inference"""
    def __init__(self, idim = 1, odim = 1):
        ActInfModel.__init__(self, idim, odim)
        # concrete subclasses overwrite both with a real OTL model instance
        self.otlmodel_type = "soesgp"
        self.otlmodel = None

    def predict(self, X):
        # Predict outputs for X row by row -- the OTL models have no batch API.
        if X.shape[0] > 1: # batch input
            ret = np.zeros((X.shape[0], self.odim))
            for i in range(X.shape[0]):
                ret[i] = self.predict_step(X[i].flatten().tolist())
            return ret
        else:
            X_ = X.flatten().tolist()
            return self.predict_step(X_)

    def predict_step(self, X_):
        # NOTE(review): update() advances the model's internal state with the
        # input BEFORE predict() is called -- this call order is significant
        self.otlmodel.update(X_)
        pred = []
        var = []
        # OTL fills `pred` and `var` in place (output parameters)
        self.otlmodel.predict(pred, var)
        # return np.zeros((1, self.odim))
        return np.array(pred).reshape((1, self.odim))

    def fit(self, X, y):
        """ActInfOTLModel.fit
        Fit model to data X, y
        """
        if X.shape[0] > 1: # batch of data
            return self.fit_batch(X, y)
        X_ = X.flatten().tolist()
        # print("X.shape", X.shape, len(X_), X_)
        self.otlmodel.update(X_)
        # copy state into predefined structure
        # self.otlmodel.getState(self.r)
        pred = []
        var = []
        self.otlmodel.predict(pred, var)
        y_ = y.flatten().tolist()
        # train on the target after update/predict -- online learning step
        self.otlmodel.train(y_)
        # self.otlmodel.predict(pred, var)
        # print(pred, var)
        # return np.array(pred).reshape((1, self.odim))

    def fit_batch(self, X, y):
        # single-step fit per row, since OTL has no batch interface
        for i in range(X.shape[0]):
            self.fit(X[[i]], y[[i]])

    def save(self, filename):
        # The wrapped OTL C++ object is not picklable: persist it through its
        # own save(), detach it, pickle the python wrapper, then re-attach.
        otlmodel_ = self.otlmodel
        self.otlmodel.save(filename + "_%s_model" % self.otlmodel_type)
        print("otlmodel", otlmodel_)
        self.otlmodel = None
        print("otlmodel", otlmodel_)
        cPickle.dump(self, open(filename, "wb"))
        self.otlmodel = otlmodel_
        print("otlmodel", self.otlmodel)

    @classmethod
    def load(cls, filename):
        # Inverse of save(): unpickle the wrapper, re-instantiate the proper
        # OTL class from the stored type tag, then load the native model file.
        # otlmodel_ = cls.otlmodel
        otlmodel_wrap = cPickle.load(open(filename, "rb"))
        print("%s.load cls.otlmodel filename = %s, otlmodel_wrap.otlmodel_type = %s" % (cls.__name__, filename, otlmodel_wrap.otlmodel_type))
        if otlmodel_wrap.otlmodel_type == "soesgp":
            otlmodel_cls = OESGP
        elif otlmodel_wrap.otlmodel_type == "storkgp":
            otlmodel_cls = STORKGP
        else:
            # unknown tag: fall back to OESGP
            otlmodel_cls = OESGP
        otlmodel_wrap.otlmodel = otlmodel_cls()
        print("otlmodel_wrap.otlmodel", otlmodel_wrap.otlmodel)
        otlmodel_wrap.otlmodel.load(filename + "_%s_model" % otlmodel_wrap.otlmodel_type)
        # print("otlmodel_wrap.otlmodel", dir(otlmodel_wrap.otlmodel))
        # cls.bootstrap(otlmodel_wrap)
        # otlmodel_wrap.otlmodel = otlmodel_
        return otlmodel_wrap
################################################################################
# Sparse Online Echo State Gaussian Process (SOESGP) OTL library model
class ActInfSOESGP(ActInfOTLModel):
    """sparse online echo state gaussian process function approximator
    for active inference"""
    def __init__(self, idim = 1, odim = 1):
        ActInfOTLModel.__init__(self, idim, odim)
        self.otlmodel_type = "soesgp"
        self.otlmodel = OESGP()
        # reservoir / GP hyperparameters, consumed positionally by
        # OESGP.init() in bootstrap() below
        self.res_size = 100 # 20
        self.input_weight = 1.0
        self.output_feedback_weight = 0.0
        self.activation_function = 1
        # leak_rate: x <= (1-lr) * input + lr * x
        self.leak_rate = 0.1 # 0.1
        self.connectivity = 0.1
        self.spectral_radius = 0.7
        self.kernel_params = [1.0, 1.0]
        self.noise = 0.05
        self.epsilon = 1e-3
        self.capacity = 100
        self.random_seed = 100
        # self.X_ = []
        # self.y_ = []
        self.bootstrap()

    def bootstrap(self):
        # local import: `reservoirs` is only required when SOESGP is used
        from reservoirs import res_input_matrix_random_sparse
        self.otlmodel.init(self.idim, self.odim, self.res_size, self.input_weight,
                           self.output_feedback_weight, self.activation_function,
                           self.leak_rate, self.connectivity, self.spectral_radius,
                           False, self.kernel_params, self.noise, self.epsilon,
                           self.capacity, self.random_seed)
        # sparse random input weight matrix, 20% density
        im = res_input_matrix_random_sparse(self.idim, self.res_size, 0.2)
        # print("im", type(im))
        self.otlmodel.setInputWeights(im.tolist())
################################################################################
# StorkGP OTL based model
class ActInfSTORKGP(ActInfOTLModel):
    """sparse online echo state gaussian process function approximator
    for active inference"""
    def __init__(self, idim = 1, odim = 1):
        # call the DIRECT parent (was ActInfModel.__init__), consistent with
        # sibling ActInfSOESGP; the parent's otlmodel_type/otlmodel defaults
        # are immediately overwritten below, so behavior is unchanged
        ActInfOTLModel.__init__(self, idim, odim)
        self.otlmodel_type = "storkgp"
        self.otlmodel = STORKGP()
        self.res_size = 100 # 20
        self.bootstrap()

    def bootstrap(self):
        """Initialize the underlying STORKGP model with fixed hyperparameters."""
        self.otlmodel.init(self.idim, self.odim,
                           self.res_size, # window size
                           0, # kernel type
                           [0.5, 0.99, 1.0, self.idim],
                           1e-4,
                           1e-4,
                           100
                           )
################################################################################
# inference type multivalued models: GMM, SOMHebb, MDN
# these are somewhat different in operation than the models above
# - fit vs. fit_batch
# - can create conditional submodels
# GMM - gaussian mixture model
class ActInfGMM(ActInfModel):
    def __init__(self, idim = 1, odim = 1, K = 10, numepisodes = 10):
        """ActInfGMM: joint-density gaussian mixture over (input, output), sampled conditionally"""
        ActInfModel.__init__(self, idim, odim)
        # number of mixture components
        self.K = K
        # list of K component idim x 1 centroid vectors
        self.cen_lst = []
        # list of K component idim x idim covariances
        self.cov_lst = []
        # K mixture coeffs
        self.p_k = None
        # log loss after training
        self.logL = 0
        # joint (input + output) dimension used for conditional inference
        self.cdim = self.idim + self.odim
        # data
        self.Xy_ = []
        self.X_ = []
        self.y_ = []
        self.Xy = np.zeros((1, self.cdim))
        # fitting configuration: refit every fit_interval single-step samples
        self.fit_interval = 100
        self.fitted = False
        print("%s.__init__, idim = %d, odim = %d" % (self.__class__.__name__, self.idim, self.odim))

    def fit(self, X, y):
        """ActInfGMM single step fit: X, y are single patterns"""
        # print("%s.fit" % (self.__class__.__name__), X.shape, y.shape)
        if X.shape[0] == 1:
            # single step update, add to internal data and refit if length matches update interval
            self.Xy_.append(np.hstack((X[0], y[0])))
            self.X_.append(X[0])
            self.y_.append(y[0])
            if len(self.Xy_) % self.fit_interval == 0:
                # print("len(Xy_)", len(self.Xy_), self.Xy_[99])
                # pl.plot(self.Xy_)
                # pl.show()
                # self.fit_batch(self.Xy)
                self.fit_batch(self.X_, self.y_)
        else:
            # batch fit, just fit model to the input data batch
            self.Xy_ += np.hstack((X, y)).tolist()
            # self.X_ += X.tolist()
            # self.y_ += y.tolist()
            # self.Xy = np.hstack((X, y))
            # self.Xy = np.asarray(self.Xy_)
            # print("X_, y_", self.X_, self.y_)
            self.fit_batch(X, y)

    def fit_batch(self, X, y):
        """ActInfGMM Fit the GMM model with batch data"""
        # print("%s.fit X.shape = %s, y.shape = %s" % (self.__class__.__name__, X.shape, y.shape))
        # self.Xy = np.hstack((X[:,3:], y[:,:]))
        # self.Xy = np.hstack((X, y))
        # self.Xy = np.asarray(self.Xy_)
        # self.Xy = Xy
        # X = np.asarray(X_)
        # y = np.asarray(y_)
        # stack input and output into the joint data matrix
        self.Xy = np.hstack((X, y))
        # self.Xy = np.asarray(self.Xy_)
        print("%s.fit_batch self.Xy.shape = %s" % (self.__class__.__name__, self.Xy.shape))
        # fit gmm on the joint data via expectation maximization
        self.cen_lst, self.cov_lst, self.p_k, self.logL = gmm.em_gm(self.Xy, K = self.K, max_iter = 1000,
                                                                    verbose = False, iter_call = None)
        self.fitted = True
        print("%s.fit_batch Log likelihood (how well the data fits the model) = %f" % (self.__class__.__name__, self.logL))

    def predict(self, X):
        """ActInfGMM predict: forward to default sample call"""
        return self.sample(X)

    def sample(self, X):
        """ActInfGMM default sample function
        assumes the input is X with dims = idim located in
        the first part of the conditional inference combined input vector
        this method construct the corresponding conditioning input from the reduced input
        """
        # print("%s.sample: X.shape = %s, idim = %d" % (self.__class__.__name__, X.shape, self.idim))
        assert X.shape[1] == self.idim
        # cond = np.zeros((, self.cdim))
        # output slots are marked unknown (NaN) for the conditional density
        uncond = np.empty((X.shape[0], self.odim))
        uncond[:] = np.nan
        # print("%s.sample: uncond.shape = %s" % (self.__class__.__name__, uncond.shape))
        # np.array([np.nan for i in range(self.odim)])
        cond = np.hstack((X, uncond))
        # cond[:self.idim] = X.copy()
        # cond[self.idim:] = np.nan
        # print("%s.sample: cond.shape = %s" % (self.__class__.__name__, cond.shape))
        if X.shape[0] > 1: # batch
            return self.sample_batch(cond)
        return self.sample_cond(cond)

    def sample_cond(self, X):
        """ActInfGMM single sample from the GMM model with conditioning single input pattern X
        TODO: function conditional_dist, make predict/sample comply with sklearn and use the lowlevel
        cond_dist for advanced uses like dynamic conditioning
        """
        if not self.fitted:
            # return np.zeros((3,1))
            # model has not been bootstrapped, return random goal
            return np.random.uniform(-0.1, 0.1, (1, self.odim)) # FIXME hardcoded shape
        # gmm.cond_dist wants a (n, ) shape, not (1, n)
        if len(X.shape) > 1:
            cond = X[0]
        else:
            cond = X
        # print("%s.sample_cond: cond.shape = %s" % (self.__class__.__name__, cond.shape))
        # condition the joint density on the known entries, then draw a sample
        (cen_con, cov_con, new_p_k) = gmm.cond_dist(cond, self.cen_lst, self.cov_lst, self.p_k)
        cond_sample = gmm.sample_gaussian_mixture(cen_con, cov_con, new_p_k, samples = 1)
        # print("%s.sample_cond: cond_sample.shape = %s" % (self.__class__.__name__, cond_sample.shape))
        return cond_sample

    def sample_batch(self, X):
        """ActInfGMM.sample_batch: If X has more than one rows, return batch of samples for
        every condition row in X"""
        samples = np.zeros((X.shape[0], self.odim))
        for i in range(X.shape[0]):
            samples[i] = self.sample_cond(X[i])
        return samples

    def sample_batch_legacy(self, X, cond_dims = [0], out_dims = [1], resample_interval = 1):
        """ActInfGMM sample from gmm model with conditioning batch input X"""
        # NOTE(review): mutable default args (lists) are shared across calls -- kept for interface compatibility
        # compute conditional
        sampmax = 20
        numsamplesteps = X.shape[0]
        odim = len(out_dims) # self.idim - X.shape[1]
        self.y_sample_ = np.zeros((odim,))
        self.y_sample = np.zeros((odim,))
        self.y_samples_ = np.zeros((sampmax, numsamplesteps, odim))
        self.y_samples = np.zeros((numsamplesteps, odim))
        self.cond = np.zeros_like(X[0])
        print("%s.sample_batch: y_samples_.shape = %s" % (self.__class__.__name__, self.y_samples_.shape))
        for i in range(numsamplesteps):
            # if i % 100 == 0:
            if i % resample_interval == 0:
                # print("%s.sample_batch: sampling gmm cond prob at step %d" % (self.__class__.__name__, i))
                ref_interval = 1
                # self.cond = self.logs["EP"][(i+ref_interval) % self.logs["EP"].shape[0]] # self.X__[i,:3]
                self.cond = X[(i+ref_interval) % numsamplesteps] # self.X__[i,:3]
                # self.cond = np.array()
                # self.cond[:2] = X_
                # print(self.cond, out_dims, X.shape)
                self.cond[out_dims] = np.nan
                (self.cen_con, self.cov_con, self.new_p_k) = gmm.cond_dist(self.cond, self.cen_lst, self.cov_lst, self.p_k)
                # print "run_hook_e2p_sample gmm.cond_dist:", np.array(self.cen_con).shape, np.array(self.cov_con).shape, self.new_p_k.shape
                samperr = 1e6
                j = 0
                # resample up to sampmax times, keeping the sample closest to
                # the next observed pattern
                while samperr > 0.1 and j < sampmax:
                    self.y_sample = gmm.sample_gaussian_mixture(self.cen_con, self.cov_con, self.new_p_k, samples = 1)
                    self.y_samples_[j,i] = self.y_sample
                    samperr_ = np.linalg.norm(self.y_sample - X[(i+1) % numsamplesteps,:odim], 2)
                    if samperr_ < samperr:
                        samperr = samperr_
                        self.y_sample_ = self.y_sample
                    j += 1
                    # print "sample/real err", samperr
                print("sampled", j, "times")
            else:
                # retain samples from last sampling interval boundary
                self.y_samples_[:,i] = self.y_samples_[:,i-1]
            # return sample array
            self.y_samples[i] = self.y_sample_
        return self.y_samples, self.y_samples_
################################################################################
# Hebbian SOM model: connect to SOMs with hebbian links
class ActInfHebbianSOM(ActInfModel):
    def __init__(self, idim = 1, odim = 1, numepisodes = 100):
        """ActInfHebbianSOM: two SOMs (exteroceptive on inputs, proprioceptive on outputs) coupled by a hebbian link matrix"""
        ActInfModel.__init__(self, idim, odim, numepisodes = numepisodes)
        # SOMs trained?
        self.soms_fitted = False
        self.fitted = False
        # learning rate proxy: timeseries classes used to schedule rates
        self.ET = ExponentialTimeseries
        self.CT = ConstantTimeseries
        self.mapsize = 10
        # map sizes scale with input/output dimension, at least 10
        self.mapsize_e = max(10, self.idim * 3)
        self.mapsize_p = max(10, self.odim * 3)
        self.numepisodes_som = self.numepisodes
        self.numepisodes_hebb = self.numepisodes
        # FIXME: make neighborhood_size decrease with time
        som_lr = 1e-1 # Haykin, p475
        # som_lr = 5e-1
        # som_lr = 5e-4
        maptype = "som"
        # maptype = "gas"
        # SOM exteroceptive stimuli 2D input
        if maptype == "som":
            self.kw_e = self.kwargs(shape = (self.mapsize_e, self.mapsize_e), dimension = self.idim, lr_init = som_lr,
                                    neighborhood_size = self.mapsize_e, z = 0.001)
            # self.kw_e = self.kwargs(shape = (self.mapsize_e, self.mapsize_e), dimension = self.idim, lr_init = 0.5, neighborhood_size = 0.6)
            self.som_e = Map(Parameters(**self.kw_e))
        elif maptype == "gas":
            self.kw_e = self.kwargs_gas(shape = (self.mapsize_e ** 2, ), dimension = self.idim, lr_init = som_lr, neighborhood_size = 0.5)
            self.som_e = Gas(Parameters(**self.kw_e))
        # SOM proprioceptive stimuli 3D input
        if maptype == "som":
            self.kw_p = self.kwargs(shape = (int(self.mapsize_p), int(self.mapsize_p)), dimension = self.odim, lr_init = som_lr,
                                    neighborhood_size = self.mapsize_p, z = 0.001)
            # self.kw_p = self.kwargs(shape = (int(self.mapsize_p * 1.5), int(self.mapsize_p * 1.5)), dimension = self.odim, lr_init = 0.5, neighborhood_size = 0.7)
            self.som_p = Map(Parameters(**self.kw_p))
        elif maptype == "gas":
            self.kw_p = self.kwargs_gas(shape = (self.mapsize_p ** 2, ), dimension = self.odim, lr_init = som_lr, neighborhood_size = 0.5)
            self.som_p = Gas(Parameters(**self.kw_p))
        # FIXME: there was a nice trick for node distribution init in _some_ recently added paper
        # create "filter" using existing SOM_e, filter computes activation on distance
        self.filter_e = Filter(self.som_e, history=lambda: 0.0)
        self.filter_e.reset()
        # remember the map's learning-rate schedule so it can be restored
        self.filter_e_lr = self.filter_e.map._learning_rate
        # kw_f_p = kwargs(shape = (mapsize * 3, mapsize * 3), dimension = 3, neighborhood_size = 0.5, lr_init = 0.1)
        # filter_p = Filter(Map(Parameters(**kw_f_p)), history=lambda: 0.01)
        # create "filter" using existing SOM_p, filter computes activation on distance
        self.filter_p = Filter(self.som_p, history=lambda: 0.0)
        self.filter_p.reset()
        self.filter_p_lr = self.filter_p.map._learning_rate
        # Hebbian links
        # hebblink_som = np.random.uniform(-1e-4, 1e-4, (np.prod(som_e._shape), np.prod(som_p._shape)))
        # hebblink_filter = np.random.uniform(-1e-4, 1e-4, (np.prod(filter_e.map._shape), np.prod(filter_p.map._shape)))
        self.hebblink_som = np.zeros((np.prod(self.som_e._shape), np.prod(self.som_p._shape)))
        # self.hebblink_filter = np.zeros((np.prod(self.filter_e.map._shape), np.prod(self.filter_p.map._shape)))
        # link matrix initialized with tiny gaussian noise
        self.hebblink_filter = np.random.normal(0, 1e-6, (np.prod(self.filter_e.map._shape), np.prod(self.filter_p.map._shape)))
        # # sparse hebblink
        # self.hebblink_filter = sparse.rand(m = np.prod(self.filter_e.map._shape),
        #                                    n = np.prod(self.filter_p.map._shape)) * 1e-3
        self.hebblink_use_activity = True # use activation or distance
        # Hebbian learning rate
        if self.hebblink_use_activity:
            self.hebblink_et = ExponentialTimeseries(-1e-4, 1e-3, 0)
            # self.hebblink_et = ConstantTimeseries(5e-3)
            # et = ConstantTimeseries(0.5)
        else:
            self.hebblink_et = ConstantTimeseries(1e-12)
# SOM argument dict
def kwargs(self, shape=(10, 10), z=0.001, dimension=2, lr_init = 1.0, neighborhood_size = 1):
    """ActInfHebbianSOM params function for Map"""
    # exponential schedules for both neighborhood size and learning rate
    params = dict(
        dimension=dimension,
        shape=shape,
        neighborhood_size=self.ET(-1e-3, neighborhood_size, 1.0),
        learning_rate=self.ET(-1e-4, lr_init, 0.01),
        noise_variance=z,
    )
    return params
def kwargs_gas(self, shape=(100,), z=0.001, dimension=3, lr_init = 1.0, neighborhood_size = 1):
    """ActInfHebbianSOM params function for Gas"""
    params = dict(
        dimension=dimension,
        shape=shape,
        neighborhood_size=self.ET(-1e-3, neighborhood_size, 1),
        learning_rate=self.ET(-1e-4, lr_init, 0.01),
        noise_variance=z,
    )
    return params
def set_learning_rate_constant(self, c = 0.0):
    """Freeze both SOM filters at the constant learning rate ``c``."""
    # print("fit_hebb", self.filter_e.map._learning_rate)
    # one ConstantTimeseries instance per map, matching independent schedules
    self.filter_e.map._learning_rate = self.CT(c)
    self.filter_p.map._learning_rate = self.CT(c)
    # remember the (now constant) rates so fit_soms restores these
    self.filter_e_lr = self.filter_e.map._learning_rate
    self.filter_p_lr = self.filter_p.map._learning_rate
def fit_soms(self, X, y):
    """ActInfHebbianSOM: fit the exteroceptive SOM on X and the proprioceptive SOM on y"""
    # print("%s.fit_soms fitting X = %s, y = %s" % (self.__class__.__name__, X.shape, y.shape))
    # if X.shape[0] != 1, r
    # e = EP[i,:dim_e]
    # p = EP[i,dim_e:]
    # restore the stored learning-rate schedules (they may have been zeroed)
    self.filter_e.map._learning_rate = self.filter_e_lr
    self.filter_p.map._learning_rate = self.filter_p_lr
    # don't learn twice
    # som_e.learn(e)
    # som_p.learn(p)
    # TODO for j in numepisodes
    if X.shape[0] > 1:
        numepisodes = self.numepisodes_som
    else:
        numepisodes = 1
    if X.shape[0] > 100:
        print("%s.fit_soms batch fitting of size %d" % (self.__class__.__name__, X.shape[0]))
    i = 0
    j = 0
    # convergence threshold for the gap between short/long horizon averages
    eps_convergence = 0.01
    # eps_convergence = 0.005
    dWnorm_e_ = 1 # short horizon
    dWnorm_p_ = 1
    dWnorm_e__ = dWnorm_e_ + 2 * eps_convergence # long horizon
    dWnorm_p__ = dWnorm_p_ + 2 * eps_convergence
    idx_shuffle = np.arange(X.shape[0])
    # for j in range(numepisodes):
    # (dWnorm_e_ == 0 and dWnorm_p_ == 0) or
    # while (dWnorm_e_ > 0.05 and dWnorm_p_ > 0.05):
    do_convergence = True
    # iterate training episodes until the short- and long-horizon moving
    # averages of the weight change agree for both maps
    while (do_convergence) and (np.abs(dWnorm_e__ - dWnorm_e_) > eps_convergence and np.abs(dWnorm_p__ - dWnorm_p_) > eps_convergence):
        if j > 0 and j % 10 == 0:
            print("%s.fit_soms episode %d / %d" % (self.__class__.__name__, j, numepisodes))
        if X.shape[0] == 1:
            # single pattern: exactly one pass, no convergence loop
            print("no convergence")
            do_convergence = False
        dWnorm_e = 0
        dWnorm_p = 0
        np.random.shuffle(idx_shuffle)
        for i in range(X.shape[0]):
            # lidx = idx_shuffle[i]
            # NOTE(review): shuffled index is computed but unused; data is
            # presented in order
            lidx = i
            self.filter_e.learn(X[lidx])
            dWnorm_e += np.linalg.norm(self.filter_e.map.delta)
            self.filter_p.learn(y[lidx])
            dWnorm_p += np.linalg.norm(self.filter_p.map.delta)
        # normalize accumulated weight change by sample count and map size
        dWnorm_e /= X.shape[0]
        dWnorm_e /= self.filter_e.map.numunits
        dWnorm_p /= X.shape[0]
        dWnorm_p /= self.filter_p.map.numunits
        # short
        dWnorm_e_ = 0.8 * dWnorm_e_ + 0.2 * dWnorm_e
        dWnorm_p_ = 0.8 * dWnorm_p_ + 0.2 * dWnorm_p
        # long
        dWnorm_e__ = 0.83 * dWnorm_e__ + 0.17 * dWnorm_e_
        dWnorm_p__ = 0.83 * dWnorm_p__ + 0.17 * dWnorm_p_
        print("%s.fit_soms batch e |dW| = %f, %f, %f" % (self.__class__.__name__, dWnorm_e, dWnorm_e_, dWnorm_e__))
        print("%s.fit_soms batch p |dW| = %f, %f, %f" % (self.__class__.__name__, dWnorm_p, dWnorm_p_, dWnorm_p__))
        j += 1
    # print("%s.fit_soms batch e mean error = %f" % (self.__class__.__name__, np.asarray(self.filter_e.distances_).mean() ))
    # print("%s.fit_soms batch p mean error = %f, min = %f, max = %f" % (self.__class__.__name__, np.asarray(self.filter_p.distances_).mean(), np.asarray(self.filter_p.distances_[-1]).min(), np.asarray(self.filter_p.distances_).max() ))
    # print np.argmin(som_e.distances(e)) # , som_e.distances(e)
def fit_hebb(self, X, y):
    """ActInfHebbianSOM: train the hebbian link matrix between the two SOMs (SOM weights frozen)"""
    # print("%s.fit_hebb fitting X = %s, y = %s" % (self.__class__.__name__, X.shape, y.shape))
    # numepisodes_hebb = 1
    if X.shape[0] > 100:
        print("%s.fit_hebb batch fitting of size %d" % (self.__class__.__name__, X.shape[0]))
    numsteps = X.shape[0]
    ################################################################################
    # fix the SOMs with learning rate constant 0
    self.filter_e_lr = self.filter_e.map._learning_rate
    self.filter_p_lr = self.filter_p.map._learning_rate
    # print("fit_hebb", self.filter_e.map._learning_rate)
    self.filter_e.map._learning_rate = self.CT(0.0)
    self.filter_p.map._learning_rate = self.CT(0.0)
    # column-vector shapes of the flattened map activations
    e_shape = (np.prod(self.filter_e.map._shape), 1)
    p_shape = (np.prod(self.filter_p.map._shape), 1)
    eps_convergence = 0.05
    # coefficients for fast / slow exponential moving averages of the error
    z_err_coef_1 = 0.8
    z_err_coef_2 = 0.83
    z_err_norm_ = 1 # fast
    z_err_norm__ = z_err_norm_ + 2 * eps_convergence # slow
    Z_err_norm = np.zeros((self.numepisodes_hebb*numsteps,1))
    Z_err_norm_ = np.zeros((self.numepisodes_hebb*numsteps,1))
    W_norm = np.zeros((self.numepisodes_hebb*numsteps,1))
    # # plotting
    # pl.ion()
    # fig = pl.figure()
    # fig2 = pl.figure()
    # TODO for j in numepisodes
    # j = 0
    if X.shape[0] > 1:
        numepisodes = self.numepisodes_hebb
    else:
        numepisodes = 1
    i = 0
    dWnorm_ = 10.0
    j = 0
    # for j in range(numepisodes):
    do_convergence = True
    # iterate episodes until the error EMAs converge (single pattern: one pass)
    while do_convergence and z_err_norm_ > eps_convergence and np.abs(z_err_norm__ - z_err_norm_) > eps_convergence:
        if j > 0 and j % 10 == 0:
            print("%s.fit_hebb episode %d / %d" % (self.__class__.__name__, j, numepisodes))
        if X.shape[0] == 1:
            print("no convergence")
            do_convergence = False
        for i in range(X.shape[0]):
            # just activate
            self.filter_e.learn(X[i])
            self.filter_p.learn(y[i])
            # fetch data induced activity
            if self.hebblink_use_activity:
                p_ = self.filter_p.activity.reshape(p_shape)
                # print(p_.shape)
            else:
                # NOTE(review): `p` is undefined here -- this branch would
                # raise NameError if hebblink_use_activity were False
                p_ = self.filter_p.distances(p).flatten().reshape(p_shape)
            p__ = p_.copy()
            # binarize to winner-take-all activation
            p_ = (p_ == np.max(p_)) * 1.0
            e_ = self.filter_e.activity.flatten()
            e__ = e_.copy()
            e_ = (e_ == np.max(e_)) * 1.0
            # compute prediction for p using e activation and hebbian weights
            if self.hebblink_use_activity:
                # print(self.hebblink_filter.T.shape, self.filter_e.activity.reshape(e_shape).shape)
                # p_bar = np.dot(self.hebblink_filter.T, self.filter_e.activity.reshape(e_shape))
                # e_act = e_.reshape(e_shape)
                # e_act
                p_bar = np.dot(self.hebblink_filter.T, e_.reshape(e_shape))
                # # sparse
                # p_bar = self.hebblink_filter.T.dot(e_.reshape(e_shape))
                # print("p_bar", type(p_bar))
            else:
                p_bar = np.dot(self.hebblink_filter.T, self.filter_e.distances(e).flatten().reshape(e_shape))
            p_bar_ = p_bar.copy()
            p_bar = (p_bar == np.max(p_bar)) * 1.0
            # print("p_bar", type(p_bar), type(p_bar_))
            # # plotting
            # ax1 = fig.add_subplot(411)
            # ax1.cla()
            # ax1.plot(e_ * np.max(e__))
            # ax1.plot(e__)
            # ax2 = fig.add_subplot(412)
            # ax2.cla()
            # ax2.plot(p_ * np.max(p_bar_))
            # ax2.plot(p__)
            # ax2.plot(p_bar * np.max(p_bar_))
            # ax2.plot(p_bar_)
            # ax3 = fig.add_subplot(413)
            # ax3.cla()
            # ax3.plot(self.filter_e.distances_[-1])
            # ax4 = fig.add_subplot(414)
            # ax4.cla()
            # ax4.plot(self.filter_p.distances_[-1])
            # pl.pause(0.001)
            # pl.draw()
            # inject activity prediction
            p_bar_sum = p_bar.sum()
            if p_bar_sum > 0:
                p_bar_normed = p_bar / p_bar_sum
            else:
                p_bar_normed = np.zeros(p_bar.shape)
            # compute prediction error: data induced activity - prediction
            # print("p_", np.linalg.norm(p_))
            # print("p_bar", np.linalg.norm(p_bar))
            z_err = p_ - p_bar
            idx = np.argmax(p_bar_)
            # print("sum E", np.sum(z_err))
            # print("idx", p_bar_, idx, z_err[idx])
            # z_err = (p_[idx] - p_bar[idx]) * np.ones_like(p_)
            # z_err = np.ones_like(p_) *
            # print("z_err", z_err)
            # z_err = p_bar - p_
            # z_err_norm = np.linalg.norm(z_err, 2)
            # L1 norm of the prediction error
            z_err_norm = np.sum(np.abs(z_err))
            # if j == 0 and i == 0:
            #     z_err_norm_ = z_err_norm
            # else:
            # fast and slow exponential moving averages of the error norm
            z_err_norm_ = z_err_coef_1 * z_err_norm_ + (1 - z_err_coef_1) * z_err_norm
            z_err_norm__ = z_err_coef_2 * z_err_norm__ + (1 - z_err_coef_2) * z_err_norm
            w_norm = np.linalg.norm(self.hebblink_filter)
            # logidx = (j*numsteps) + i
            # Z_err_norm [logidx] = z_err_norm
            # Z_err_norm_[logidx] = z_err_norm_
            # W_norm [logidx] = w_norm
            # z_err = p_bar - self.filter_p.activity.reshape(p_bar.shape)
            # print "p_bar.shape", p_bar.shape
            # print "self.filter_p.activity.flatten().shape", self.filter_p.activity.flatten().shape
            # if i % 100 == 0:
            #     print("%s.fit_hebb: iter %d/%d: z_err.shape = %s, |z_err| = %f, |W| = %f, |p_bar_normed| = %f" % (self.__class__.__name__, logidx, (self.numepisodes_hebb*numsteps), z_err.shape, z_err_norm_, w_norm, np.linalg.norm(p_bar_normed)))
            # d_hebblink_filter = et() * np.outer(self.filter_e.activity.flatten(), self.filter_p.activity.flatten())
            if self.hebblink_use_activity:
                # eta = 5e-4
                # learning rate from the (scheduled) timeseries
                eta = self.hebblink_et()
                # outer = np.outer(self.filter_e.activity.flatten(), np.clip(z_err, 0, 1))
                # outer = np.outer(e_, np.clip(z_err, 0, 1))
                # outer = np.outer(e_, p_)
                # outer = np.outer(e_, p__ * np.clip(z_err, 0, 1))
                # hebbian outer product: winner activity (scaled by raw
                # activation) times winner-take-all target activity
                outer = np.outer(e_ * e__, p_)
                # print(outer.shape, self.hebblink_filter.shape)
                # print("outer", outer)
                # print("modulator", z_err[idx])
                # d_hebblink_filter = eta * outer * (-1e-3 - z_err[idx])
                # d_hebblink_filter = eta * np.outer(z_err, self.filter_e.activity.flatten()).T
                # d_hebblink_filter = eta * outer * np.abs((z_err_norm_ - z_err_norm))
                # d_hebblink_filter = eta * outer * (z_err_norm - z_err_norm_)
                d_hebblink_filter = eta * outer
                # # plotting
                # f2ax1 = fig2.add_subplot(111)
                # f2ax1.imshow(self.hebblink_filter.T, interpolation="none")
                # # im = f2ax1.imshow(outer, interpolation="none")
                # # f2ax2 = pl.colorbar(im, ax=f2ax1)
                # pl.pause(1e-5)
                # pl.draw()
            else:
                d_hebblink_filter = self.hebblink_et() * np.outer(self.filter_e.distances(e), z_err)
            dWnorm = np.linalg.norm(d_hebblink_filter)
            dWnorm_ = 0.8 * dWnorm_ + 0.2 * dWnorm
            # print ("dWnorm", dWnorm)
            self.hebblink_filter += d_hebblink_filter
        # per-episode renormalization of the link matrix to unit L2 norm
        print("hebblink_filter type", type(self.hebblink_filter))
        print("np.linalg.norm(self.hebblink_filter, 2)", np.linalg.norm(self.hebblink_filter, 2))
        self.hebblink_filter /= np.linalg.norm(self.hebblink_filter, 2)
        print("hebblink_filter type", type(self.hebblink_filter))
        # print(Z_err_norm)
        # print("%s.fit_hebb error p/p_bar %f" % (self.__class__.__name__, np.array(Z_err_norm)[:logidx].mean()))
        print("%s.fit_hebb |dW| = %f, |W| = %f, mean err = %f / %f" % (self.__class__.__name__, dWnorm_, w_norm, z_err_norm_, z_err_norm__))
        # print("%s.fit_hebb |W| = %f" % (self.__class__.__name__, w_norm))
        j += 1
def fit(self, X, y):
    """ActInfHebbianSOM.fit: train both SOMs on (X, y), then the Hebbian
    links between them, and flag the model as fitted."""
    # The two self-organizing maps are trained first; the associative
    # (Hebbian) link matrix connecting them is trained afterwards on the
    # same data.
    self.fit_soms(X, y)
    self.fit_hebb(X, y)
    self.fitted = True
def predict(self, X):
    """ActInfHebbianSOM.predict: prediction is delegated to sampling."""
    return self.sample(X)
def sample(self, X):
    """ActInfHebbianSOM.sample: dispatch between batch and single-sample
    modes.

    A 2D input with more than one row is treated as a batch of
    conditioning rows; everything else as a single condition.
    """
    is_batch = len(X.shape) == 2 and X.shape[0] > 1
    if is_batch:
        return self.sample_batch(X)
    return self.sample_cond(X)
def sample_cond(self, X):
    """ActInfHebbianSOM.sample_cond: draw single sample from model conditioned on X"""
    # print("%s.sample_cond X.shape = %s, %d" % (self.__class__.__name__, X.shape, 0))
    # fix the SOMs with learning rate constant 0
    # Stash the current learning rates on self before zeroing them.
    # NOTE(review): they are never restored afterwards in this method —
    # confirm whether a caller is expected to restore them.
    self.filter_e_lr = self.filter_e.map._learning_rate
    self.filter_p_lr = self.filter_p.map._learning_rate
    # print("fit_hebb", self.filter_e.map._learning_rate)
    self.filter_e.map._learning_rate = self.CT(0.0)
    self.filter_p.map._learning_rate = self.CT(0.0)
    # flattened (num_units, 1) shapes of the input (e) and output (p) maps
    e_shape = (np.prod(self.filter_e.map._shape), 1)
    p_shape = (np.prod(self.filter_p.map._shape), 1)
    # activate input network
    self.filter_e.learn(X)
    # pl.plot(self.filter_e.
    # propagate activation via hebbian associative links
    if self.hebblink_use_activity:
        # winner-take-all: keep only the maximally active input unit(s)
        e_ = self.filter_e.activity.reshape((np.prod(self.filter_e.map._shape), 1))
        e_ = (e_ == np.max(e_)) * 1.0
        e2p_activation = np.dot(self.hebblink_filter.T, e_)
        # print("e2p_activation", e2p_activation)
        # normalize to a distribution over output units (1e-9 guards /0)
        self.filter_p.activity = np.clip((e2p_activation / (np.sum(e2p_activation) + 1e-9)).reshape(self.filter_p.map._shape), 0, np.inf)
    else:
        # NOTE(review): `e` is not defined in this branch (probably meant X);
        # this path raises NameError if hebblink_use_activity is False.
        e2p_activation = np.dot(self.hebblink_filter.T, self.filter_e.distances(e).flatten().reshape(e_shape))
    # sample the output network with
    sidx = self.filter_p.sample(1)[0]
    e2p_w_p_weights = self.filter_p.neuron(self.filter_p.flat_to_coords(sidx))
    # e2p_w_p_weights = self.filter_p.neuron(self.filter_p.flat_to_coords(np.argmax(self.filter_p.activity)))
    return e2p_w_p_weights.reshape((1, self.odim))
    # ret = np.random.normal(e2p_w_p_weights, self.filter_p.sigmas[sidx] * 0.001, (1, self.odim))
    # ret = np.random.normal(e2p_w_p_weights, 0.01, (1, self.odim))
    # return ret
def sample_cond_legacy(self, X):
    """ActInfHebbianSOM.sample_cond: sample from model conditioned on X

    Legacy variant: iterates over the rows of X, activating the input SOM
    and propagating through the Hebbian links, and returns the output-map
    weights of the unit selected on the *last* row only.
    """
    # number of candidate draws used by emode 0 below
    sampling_search_num = 100
    e_shape = (np.prod(self.filter_e.map._shape), 1)
    p_shape = (np.prod(self.filter_p.map._shape), 1)
    # P_ = np.zeros((X.shape[0], self.odim))
    # E_ = np.zeros((X.shape[0], self.idim))
    # initial draw so e2p_w_p_weights is defined even for empty X
    e2p_w_p_weights = self.filter_p.neuron(self.filter_p.flat_to_coords(self.filter_p.sample(1)[0]))
    for i in range(X.shape[0]):
        # e = EP[i,:dim_e]
        # p = EP[i,dim_e:]
        e = X[i]
        # print np.argmin(som_e.distances(e)), som_e.distances(e)
        # NOTE(review): learn() is called with a live learning rate here,
        # so sampling mutates the input SOM — confirm this is intended.
        self.filter_e.learn(e)
        # print "self.filter_e.winner(e)", self.filter_e.winner(e)
        # filter_p.learn(p)
        # print "self.filter_e.activity.shape", self.filter_e.activity.shape
        # import pdb; pdb.set_trace()
        if self.hebblink_use_activity:
            e2p_activation = np.dot(self.hebblink_filter.T, self.filter_e.activity.reshape((np.prod(self.filter_e.map._shape), 1)))
            # no epsilon guard here (unlike sample_cond): division by zero
            # is possible if the propagated activation sums to 0
            self.filter_p.activity = np.clip((e2p_activation / np.sum(e2p_activation)).reshape(self.filter_p.map._shape), 0, np.inf)
        else:
            e2p_activation = np.dot(self.hebblink_filter.T, self.filter_e.distances(e).flatten().reshape(e_shape))
        # print "e2p_activation.shape, np.sum(e2p_activation)", e2p_activation.shape, np.sum(e2p_activation)
        # print "self.filter_p.activity.shape", self.filter_p.activity.shape
        # print "np.sum(self.filter_p.activity)", np.sum(self.filter_p.activity), (self.filter_p.activity >= 0).all()
        # self.filter_p.learn(p)
        # emodes: 0, 1, 2
        # emode is hard-coded; only branch 0 (random pick among draws) runs
        emode = 0 #
        if i % 1 == 0:
            if emode == 0:
                e2p_w_p_weights_ = []
                for k in range(sampling_search_num):
                    # filter.sample return the index of the sampled unit
                    e2p_w_p_weights = self.filter_p.neuron(self.filter_p.flat_to_coords(self.filter_p.sample(1)[0]))
                    e2p_w_p_weights_.append(e2p_w_p_weights)
                pred = np.array(e2p_w_p_weights_)
                # print "pred", pred
                # # if we can compare against something
                # pred_err = np.linalg.norm(pred - p, 2, axis=1)
                # # print "np.linalg.norm(e2p_w_p_weights - p, 2)", np.linalg.norm(e2p_w_p_weights - p, 2)
                # e2p_w_p = np.argmin(pred_err)
                # if not pick any
                e2p_w_p = np.random.choice(pred.shape[0])
                # print("pred_err", e2p_w_p, pred_err[e2p_w_p])
                e2p_w_p_weights = e2p_w_p_weights_[e2p_w_p]
            elif emode == 1:
                if self.hebblink_use_activity:
                    e2p_w_p = np.argmax(e2p_activation)
                else:
                    e2p_w_p = np.argmin(e2p_activation)
                e2p_w_p_weights = self.filter_p.neuron(self.filter_p.flat_to_coords(e2p_w_p))
            elif emode == 2:
                # NOTE(review): `p` is undefined here — this branch would
                # raise NameError, but it is dead code while emode == 0.
                e2p_w_p = self.filter_p.winner(p)
                e2p_w_p_weights = self.filter_p.neuron(self.filter_p.flat_to_coords(e2p_w_p))
    # P_[i] = e2p_w_p_weights
    # E_[i] = environment.compute_sensori_effect(P_[i])
    # print("e2p shape", e2p_w_p_weights.shape)
    return e2p_w_p_weights.reshape((1, self.odim))
def sample_batch(self, X):
    """ActInfHebbianSOM.sample_batch: draw one conditional sample per row
    of X and stack the results into an (X.shape[0], odim) array."""
    out = np.zeros((X.shape[0], self.odim))
    for row_idx, cond in enumerate(X):
        out[row_idx] = self.sample_cond(cond)
    return out
def sample_batch_legacy(self, X, cond_dims=None, out_dims=None, resample_interval=1):
    """ActInfHebbianSOM.sample_batch_legacy: legacy batch-sampling stub.

    Allocates zero-filled sample buffers on self, sized from X and
    out_dims, and returns the (still empty) buffers; no actual sampling
    is performed.

    BUGFIX: the defaults were mutable lists (cond_dims=[0], out_dims=[1]),
    a classic shared-mutable-default pitfall; replaced with None sentinels
    that reproduce the same effective defaults. cond_dims and
    resample_interval are accepted but unused (kept for compatibility).
    """
    if cond_dims is None:
        cond_dims = [0]
    if out_dims is None:
        out_dims = [1]
    print("%s.sample_batch_legacy data X = %s" % (self.__class__.__name__, X))
    sampmax = 20
    numsamplesteps = X.shape[0]
    odim = len(out_dims)  # self.idim - X.shape[1]
    # scratch buffers for a single sample and the accumulated samples
    self.y_sample_ = np.zeros((odim,))
    self.y_sample = np.zeros((odim,))
    self.y_samples_ = np.zeros((sampmax, numsamplesteps, odim))
    self.y_samples = np.zeros((numsamplesteps, odim))
    self.cond = np.zeros_like(X[0])
    return self.y_samples, self.y_samples_
# MDN model: karpathy, hardmaru, amjad, cbonnett, edward
def hebbsom_get_map_nodes(mdl, idim, odim):
    """Fetch the SOM neuron weight arrays from a fitted HebbSOM model and
    flatten them to (num_nodes, idim) and (num_nodes, odim) matrices."""
    nodes_in = mdl.filter_e.map.neurons
    nodes_out = mdl.filter_p.map.neurons
    print(nodes_in.shape, nodes_out.shape)
    # collapse the map grid dimensions into a single node axis
    nodes_in = nodes_in.reshape((-1, idim))
    nodes_out = nodes_out.reshape((-1, odim))
    print(nodes_in.shape, nodes_out.shape)
    return (nodes_in, nodes_out)
def plot_nodes_over_data_1d_components(X, Y, mdl, e_nodes, p_nodes, e_nodes_cov, p_nodes_cov):
    """one-dimensional plot of each components of X and Y together with those of SOM nodes for all i and o components

    One subplot per input dimension (histogram of X[:,i] plus data rug and
    node positions), followed by one subplot per output dimension.
    """
    idim = X.shape[1]
    odim = Y.shape[1]
    fig1 = pl.figure()
    fig1.suptitle("One-dimensional breakdown of SOM nodes per input dimension (%s)" % (mdl.__class__.__name__))
    numplots = idim + odim
    gs = gridspec.GridSpec(numplots, 1)
    # input (exteroceptive) dimensions
    for i in range(idim):
        ax = fig1.add_subplot(gs[i,0])
        ax.hist(X[:,i], bins=20)
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        yran = ylim[1] - ylim[0]
        # rug offsets below the histogram baseline: data at -10%, nodes at -25%
        offset1 = yran * -0.1
        offset2 = yran * -0.25
        # print("offsets 1,2 = %f, %f" % (offset1, offset2))
        ax.plot(X[:,i], np.ones_like(X[:,i]) * offset1, "ko", alpha=0.33)
        for j,node in enumerate(e_nodes[:,i]):
            # NOTE(review): indexes e_nodes_cov[i,i,i] with the dimension
            # index; [j,i,i] (per-node variance) looks more plausible —
            # myms is only used by the commented-out plot call anyway.
            myms = 2 + 30 * np.sqrt(e_nodes_cov[i,i,i])
            # print("node", j, node, myms)
            ax.plot([node], [offset2], "ro", alpha=0.33, markersize=10)
            # ax.plot([node], [offset2], "r.", alpha=0.33, markersize = myms)
            # x1, x2 = gmm.
            ax.text(node, offset2, "n%d" % j, fontsize=6)
        # pl.plot(e_nodes[:,i], np.zeros_like(e_nodes[:,i]), "ro", alpha=0.33, markersize=10)
    # output (proprioceptive) dimensions occupy the remaining rows
    for i in range(idim, numplots):
        ax = fig1.add_subplot(gs[i,0])
        ax.hist(Y[:,i-idim], bins=20)
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        yran = ylim[1] - ylim[0]
        offset1 = yran * -0.1
        offset2 = yran * -0.25
        # print("offsets 1,2 = %f, %f" % (offset1, offset2))
        ax.plot(Y[:,i-idim], np.ones_like(Y[:,i-idim]) * offset1, "ko", alpha=0.33)
        for j,node in enumerate(p_nodes[:,i-idim]):
            myms = 2 + 30 * np.sqrt(p_nodes_cov[i-idim,i-idim,i-idim])
            # print("node", j, node, myms)
            ax.plot([node], [offset2], "ro", alpha=0.33, markersize=10)
            # ax.plot([node], [offset2], "r.", alpha=0.33, markersize = myms)
            ax.text(node, offset2, "n%d" % j, fontsize=6)
        # pl.plot(p_nodes[:,i-idim], np.zeros_like(p_nodes[:,i-idim]), "ro", alpha=0.33, markersize=10)
    fig1.show()
    # pl.show()
def plot_nodes_over_data_scattermatrix(X, Y, mdl, e_nodes, p_nodes, e_nodes_cov, p_nodes_cov):
    """plot input data distribution and SOM node locations as scattermatrix all X comps over all Y comps
    X, Y, e_nodes, p_nodes

    Builds a pandas scatter_matrix of the joined (X, Y) columns and overlays
    the SOM node positions (red dots) on the input/input and output/output
    off-diagonal panels. e_nodes_cov / p_nodes_cov are accepted for
    signature compatibility but not used here.
    """
    import pandas as pd
    try:
        # BUGFIX: pandas >= 0.20 removed pandas.tools.plotting; prefer the
        # modern location and fall back for very old installations.
        from pandas.plotting import scatter_matrix
    except ImportError:
        from pandas.tools.plotting import scatter_matrix
    idim = X.shape[1]
    odim = Y.shape[1]
    numplots = idim + odim
    # e_nodes, p_nodes = hebbsom_get_map_nodes(mdl, idim, odim)
    # column labels: e_0..e_{idim-1} for inputs, p_0..p_{odim-1} for outputs
    dfcols = []
    dfcols += ["e_%d" % i for i in range(idim)]
    dfcols += ["p_%d" % i for i in range(odim)]
    # X_plus_e_nodes = np.vstack((X, e_nodes))
    # Y_plus_p_nodes = np.vstack((Y, p_nodes))
    # df = pd.DataFrame(np.hstack((X_plus_e_nodes, Y_plus_p_nodes)), columns=dfcols)
    df = pd.DataFrame(np.hstack((X, Y)), columns=dfcols)
    sm = scatter_matrix(df, alpha=0.2, figsize=(5,5), diagonal="hist")
    print("sm = %s" % (sm))
    # loop over i/o components and overlay node positions in matching panels
    idims = range(idim)
    odims = range(idim, idim+odim)
    for i in range(numplots):
        for j in range(numplots):
            if i != j and i in idims and j in idims:
                # center = np.array()
                # x1, x2 = gmm.gauss_ellipse_2d(centroids[i], ccov[i])
                sm[i,j].plot(e_nodes[:,j], e_nodes[:,i], "ro", alpha=0.5, markersize=8)
            if i != j and i in odims and j in odims:
                sm[i,j].plot(p_nodes[:,j-idim], p_nodes[:,i-idim], "ro", alpha=0.5, markersize=8)
            # if i != j and i in idims and j in odims:
            #     sm[i,j].plot(p_nodes[:,j-idim], e_nodes[:,i], "go", alpha=0.5, markersize=8)
            # if i != j and i in odims and j in idims:
            #     sm[i,j].plot(e_nodes[:,j], p_nodes[:,i-idim], "go", alpha=0.5, markersize=8)
    # get figure reference from axis and show
    fig2 = sm[0,0].get_figure()
    fig2.suptitle("Predictions over data scattermatrix (%s)" % (mdl.__class__.__name__))
    fig2.show()
def hebbsom_predict_full(X, Y, mdl):
    """Run mdl.predict row-by-row over X, collecting per-step predictions
    together with the input SOM's distances and activities.

    Returns (predictions, distances, activities): predictions has Y's
    shape; the other two are per-step lists of flattened arrays read off
    the model after each prediction (the predict API alone does not expose
    them).
    """
    distances = []
    activities = []
    predictions = np.zeros_like(Y)
    for step in range(X.shape[0]):
        x_step = X[step]
        # predict proprio from extero, one row at a time
        predictions[step] = mdl.predict(x_step)
        distances.append(mdl.filter_e.distances(x_step).flatten())
        activities.append(mdl.filter_e.activity.flatten())
    return (predictions, distances, activities)
################################################################################
# plot nodes over data with scattermatrix and data hexbin
def plot_nodes_over_data_scattermatrix_hexbin(X, Y, mdl, predictions, distances, activities):
    """plot single components X over Y with SOM sample

    Draws an idim x odim grid of panels; each panel shows a hexbin density
    of the data (X[:,j] vs Y[:,i]) with the model predictions overlaid as
    red dots. distances and activities are accepted but unused here.
    """
    idim = X.shape[1]
    odim = Y.shape[1]
    numplots = idim * odim + 2
    fig3 = pl.figure()
    fig3.suptitle("Predictions over data xy scattermatrix/hexbin (%s)" % (mdl.__class__.__name__))
    gs = gridspec.GridSpec(idim, odim)
    # build the panel grid indexed as fig3axes[input_dim][output_dim]
    fig3axes = []
    for i in range(idim):
        fig3axes.append([])
        for o in range(odim):
            fig3axes[i].append(fig3.add_subplot(gs[i,o]))
    err = 0
    # colsa = ["k", "r", "g", "c", "m", "y"]
    # colsb = ["k", "r", "g", "c", "m", "y"]
    # NOTE(review): colsb is sized by odim but indexed by j in range(idim)
    # below — an IndexError if idim > odim; colsa is unused.
    colsa = ["k" for col in range(idim)]
    colsb = ["r" for col in range(odim)]
    for i in range(odim): # odim * 2
        for j in range(idim):
            # pl.subplot(numplots, 1, (i*idim)+j+1)
            ax = fig3axes[j][i]
            # target = Y[h,i]
            # X__ = X_[j] # X[h,j]
            # err += np.sum(np.square(target - prediction))
            # ax.plot(X__, [target], colsa[j] + ".", alpha=0.25, label="target_%d" % i)
            # ax.plot(X__, [prediction[0,i]], colsb[j] + "o", alpha=0.25, label="pred_%d" % i)
            # ax.plot(X[:,j], Y[:,i], colsa[j] + ".", alpha=0.25, label="target_%d" % i)
            ax.hexbin(X[:,j], Y[:,i], gridsize = 20, alpha=0.75, cmap=pl.get_cmap("gray"))
            ax.plot(X[:,j], predictions[:,i], colsb[j] + "o", alpha=0.15, label="pred_%d" % i, markersize=8)
            # pred1 = mdl.filter_e.neuron(mdl.filter_e.flat_to_coords(activities_sorted[-1]))
            # ax.plot(X__, [pred1], "ro", alpha=0.5)
            # pred2 = mdl.filter_e.neuron(mdl.filter_e.flat_to_coords(activities_sorted[-2]))
            # ax.plot(X__, [pred2], "ro", alpha=0.25)
    # NOTE(review): err is never accumulated (the updates are commented out),
    # so this always prints 0.
    print("accum total err = %f" % (err / X.shape[0] / (idim * odim)))
    fig3.show()
def plot_hebbsom_links_distances_activations(X, Y, mdl, predictions, distances, activities):
    """plot the hebbian link matrix, and all node distances and activities for all inputs"""
    # log-scale the link weights for display (+1 keeps zeros finite)
    hebblink_log = np.log(mdl.hebblink_filter.T + 1.0)
    fig4 = pl.figure()
    fig4.suptitle("Debugging SOM: hebbian links, distances, activities (%s)" % (mdl.__class__.__name__))
    gs = gridspec.GridSpec(4, 1)
    # pl.plot(X, Y, "k.", alpha=0.5)
    # pl.subplot(numplots, 1, numplots-1)
    # panel 1: the (log) Hebbian link matrix as a heatmap
    ax1 = fig4.add_subplot(gs[0])
    # im1 = ax1.imshow(mdl.hebblink_filter, interpolation="none", cmap=pl.get_cmap("gray"))
    im1 = ax1.pcolormesh(hebblink_log, cmap=pl.get_cmap("gray"))
    ax1.set_xlabel("in (e)")
    ax1.set_ylabel("out (p)")
    cbar = fig4.colorbar(mappable = im1, ax=ax1, orientation="horizontal")
    # panel 2: per-step input-SOM distances (units x steps)
    ax2 = fig4.add_subplot(gs[1])
    distarray = np.array(distances)
    print("distarray.shape", distarray.shape)
    pcm = ax2.pcolormesh(distarray.T)
    cbar = fig4.colorbar(mappable = pcm, ax=ax2, orientation="horizontal")
    # pl.subplot(numplots, 1, numplots)
    # panel 3: per-step input-SOM activities (units x steps)
    ax3 = fig4.add_subplot(gs[2])
    actarray = np.array(activities)
    print("actarray.shape", actarray.shape)
    pcm = ax3.pcolormesh(actarray.T)
    cbar = fig4.colorbar(mappable = pcm, ax=ax3, orientation="horizontal")
    # panel 4: flattened link weights as a 1D trace
    ax4 = fig4.add_subplot(gs[3])
    ax4.plot(hebblink_log.flatten())
    print("hebblink_log", hebblink_log)
    fig4.show()
def plot_predictions_over_data(X, Y, mdl):
    """Scatter sampled model predictions over the raw (X, Y) data, one
    subplot per output dimension, annotated with mse/mae.

    NOTE(review): the error/mse/mae computed after the inner loop use only
    `prediction` from the final sample draw, not an aggregate over all
    numsamples draws — confirm whether that is intended.
    """
    # plot prediction
    idim = X.shape[1]
    odim = Y.shape[1]
    numsamples = 2
    Y_samples = []
    # draw several full prediction passes to visualize sampling spread
    for i in range(numsamples):
        Y_samples.append(mdl.predict(X))
    # print("Y_samples[0]", Y_samples[0])
    fig = pl.figure()
    fig.suptitle("Predictions over data xy (numsamples = %d, (%s)" % (numsamples, mdl.__class__.__name__))
    gs = gridspec.GridSpec(odim, 1)
    for i in range(odim):
        ax = fig.add_subplot(gs[i])
        target = Y[:,i]
        ax.plot(X, target, "k.", label="Y_", alpha=0.5)
        for j in range(numsamples):
            prediction = Y_samples[j][:,i]
            # pl.plot(prediction, target, "r.", label="Y_", alpha=0.25)
            ax.plot(X, prediction, "r.", label="Y_", alpha=0.25)
        # get limits
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        error = target - prediction
        mse = np.mean(np.square(error))
        mae = np.mean(np.abs(error))
        xran = xlim[1] - xlim[0]
        yran = ylim[1] - ylim[0]
        # place the error annotations inside the axes, relative to the limits
        ax.text(xlim[0] + xran * 0.1, ylim[0] + yran * 0.3, "mse = %f" % mse)
        ax.text(xlim[0] + xran * 0.1, ylim[0] + yran * 0.5, "mae = %f" % mae)
    fig.show()
def plot_predictions_over_data_ts(X, Y, mdl):
    """Plot sampled model predictions as timeseries against the targets,
    one subplot per output dimension, annotated with the per-step best
    (minimum over samples) mse/mae."""
    # plot prediction
    idim = X.shape[1]
    odim = Y.shape[1]
    numsamples = 2
    Y_samples = []
    # draw several full prediction passes to visualize sampling spread
    for i in range(numsamples):
        Y_samples.append(mdl.predict(X))
    # print("Y_samples[0]", Y_samples[0])
    fig = pl.figure()
    fig.suptitle("Predictions over data timeseries (numsamples = %d), (%s)" % (numsamples, mdl.__class__.__name__))
    gs = gridspec.GridSpec(odim, 1)
    for i in range(odim):
        # pl.subplot(odim, 2, (i*2)+1)
        ax = fig.add_subplot(gs[i])
        target = Y[:,i]
        ax.plot(target, "k.", label="Y_", alpha=0.5)
        # pl.subplot(odim, 2, (i*2)+2)
        # prediction = Y_[:,i]
        # pl.plot(target, "k.", label="Y")
        mses = []
        maes = []
        errors = []
        for j in range(numsamples):
            prediction = Y_samples[j][:,i]
            error = target - prediction
            errors.append(error)
            mse = np.mean(np.square(error))
            mae = np.mean(np.abs(error))
            mses.append(mse)
            maes.append(mae)
            # pl.plot(prediction, target, "r.", label="Y_", alpha=0.25)
            ax.plot(prediction, "r.", label="Y_", alpha=0.25)
        errors = np.asarray(errors)
        print("errors.shape", errors.shape)
        # per-timestep best case over the drawn samples
        aes = np.min(np.abs(errors), axis=0)
        ses = np.min(np.square(errors), axis=0)
        mae = np.mean(aes)
        mse = np.mean(ses)
        # get limits
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        xran = xlim[1] - xlim[0]
        yran = ylim[1] - ylim[0]
        ax.text(xlim[0] + xran * 0.1, ylim[0] + yran * 0.3, "mse = %f" % mse)
        ax.text(xlim[0] + xran * 0.1, ylim[0] + yran * 0.5, "mae = %f" % mae)
        # pl.plot(X[:,i], Y[:,i], "k.", alpha=0.25)
    fig.show()
def get_class_from_name(name="KNN"):
    """Map a model-class short name to its implementation.

    Unknown names (and "KNN") fall back to the KNN model; "GMM" returns a
    partial with K pre-bound to 20.
    """
    if name == "SOESGP":
        return ActInfSOESGP
    if name == "STORKGP":
        return ActInfSTORKGP
    if name == "GMM":
        return partial(ActInfGMM, K=20)
    if name == "HebbSOM":
        return ActInfHebbianSOM
    # "KNN" and any unrecognized name use the KNN model
    return ActInfKNN
def generate_inverted_sinewave_dataset(N=1000):
    """Generate the classic 'inverted sine' toy regression set.

    A noisy sinusoid y(x) is built on [0, 1] and then the axes are
    swapped, so the returned input X is multi-valued with respect to the
    target Y. Returns a pair of (N, 1) arrays.
    """
    base = np.linspace(0, 1, N)
    noisy = base + 0.3 * np.sin(2 * 3.1415926 * base) + np.random.uniform(-0.1, 0.1, N)
    # swap roles: the noisy curve becomes the input, the line the target
    X = noisy[:, np.newaxis]
    Y = base[:, np.newaxis]
    # pl.subplot(211)
    # pl.plot(Y, X, "ko", alpha=0.25)
    # pl.subplot(212)
    # pl.plot(X, Y, "ko", alpha=0.25)
    # pl.show()
    return X, Y
def test_model(args):
    """actinf_models.test_model
    Test the model type given in args.modelclass on data

    Loads or generates a dataset based on args.datafile, standardizes it,
    instantiates the requested model class, fits it, and produces the
    diagnostic plots appropriate for that model type.
    """
    # import pylab as pl
    np.random.seed(args.seed)
    # get last component of datafile, the actual filename
    datafilepath_comps = args.datafile.split("/")
    if datafilepath_comps[-1].startswith("EP"):
        # simplearm extero/proprio recording: 2D extero, 3D proprio
        idim = 2
        odim = 3
        EP = np.load(args.datafile)
        sl = slice(0, args.numsteps)
        X = EP[sl,:idim]
        Y = EP[sl,idim:]
        # print("X_std.shape", X_std.shape)
    elif datafilepath_comps[-1].startswith("NAO_EP"):
        # NAO recording: 4D extero, 4D proprio
        idim = 4
        odim = 4
        EP = np.load(args.datafile)
        sl = slice(0, args.numsteps)
        X = EP[sl,:idim]
        Y = EP[sl,idim:]
    elif args.datafile.startswith("inverted"):
        # synthetic inverted-sine toy problem
        idim = 1
        odim = 1
        X,Y = generate_inverted_sinewave_dataset(N = args.numsteps)
    else:
        # NOTE(review): this branch never assigns X or Y, so the
        # standardization below raises NameError for unrecognized
        # datafiles — confirm intended fallback behavior.
        idim = 1
        odim = 1
    # standardize both blocks to zero mean / unit variance
    X_mu = np.mean(X, axis=0)
    X_std = np.std(X, axis=0)
    Y_mu = np.mean(Y, axis=0)
    Y_std = np.std(Y, axis=0)
    X -= X_mu
    X /= X_std
    Y -= Y_mu
    Y /= Y_std
    if args.modelclass == "GMM":
        # joint dimension for the GMM (currently unused below)
        dim = idim + odim
    # diagnostics
    print("X.shape = %s, idim = %d, Y.shape = %s, odim = %d" % (X.shape, idim, Y.shape, odim))
    # pl.subplot(211)
    # pl.plot(X)
    # pl.subplot(212)
    # pl.plot(Y)
    # pl.show()
    mdlcls = get_class_from_name(args.modelclass)
    mdl = mdlcls(idim = idim, odim = odim)
    if args.modelclass == "HebbSOM":
        # HebbSOM additionally takes the episode budget
        mdl = mdlcls(idim = idim, odim = odim, numepisodes = args.numepisodes)
    print("Testing model class %s, %s" % (mdlcls, mdl))
    print("Fitting model with X.shape = %s, Y.shape = %s" % (X.shape, Y.shape))
    mdl.fit(X, Y)
    print("Plotting model %s, %s" % (mdlcls, mdl))
    if args.modelclass == "HebbSOM":
        e_nodes, p_nodes = hebbsom_get_map_nodes(mdl, idim, odim)
        # synthetic isotropic covariances for node-size display
        e_nodes_cov = np.tile(np.eye(idim) * 0.05, e_nodes.shape[0]).T.reshape((e_nodes.shape[0], idim, idim))
        p_nodes_cov = np.tile(np.eye(odim) * 0.05, p_nodes.shape[0]).T.reshape((p_nodes.shape[0], odim, odim))
        # print("nodes", e_nodes, p_nodes)
        # print("covs", e_nodes_cov, p_nodes_cov)
        # print("covs", e_nodes_cov.shape, p_nodes_cov.shape)
        plot_nodes_over_data_1d_components(X, Y, mdl, e_nodes, p_nodes, e_nodes_cov, p_nodes_cov)
        plot_nodes_over_data_scattermatrix(X, Y, mdl, e_nodes, p_nodes, e_nodes_cov, p_nodes_cov)
        predictions, distances, activities = hebbsom_predict_full(X, Y, mdl)
        plot_predictions_over_data(X, Y, mdl)
        plot_predictions_over_data_ts(X, Y, mdl)
        plot_nodes_over_data_scattermatrix_hexbin(X, Y, mdl, predictions, distances, activities)
        plot_hebbsom_links_distances_activations(X, Y, mdl, predictions, distances, activities)
        # nodes_e = filter_e.map.neurons[:,:,i]
        # nodes_p = filter_p.map.neurons[:,:,i]
        # pl.plot(nodes, filter_e.map.neurons[:,:,1], "ko", alpha=0.5, ms=10)
        # pl.show()
    elif args.modelclass == "GMM":
        # split the fitted joint-GMM centers/covariances into in/out parts
        nodes = np.array(mdl.cen_lst)
        covs = np.array(mdl.cov_lst)
        # print("nodes,covs shape", nodes.shape, covs.shape)
        e_nodes = nodes[:,:idim]
        p_nodes = nodes[:,idim:]
        e_nodes_cov = covs[:,:idim,:idim]
        p_nodes_cov = covs[:,idim:,idim:]
        print("nodes", e_nodes, p_nodes)
        print("covs", e_nodes_cov.shape, p_nodes_cov.shape)
        plot_nodes_over_data_1d_components(X, Y, mdl, e_nodes, p_nodes, e_nodes_cov, p_nodes_cov)
        plot_nodes_over_data_scattermatrix(X, Y, mdl, e_nodes, p_nodes, e_nodes_cov, p_nodes_cov)
        plot_predictions_over_data_ts(X, Y, mdl)
        plot_predictions_over_data(X, Y, mdl)
    elif args.modelclass in ["KNN", "SOESGP", "STORKGP"]:
        # print("hello")
        plot_predictions_over_data_ts(X, Y, mdl)
        plot_predictions_over_data(X, Y, mdl)
    pl.draw()
    pl.pause(1e-9)
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the requested model test(s).
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--datafile", type=str, help="datafile containing t x (dim_extero + dim_proprio) matrix ", default="data/simplearm_n1000/EP_1000.npy")
    parser.add_argument("-m", "--modelclass", type=str, help="Which model class [all] to test from " + ", ".join(model_classes), default="all")
    parser.add_argument("-n", "--numsteps", type=int, help="Number of datapoints [1000]", default=1000)
    parser.add_argument("-ne", "--numepisodes", type=int, help="Number of episodes [10]", default=10)
    parser.add_argument("-s", "--seed", type=int, help="seed for RNG [0]", default=0)
    args = parser.parse_args()
    if args.modelclass == "all":
        # interactive mode so each model's figures appear without blocking
        pl.ion()
        for mdlcls in ["KNN", "SOESGP", "STORKGP", "GMM", "HebbSOM"]:
            # reuse the parsed args, overriding only the model class
            args.modelclass = mdlcls
            test_model(args)
    else:
        test_model(args)
    # leave interactive mode and block on the open figures
    pl.ioff()
    pl.show()
| {
"repo_name": "x75/actinf",
"path": "actinf_models.py",
"copies": "1",
"size": "61457",
"license": "mit",
"hash": 3526740483992469000,
"line_mean": 40.7506793478,
"line_max": 251,
"alpha_frac": 0.5319003531,
"autogenerated": false,
"ratio": 3.1472832488349467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9110146790553821,
"avg_score": 0.013807362276225185,
"num_lines": 1472
} |
"""Active learning by learning (ALBL)
This module includes two classes. ActiveLearningByLearning is the main
algorithm for ALBL and Exp4P is the multi-armed bandit algorithm which will be
used in ALBL.
"""
from __future__ import division
import copy
import numpy as np
from libact.base.interfaces import QueryStrategy
from libact.utils import inherit_docstring_from, seed_random_state, zip
class ActiveLearningByLearning(QueryStrategy):
    r"""Active Learning By Learning (ALBL) query strategy.

    ALBL is an active learning algorithm that adaptively choose among existing
    query strategies to decide which data to make query. It utilizes Exp4.P, a
    multi-armed bandit algorithm to adaptively make such decision. More details
    of ALBL can refer to the work listed in the reference section.

    Parameters
    ----------
    T : integer
        Query budget, the maximal number of queries to be made.

    query_strategies : list of :py:mod:`libact.query_strategies`\
                  object instance
        The active learning algorithms used in ALBL, which will be both the
        the arms in the multi-armed bandit algorithm Exp4.P.
        Note that these query_strategies should share the same dataset
        instance with ActiveLearningByLearning instance.

    delta : float, optional (default=0.1)
        Parameter for Exp4.P.

    uniform_sampler : {True, False}, optional (default=True)
        Determining whether to include uniform random sample as one of arms.

    pmin : float, 0<pmin< :math:`\frac{1}{len(query\_strategies)}`,\
           optional (default= :math:`\frac{\sqrt{\log{N}}}{KT}`)
        Parameter for Exp4.P. The minimal probability for random selection of
        the arms (aka the underlying active learning algorithms). N = K =
        number of query_strategies, T is the number of query budgets.

    model : :py:mod:`libact.models` object instance
        The learning model used for the task.

    random_state : {int, np.random.RandomState instance, None}, optional (default=None)
        If int or None, random_state is passed as parameter to generate
        np.random.RandomState instance. if np.random.RandomState instance,
        random_state is the random number generate.

    Attributes
    ----------
    query_strategies\_ : list of :py:mod:`libact.query_strategies` object instance
        The active learning algorithm instances.

    exp4p\_ : instance of Exp4P object
        The multi-armed bandit instance.

    queried_hist\_ : list of integer
        A list of entry_id of the dataset which is queried in the past.

    random_state\_ : np.random.RandomState instance
        The random number generator using.

    Examples
    --------
    Here is an example of how to declare a ActiveLearningByLearning
    query_strategy object:

    .. code-block:: python

       from libact.query_strategies import ActiveLearningByLearning
       from libact.query_strategies import HintSVM
       from libact.query_strategies import UncertaintySampling
       from libact.models import LogisticRegression

       qs = ActiveLearningByLearning(
            dataset, # Dataset object
            T=100, # qs.make_query can be called for at most 100 times
            query_strategies=[
                UncertaintySampling(dataset, model=LogisticRegression(C=1.)),
                UncertaintySampling(dataset, model=LogisticRegression(C=.01)),
                HintSVM(dataset)
                ],
            model=LogisticRegression()
        )

    The :code:`query_strategies` parameter is a list of
    :code:`libact.query_strategies` object instances where each of their
    associated dataset must be the same :code:`Dataset` instance. ALBL combines
    the result of these query strategies and generate its own suggestion of
    which sample to query.  ALBL will adaptively *learn* from each of the
    decision it made, using the given supervised learning model in
    :code:`model` parameter to evaluate its IW-ACC.

    References
    ----------
    .. [1] Wei-Ning Hsu, and Hsuan-Tien Lin. "Active Learning by Learning."
           Twenty-Ninth AAAI Conference on Artificial Intelligence. 2015.

    """

    def __init__(self, *args, **kwargs):
        super(ActiveLearningByLearning, self).__init__(*args, **kwargs)
        self.query_strategies_ = kwargs.pop('query_strategies', None)
        if self.query_strategies_ is None:
            raise TypeError(
                "__init__() missing required keyword-only argument: "
                "'query_strategies'"
            )
        elif not self.query_strategies_:
            raise ValueError("query_strategies list is empty")

        # check if query_strategies share the same dataset with albl
        for qs in self.query_strategies_:
            if qs.dataset != self.dataset:
                # BUGFIX: the two literals previously concatenated to
                # "...the samedataset instance..." (missing space)
                raise ValueError("query_strategies should share the same "
                                 "dataset instance with albl")

        # parameters for Exp4.p
        self.delta = kwargs.pop('delta', 0.1)

        # query budget
        self.T = kwargs.pop('T', None)
        if self.T is None:
            raise TypeError(
                "__init__() missing required keyword-only argument: 'T'"
            )

        # map entry_id -> position in the unlabeled pool; computed once from
        # a single get_unlabeled_entries() call (previously fetched twice)
        self.unlabeled_entry_ids, _ = self.dataset.get_unlabeled_entries()
        self.unlabeled_invert_id_idx = {}
        for i, idx in enumerate(self.unlabeled_entry_ids):
            self.unlabeled_invert_id_idx[idx] = i

        self.uniform_sampler = kwargs.pop('uniform_sampler', True)
        if not isinstance(self.uniform_sampler, bool):
            raise ValueError("'uniform_sampler' should be {True, False}")

        self.pmin = kwargs.pop('pmin', None)
        n_algorithms = (len(self.query_strategies_) + self.uniform_sampler)
        if self.pmin and (self.pmin > (1. / n_algorithms) or self.pmin < 0):
            raise ValueError("'pmin' should be 0 < pmin < "
                             "1/len(n_active_algorithm)")

        self.exp4p_ = Exp4P(
            query_strategies=self.query_strategies_,
            T=self.T,
            delta=self.delta,
            pmin=self.pmin,
            unlabeled_invert_id_idx=self.unlabeled_invert_id_idx,
            uniform_sampler=self.uniform_sampler
        )
        self.budget_used = 0

        # classifier instance
        self.model = kwargs.pop('model', None)
        if self.model is None:
            raise TypeError(
                "__init__() missing required keyword-only argument: 'model'"
            )

        random_state = kwargs.pop('random_state', None)
        self.random_state_ = seed_random_state(random_state)

        self.query_dist = None

        # W holds the importance weights (1/p) of each past query;
        # queried_hist_ holds the corresponding entry ids
        self.W = []
        self.queried_hist_ = []

    def calc_reward_fn(self):
        """Calculate the reward value"""
        model = copy.copy(self.model)
        model.train(self.dataset)

        # reward function: Importance-Weighted-Accuracy (IW-ACC) (tau, f)
        reward = 0.
        for i in range(len(self.queried_hist_)):
            reward += self.W[i] * (
                model.predict(
                    self.dataset.data[
                        self.queried_hist_[i]][0].reshape(1, -1)
                )[0] ==
                self.dataset.data[self.queried_hist_[i]][1]
            )
        # normalize by pool size and budget
        reward /= (self.dataset.len_labeled() + self.dataset.len_unlabeled())
        reward /= self.T
        return reward

    def calc_query(self):
        """Calculate the sampling query distribution"""
        # initial query: no reward available yet (-1 sentinel)
        if self.query_dist is None:
            self.query_dist = self.exp4p_.next(-1, None, None)
        else:
            self.query_dist = self.exp4p_.next(
                self.calc_reward_fn(),
                self.queried_hist_[-1],
                self.dataset.data[self.queried_hist_[-1]][1]
            )
        return

    @inherit_docstring_from(QueryStrategy)
    def update(self, entry_id, label):
        # Calculate the next query after updating the question asked with an
        # answer.
        ask_idx = self.unlabeled_invert_id_idx[entry_id]
        self.W.append(1. / self.query_dist[ask_idx])
        self.queried_hist_.append(entry_id)

    @inherit_docstring_from(QueryStrategy)
    def make_query(self):
        dataset = self.dataset
        try:
            unlabeled_entry_ids, _ = dataset.get_unlabeled_entries()
        except ValueError:
            # might be no more unlabeled data left
            return

        while self.budget_used < self.T:
            self.calc_query()
            ask_idx = self.random_state_.choice(
                np.arange(len(self.unlabeled_invert_id_idx)),
                size=1,
                p=self.query_dist
            )[0]
            ask_id = self.unlabeled_entry_ids[ask_idx]

            if ask_id in unlabeled_entry_ids:
                self.budget_used += 1
                return ask_id
            else:
                # the sampled entry is already labeled: record it with its
                # known label and redraw without consuming budget
                self.update(ask_id, dataset.data[ask_id][1])

        raise ValueError("Out of query budget")
class Exp4P(object):
r"""A multi-armed bandit algorithm Exp4.P.
For the Exp4.P used in ALBL, the number of arms (actions) and number of
experts are equal to the number of active learning algorithms wanted to
use. The arms (actions) are the active learning algorithms, where is
inputed from parameter 'query_strategies'. There is no need for the input
of experts, the advice of the kth expert are always equal e_k, where e_k is
the kth column of the identity matrix.
Parameters
----------
query_strategies : QueryStrategy instances
The active learning algorithms wanted to use, it is equivalent to
actions or arms in original Exp4.P.
unlabeled_invert_id_idx : dict
A look up table for the correspondance of entry_id to the index of the
unlabeled data.
delta : float, >0, optional (default=0.1)
A parameter.
pmin : float, 0<pmin<1/len(query_strategies), optional (default= :math:`\frac{\sqrt{log(N)}}{KT}`)
The minimal probability for random selection of the arms (aka the
unlabeled data), N = K = number of query_strategies, T is the maximum
number of rounds.
T : int, optional (default=100)
The maximum number of rounds.
uniform_sampler : {True, False}, optional (default=Truee)
Determining whether to include uniform random sampler as one of the
underlying active learning algorithms.
Attributes
----------
t : int
The current round this instance is at.
N : int
The number of arms (actions) in this exp4.p instance.
query_models\_ : list of :py:mod:`libact.query_strategies` object instance
The underlying active learning algorithm instances.
References
----------
.. [1] Beygelzimer, Alina, et al. "Contextual bandit algorithms with
supervised learning guarantees." In Proceedings on the International
Conference on Artificial Intelligence and Statistics (AISTATS),
2011u.
"""
def __init__(self, *args, **kwargs):
""" """
# QueryStrategy class object instances
self.query_strategies_ = kwargs.pop('query_strategies', None)
if self.query_strategies_ is None:
raise TypeError(
"__init__() missing required keyword-only argument: "
"'query_strategies'"
)
elif not self.query_strategies_:
raise ValueError("query_strategies list is empty")
# whether to include uniform random sampler as one of underlying active
# learning algorithms
self.uniform_sampler = kwargs.pop('uniform_sampler', True)
# n_armss
if self.uniform_sampler:
self.N = len(self.query_strategies_) + 1
else:
self.N = len(self.query_strategies_)
# weight vector to each query_strategies, shape = (N, )
self.w = np.array([1. for _ in range(self.N)])
# max iters
self.T = kwargs.pop('T', 100)
# delta > 0
self.delta = kwargs.pop('delta', 0.1)
# n_arms = n_models (n_query_algorithms) in ALBL
self.K = self.N
# p_min in [0, 1/n_arms]
self.pmin = kwargs.pop('pmin', None)
if self.pmin is None:
self.pmin = np.sqrt(np.log(self.N) / self.K / self.T)
self.exp4p_gen = self.exp4p()
self.unlabeled_invert_id_idx = kwargs.pop('unlabeled_invert_id_idx')
if not self.unlabeled_invert_id_idx:
raise TypeError(
"__init__() missing required keyword-only argument:"
"'unlabeled_invert_id_idx'"
)
def __next__(self, reward, ask_id, lbl):
"""For Python3 compatibility of generator."""
return self.next(reward, ask_id, lbl)
def next(self, reward, ask_id, lbl):
"""Taking the label and the reward value of last question and returns
the next question to ask."""
# first run don't have reward, TODO exception on reward == -1 only once
if reward == -1:
return next(self.exp4p_gen)
else:
# TODO exception on reward in [0, 1]
return self.exp4p_gen.send((reward, ask_id, lbl))
def exp4p(self):
    """The generator which implements the main part of Exp4.P.

    Parameters
    ----------
    reward: float
        The reward value calculated from ALBL.

    ask_id: integer
        The entry_id of the sample point ALBL asked.

    lbl: integer
        The answer received from asking the entry_id ask_id.

    Yields
    ------
    q: array-like, shape = [K]
        The query vector which tells ALBL what kind of distribution it
        should sample from the unlabeled pool.
    """
    while True:
        # TODO probabilistic active learning algorithm
        # Advice matrix: one row per expert, one column per unlabeled
        # sample; len(self.unlabeled_invert_id_idx) is the number of
        # unlabeled data.
        query = np.zeros((self.N, len(self.unlabeled_invert_id_idx)))
        if self.uniform_sampler:
            # Last row is the uniform-random expert.
            query[-1, :] = 1. / len(self.unlabeled_invert_id_idx)
        # Each deterministic strategy puts all its mass on the single
        # sample it would query next.
        for i, model in enumerate(self.query_strategies_):
            query[i][self.unlabeled_invert_id_idx[model.make_query()]] = 1

        # choice vector over experts, shape = (self.K, )
        W = np.sum(self.w)
        p = (1 - self.K * self.pmin) * self.w / W + self.pmin

        # query vector, shape = (self.n_unlabeled, )
        query_vector = np.dot(p, query)

        reward, ask_id, _ = yield query_vector
        ask_idx = self.unlabeled_invert_id_idx[ask_id]
        # Importance-weighted reward estimate for each expert.
        rhat = reward * query[:, ask_idx] / query_vector[ask_idx]

        # The original advice vector in Exp4.P in ALBL is an identity
        # matrix, so the per-expert estimate equals rhat directly.
        yhat = rhat
        vhat = 1 / p

        # Exponential weight update of Exp4.P.
        self.w = self.w * np.exp(
            self.pmin / 2 * (
                yhat + vhat * np.sqrt(
                    np.log(self.N / self.delta) / self.K / self.T
                )
            )
        )
    # NOTE(review): unreachable (the loop never exits). Raising
    # StopIteration inside a generator is a RuntimeError under PEP 479
    # (Python 3.7+); harmless only because this line cannot execute.
    raise StopIteration
| {
"repo_name": "ntucllab/libact",
"path": "libact/query_strategies/active_learning_by_learning.py",
"copies": "1",
"size": "15164",
"license": "bsd-2-clause",
"hash": -4136409435558036500,
"line_mean": 35.5397590361,
"line_max": 102,
"alpha_frac": 0.6003692957,
"autogenerated": false,
"ratio": 4.022281167108753,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0000798085951778274,
"num_lines": 415
} |
""" Active Learning by QUerying Informative and Representative Examples (QUIRE)
This module contains a class that implements an active learning algorithm
(query strategy): QUIRE
"""
import bisect
import numpy as np
from sklearn.metrics.pairwise import linear_kernel, polynomial_kernel,\
rbf_kernel
from libact.base.interfaces import QueryStrategy
class QUIRE(QueryStrategy):
    """Querying Informative and Representative Examples (QUIRE)

    Query the most informative and representative examples where the metrics
    measuring and combining are done using a min-max approach.

    Parameters
    ----------
    lambda: float, optional (default=1.0)
        A regularization parameter used in the regularization learning
        framework.

    kernel : {'linear', 'poly', 'rbf', callable}, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', or a callable.
        If a callable is given it is used to pre-compute the kernel matrix
        from data matrices; that matrix should be an array of shape
        ``(n_samples, n_samples)``.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default=1.)
        Kernel coefficient for 'rbf', 'poly'.

    coef0 : float, optional (default=1.)
        Independent term in kernel function.
        It is only significant in 'poly'.

    Examples
    --------
    Here is an example of declaring a QUIRE query_strategy object:

    .. code-block:: python

       from libact.query_strategies import QUIRE

       qs = QUIRE(
                dataset, # Dataset object
            )

    References
    ----------
    .. [1] S.-J. Huang, R. Jin, and Z.-H. Zhou. Active learning by querying
           informative and representative examples.
    """

    def __init__(self, *args, **kwargs):
        super(QUIRE, self).__init__(*args, **kwargs)
        # Unlabeled / labeled sample indices, kept as sorted Python lists.
        self.Uindex = self.dataset.get_unlabeled_entries()[0].tolist()
        self.Lindex = np.where(self.dataset.get_labeled_mask())[0].tolist()
        # Regularization strength ('lambda' is a reserved word, hence lmbda).
        self.lmbda = kwargs.pop('lambda', 1.)
        X, self.y = self.dataset.get_entries()
        self.y = list(self.y)
        self.kernel = kwargs.pop('kernel', 'rbf')
        if self.kernel == 'rbf':
            self.K = rbf_kernel(X=X, Y=X, gamma=kwargs.pop('gamma', 1.))
        elif self.kernel == 'poly':
            self.K = polynomial_kernel(X=X,
                                       Y=X,
                                       coef0=kwargs.pop('coef0', 1),
                                       degree=kwargs.pop('degree', 3),
                                       gamma=kwargs.pop('gamma', 1.))
        elif self.kernel == 'linear':
            self.K = linear_kernel(X=X, Y=X)
        elif hasattr(self.kernel, '__call__'):
            # User-supplied kernel callable; it must return an
            # (n_samples, n_samples) ndarray.
            self.K = self.kernel(X=np.array(X), Y=np.array(X))
        else:
            raise NotImplementedError
        if not isinstance(self.K, np.ndarray):
            raise TypeError('K should be an ndarray')
        if self.K.shape != (len(X), len(X)):
            raise ValueError(
                'kernel should have size (%d, %d)' % (len(X), len(X)))
        # L = (K + lambda * I)^-1, reused by every make_query call.
        self.L = np.linalg.inv(self.K + self.lmbda * np.eye(len(X)))

    def update(self, entry_id, label):
        # Move entry_id from the unlabeled list into the sorted labeled
        # list and record its label.
        bisect.insort(a=self.Lindex, x=entry_id)
        self.Uindex.remove(entry_id)
        self.y[entry_id] = label

    def make_query(self):
        """Return the entry id of the unlabeled sample with the smallest
        QUIRE evaluation value."""
        L = self.L
        Lindex = self.Lindex
        Uindex = self.Uindex
        query_index = -1
        min_eva = np.inf
        # Labels collected so far, in index order (None entries skipped).
        y_labeled = np.array([label for label in self.y if label is not None])
        det_Laa = np.linalg.det(L[np.ix_(Uindex, Uindex)])
        # efficient computation of inv(Laa)
        # NOTE(review): this uses inv(lambda * I) rather than the full
        # blockwise-inversion term inv(lambda * I + K_LL); looks like an
        # approximation -- confirm against the QUIRE paper.
        M3 = np.dot(self.K[np.ix_(Uindex, Lindex)],
                    np.linalg.inv(self.lmbda * np.eye(len(Lindex))))
        M2 = np.dot(M3, self.K[np.ix_(Lindex, Uindex)])
        M1 = self.lmbda * np.eye(len(Uindex)) + self.K[np.ix_(Uindex, Uindex)]
        inv_Laa = M1 - M2
        iList = list(range(len(Uindex)))
        if len(iList) == 1:
            # Only one candidate left; nothing to compare.
            return Uindex[0]
        for i, each_index in enumerate(Uindex):
            # go through all unlabeled instances and compute their evaluation
            # values one by one
            Uindex_r = Uindex[:]
            Uindex_r.remove(each_index)
            iList_r = iList[:]
            iList_r.remove(i)
            # Downdate inv_Laa to exclude the candidate under examination.
            inv_Luu = inv_Laa[np.ix_(iList_r, iList_r)] - 1 / inv_Laa[i, i] * \
                np.dot(inv_Laa[iList_r, i], inv_Laa[iList_r, i].T)
            tmp = np.dot(
                L[each_index][Lindex] -
                np.dot(
                    np.dot(
                        L[each_index][Uindex_r],
                        inv_Luu
                    ),
                    L[np.ix_(Uindex_r, Lindex)]
                ),
                y_labeled,
            )
            eva = L[each_index][each_index] - \
                det_Laa / L[each_index][each_index] + 2 * np.abs(tmp)
            if eva < min_eva:
                query_index = each_index
                min_eva = eva
        return query_index
| {
"repo_name": "ntucllab/libact",
"path": "libact/query_strategies/quire.py",
"copies": "1",
"size": "5471",
"license": "bsd-2-clause",
"hash": 7316460939588419000,
"line_mean": 34.525974026,
"line_max": 79,
"alpha_frac": 0.543593493,
"autogenerated": false,
"ratio": 3.721768707482993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9765362200482993,
"avg_score": 0,
"num_lines": 154
} |
""" Active learning suggestions.
Module structure:
- Arm
- RandomArm
- WeightedArm
- MarginArm
- ConfidenceArm
- EntropyArm
- CommitteeArm
- QBBMarginArm
- QBBKLArm
"""
# Author: Alasdair Tran
# License: BSD 3 clause
import logging
import numpy as np
from abc import ABC, abstractmethod
from numpy.random import RandomState
from scipy.stats import itemfreq
__all__ = ['RandomArm',
'MarginArm',
'ConfidenceArm',
'EntropyArm',
'QBBMarginArm',
'QBBKLArm']
logger = logging.getLogger(__name__)
class Arm(ABC):
    """ Abstract base class for an active learning arm.

    This class cannot be used directly but instead serves as the base class
    for all active learning suggestions. Concrete arms implement the
    abstract `score` method; the inherited `select` method then returns the
    indices of the highest-scoring objects in the pool for labelling.

    Parameters
    ----------
    pool : numpy array of shape [n_samples, n_features]
        The feature matrix of all the examples (labelled and unlabelled).

    labels : numpy masked array of shape [n_samples].
        The missing entries of y corresponds to the unlabelled examples.

    random_state : int or RandomState object, optional (default=None)
        Provide a random seed if the results need to be reproducible.
    """
    def __init__(self, pool, labels, random_state=None):
        self.pool = pool
        self.labels = labels
        # Accept either a ready-made RandomState or a plain seed value.
        if type(random_state) is RandomState:
            self.seed = random_state
        else:
            self.seed = RandomState(random_state)

    @abstractmethod
    def score(self, candidate_mask, predictions):
        """ Compute the score of each candidate. """
        pass

    def select(self, candidate_mask, predictions, n_best_candidates):
        """ Pick the candidates with the highest scores.

        Parameters
        ----------
        candidate_mask : numpy boolean array
            The boolean array that tells us which examples the arm is allowed to examine.

        predictions : numpy array
            Current class probabilities of the unlabelled candidates.

        n_best_candidates : int, optional (default=1)
            The number of best candidates to be returned.

        Returns
        -------
        best_candidates : int
            The indices of the best candidates.
        """
        scores = self.score(candidate_mask, predictions)
        best_candidates = self._select_from_scores(candidate_mask, scores, n_best_candidates)
        return best_candidates

    def _select_from_scores(self, candidate_mask, candidate_scores, n_best_candidates):
        """ Pick the candidates with the highest scores. """
        # Non-candidates get -inf so they can never be picked.
        pool_scores = np.full(len(candidate_mask), -np.inf)
        pool_scores[candidate_mask] = candidate_scores

        # make sure we don't return non-candidates
        n_best_candidates = min(n_best_candidates, len(candidate_scores))

        # sort from largest to smallest and pick the candidate(s) with the highest score(s)
        best_candidates = np.argsort(-pool_scores)[:n_best_candidates]
        return best_candidates
class RandomArm(Arm):
    """ Pick random candidates from the unlabelled pool for querying (passive learning).

    Parameters
    ----------
    pool : numpy array of shape [n_samples, n_features]
        The feature matrix of all the examples (labelled and unlabelled).

    labels : numpy masked array of shape [n_samples].
        The missing entries of y corresponds to the unlabelled examples.

    random_state : int or RandomState object, optional (default=None)
        Provide a random seed if the results need to be reproducible.
    """

    def score(self, candidate_mask, predictions):
        """ Assign every candidate a uniformly random score.

        Parameters
        ----------
        candidate_mask : numpy boolean array
            The boolean array that tells us which examples the arm is allowed to examine.

        predictions : numpy array
            Current class probabilities of the unlabelled candidates.

        Returns
        -------
        scores : [float]
            One uniform random draw per candidate.
        """
        n_candidates = len(predictions)
        return self.seed.rand(n_candidates)
class WeightedArm(Arm):
    """ Abstract base class for a weighted active learning arm.

    This class cannot be used directly but instead serves as the base class for
    all weighted active learning suggestions. The weight uses a pre-computed
    similarity matrix to obtain the information density for each candidate.
    Candidates in a more dense region are given a higher weight.

    Parameters
    ----------
    pool : numpy array of shape [n_samples, n_features]
        The feature matrix of all the examples (labelled and unlabelled).

    labels : numpy masked array of shape [n_samples].
        The missing entries of y corresponds to the unlabelled examples.

    random_state : int or RandomState object, optional (default=None)
        Provide a random seed if the results need to be reproducible.

    similarity : numpy array of shape [n_samples, n_samples], optional (default=None)
        A similarity matrix of all the examples in the pool. If not given,
        the information density will not be used.
    """
    def __init__(self, pool, labels, random_state=None, similarity=None):
        super().__init__(pool, labels, random_state)
        self.similarity = similarity

    def _select_from_scores(self, candidate_mask, candidate_scores, n_best_candidates):
        """ Pick the candidates with the highest scores, optionally weighted by the density. """
        if self.similarity is not None:
            # Mean similarity of each candidate to all unlabelled examples
            # (labels.mask is True for the unlabelled entries).
            density_weight = self.similarity[np.ix_(candidate_mask, self.labels.mask)]
            density_weight = np.mean(density_weight, axis=1)
            # NOTE: in-place multiply mutates the caller-supplied scores array.
            candidate_scores *= density_weight
        best_candidates = super()._select_from_scores(candidate_mask, candidate_scores,
                                                      n_best_candidates)
        return best_candidates
class MarginArm(WeightedArm):
    """ Suggests the candidate with the smallest margin.

    The margin is defined as the difference between the two largest values
    in the prediction vector.

    Parameters
    ----------
    pool : numpy array of shape [n_samples, n_features]
        The feature matrix of all the examples (labelled and unlabelled).

    labels : numpy masked array of shape [n_samples].
        The missing entries of y corresponds to the unlabelled examples.

    random_state : int or RandomState object, optional (default=None)
        Provide a random seed if the results need to be reproducible.

    similarity : numpy array of shape [n_samples, n_samples], optional (default=None)
        A similarity matrix of all the examples in the pool. If not given,
        the information density will not be used.
    """

    def score(self, candidate_mask, predictions):
        """ Score candidates so that a smaller margin yields a higher score.

        Parameters
        ----------
        candidate_mask : numpy boolean array
            The boolean array that tells us which examples the arm is allowed to examine.

        predictions : numpy array
            Current class probabilities of the unlabelled candidates.

        Returns
        -------
        scores : [float]
            The scores of the candidates.
        """
        # Order each row's probabilities ascending so the two largest sit
        # in the final two columns.
        ordered = np.sort(predictions, axis=1)
        top_gap = ordered[:, -1] - ordered[:, -2]
        # Invert so that a smaller margin produces a larger score.
        return 1 - np.abs(top_gap)
class ConfidenceArm(WeightedArm):
    """ Suggests the candidate that we are least confident about its most likely labelling.

    Parameters
    ----------
    pool : numpy array of shape [n_samples, n_features]
        The feature matrix of all the examples (labelled and unlabelled).

    labels : numpy masked array of shape [n_samples].
        The missing entries of y corresponds to the unlabelled examples.

    random_state : int or RandomState object, optional (default=None)
        Provide a random seed if the results need to be reproducible.

    similarity : numpy array of shape [n_samples, n_samples], optional (default=None)
        A similarity matrix of all the examples in the pool. If not given,
        the information density will not be used.
    """

    def score(self, candidate_mask, predictions):
        """ Score candidates by how unsure we are of their most likely label.

        Parameters
        ----------
        candidate_mask : numpy boolean array
            The boolean array that tells us which examples the arm is allowed to examine.

        predictions : numpy array
            Current class probabilities of the unlabelled candidates.

        Returns
        -------
        scores : [float]
            The scores of the candidates.
        """
        # The less probable the top label is, the higher the score.
        top_prob = np.max(predictions, axis=1)
        return 1 - top_prob
class EntropyArm(WeightedArm):
    """ Suggests the candidates whose prediction vectors display the greatest entropy.

    Parameters
    ----------
    pool : numpy array of shape [n_samples, n_features]
        The feature matrix of all the examples (labelled and unlabelled).

    labels : numpy masked array of shape [n_samples].
        The missing entries of y corresponds to the unlabelled examples.

    random_state : int or RandomState object, optional (default=None)
        Provide a random seed if the results need to be reproducible.

    similarity : numpy array of shape [n_samples, n_samples], optional (default=None)
        A similarity matrix of all the examples in the pool. If not given,
        the information density will not be used.
    """

    def score(self, candidate_mask, predictions):
        """ Compute the Shannon entropy of each candidate's prediction vector.

        Parameters
        ----------
        candidate_mask : numpy boolean array
            The boolean array that tells us which examples the arm is allowed to examine.

        predictions : numpy array
            Current class probabilities of the unlabelled candidates.

        Returns
        -------
        scores : [float]
            The scores of the candidates.
        """
        # 0 * log(0) produces nan; nan_to_num maps those terms to zero.
        log_probs = np.log(predictions)
        contributions = np.nan_to_num(predictions * log_probs)
        return -np.sum(contributions, axis=1)
class CommitteeArm(WeightedArm):
    """ Abstract base class for a committee active learning arm.

    Serves as the shared base for all suggestions that query a committee of
    classifiers; it cannot be used directly.

    Parameters
    ----------
    pool : numpy array of shape [n_samples, n_features]
        The feature matrix of all the examples (labelled and unlabelled).

    labels : numpy masked array of shape [n_samples].
        The missing entries of y corresponds to the unlabelled examples.

    committee : BaggingClassifier object
        The committee should have the same interface as scikit-learn BaggingClassifier.

    n_committee_samples : int, optional (default=300)
        The maximum number of training examples that are given to each committee member
        during training.

    random_state : int or RandomState object, optional (default=None)
        Provide a random seed if the results need to be reproducible.

    similarity : numpy array of shape [n_samples, n_samples], optional (default=None)
        A similarity matrix of all the examples in the pool. If not given,
        the information density will not be used.
    """

    def __init__(self, pool, labels, committee, n_committee_samples=300,
                 random_state=None, similarity=None):
        super().__init__(pool, labels, random_state, similarity)
        # Bagging ensemble that votes on the candidates.
        self.committee = committee
        # Cap on the training examples handed to each committee member.
        self.n_committee_samples = n_committee_samples
class QBBMarginArm(CommitteeArm):
    """ Pick the candidates with the smallest average margins.

    We first use bagging to train a number of classifiers. The margin is then defined as
    the average difference between the two largest values in the prediction vector.

    Parameters
    ----------
    pool : numpy array of shape [n_samples, n_features]
        The feature matrix of all the examples (labelled and unlabelled).

    labels : numpy masked array of shape [n_samples].
        The missing entries of y corresponds to the unlabelled examples.

    committee : BaggingClassifier object
        The committee should have the same interface as scikit-learn BaggingClassifier.

    n_committee_samples : int, optional (default=300)
        The maximum number of training examples that are given to each committee member
        during training.

    random_state : int or RandomState object, optional (default=None)
        Provide a random seed if the results need to be reproducible.

    similarity : numpy array of shape [n_samples, n_samples], optional (default=None)
        A similarity matrix of all the examples in the pool. If not given,
        the information density will not be used.
    """

    def score(self, candidate_mask, predictions):
        """ Compute the average margin of each candidate.

        Parameters
        ----------
        candidate_mask : numpy boolean array
            The boolean array that tells us which examples the arm is allowed to examine.

        predictions : numpy array
            Current class probabilities of the unlabelled candidates.

        Returns
        -------
        scores : [float]
            The scores of the candidates.
        """
        # check that the max bagging sample is not too big
        self.committee.max_samples = min(self.n_committee_samples, np.sum(~self.labels.mask))

        # train the committee on the currently labelled examples
        try:
            self.committee.fit(self.pool[~self.labels.mask], self.labels[~self.labels.mask])
        # the classifier will fail if there is only one class in the training set
        except ValueError:
            logger.info('Iteration {}: Class distribution is too skewed.'.format(
                np.sum(~self.labels.mask)) +
                'Falling back to passive learning.')
            # Fall back to uniformly random scores (passive learning).
            return self.seed.rand(len(predictions))

        committee_predictions = self._predict(candidate_mask)

        # sort the probabilities from smallest to largest
        committee_predictions = np.sort(committee_predictions, axis=1)

        # compute the margin (difference between two largest probabilities)
        # the minus in front is there so that we can assign a higher score
        # to those candidates with a smaller margin
        margin = 1 - np.abs(committee_predictions[:,-1] - committee_predictions[:,-2])
        return margin

    def _predict(self, candidate_mask):
        """ Generate prediction vectors for the unlabelled candidates. """
        n_samples = len(self.pool[candidate_mask])
        n_classes = len(self.committee.classes_)
        probs = np.zeros((n_samples, n_classes))
        # NOTE(review): scipy.stats.itemfreq was removed in modern SciPy --
        # confirm the pinned version or switch to np.unique(return_counts=True).
        class_freq = itemfreq(self.labels[~self.labels.mask])

        for member in self.committee.estimators_:
            member_prob = member.predict_proba(self.pool[candidate_mask])
            member_n_classes = member_prob.shape[1]

            if n_classes == member_n_classes:
                probs += member_prob
            else:
                # A member trained on a subset of classes: map its columns
                # onto the most frequent classes. Assumes class labels are
                # 0..n_classes-1 so itemfreq row order matches the
                # classifier's class indices -- TODO confirm.
                member_classes = class_freq[:,1].argsort()[::-1]
                member_classes = member_classes[:member_n_classes]
                probs[:, member_classes] += member_prob[:, range(member_n_classes)]

        # average out the probabilities
        probs /= len(self.committee.estimators_)
        return probs
class QBBKLArm(CommitteeArm):
    """ Pick the candidates with the largest average KL divergence from the mean.

    We first use bagging to train a number of classifiers. We then choose the candidate
    that has the largest Kullback–Leibler divergence from the average.

    Parameters
    ----------
    pool : numpy array of shape [n_samples, n_features]
        The feature matrix of all the examples (labelled and unlabelled).

    labels : numpy masked array of shape [n_samples].
        The missing entries of y corresponds to the unlabelled examples.

    committee : BaggingClassifier object
        The committee should have the same interface as scikit-learn BaggingClassifier.

    n_committee_samples : int, optional (default=300)
        The maximum number of training examples that are given to each committee member
        during training.

    random_state : int or RandomState object, optional (default=None)
        Provide a random seed if the results need to be reproducible.

    similarity : numpy array of shape [n_samples, n_samples], optional (default=None)
        A similarity matrix of all the examples in the pool. If not given,
        the information density will not be used.
    """

    def score(self, candidate_mask, predictions):
        """ Pick the candidates with the largest average KL divergence from the mean.

        Parameters
        ----------
        candidate_mask : numpy boolean array
            The boolean array that tells us which examples the arm is allowed to examine.

        predictions : numpy array
            Current class probabilities of the unlabelled candidates.

        Returns
        -------
        scores : [float]
            The scores of the candidates.
        """
        # check that the max bagging sample is not too big
        self.committee.max_samples = min(self.n_committee_samples, np.sum(~self.labels.mask))

        # train the committee on the currently labelled examples
        try:
            self.committee.fit(self.pool[~self.labels.mask], self.labels[~self.labels.mask])
        # the classifier will fail if there is only one class in the training set
        except ValueError:
            logger.info('Iteration {}: Class distribution is too skewed.'.format(
                np.sum(~self.labels.mask)) +
                'Falling back to passive learning.')
            # Fall back to uniformly random scores (passive learning).
            return self.seed.rand(len(predictions))

        avg_probs, prob_list = self._predict(candidate_mask)

        # compute the KL divergence of each member from the mean prediction
        # (0 * log(0) terms are zeroed out by nan_to_num)
        avg_kl = np.zeros(avg_probs.shape[0])

        for p in prob_list:
            inner = np.nan_to_num(p * np.log(p / avg_probs))
            member_kl = np.sum(inner, axis=1)
            avg_kl += member_kl

        # average out the KL divergence
        # (relies on the ensemble implementing __len__ -- sklearn's
        # BaggingClassifier does; confirm for custom committees)
        avg_kl /= len(self.committee)
        return avg_kl

    def _predict(self, candidate_mask):
        """ Generate prediction vectors for the unlabelled candidates. """
        n_samples = len(self.pool[candidate_mask])
        n_classes = len(self.committee.classes_)
        avg_probs = np.zeros((n_samples, n_classes))
        prob_list = []
        # NOTE(review): scipy.stats.itemfreq was removed in modern SciPy --
        # confirm the pinned version or switch to np.unique(return_counts=True).
        class_freq = itemfreq(self.labels[~self.labels.mask])

        for member in self.committee.estimators_:
            member_prob = member.predict_proba(self.pool[candidate_mask])
            member_n_classes = member_prob.shape[1]

            if n_classes == member_n_classes:
                avg_probs += member_prob
                prob_list.append(member_prob)
            else:
                # A member trained on a subset of classes: expand its output
                # to the full class set, mapping columns onto the most
                # frequent classes. Assumes class labels are 0..n_classes-1
                # so itemfreq row order matches class indices -- TODO confirm.
                member_classes = class_freq[:,1].argsort()[::-1]
                member_classes = member_classes[:member_n_classes]
                full_member_prob = np.zeros((n_samples, n_classes))
                full_member_prob[:, member_classes] += member_prob[:, range(member_n_classes)]
                avg_probs += full_member_prob
                prob_list.append(full_member_prob)

        # average out the probabilities
        avg_probs /= len(self.committee.estimators_)
        return (avg_probs, prob_list)
| {
"repo_name": "chengsoonong/mclass-sky",
"path": "mclearn/arms.py",
"copies": "1",
"size": "20980",
"license": "bsd-3-clause",
"hash": 9112674203356257000,
"line_mean": 38.5811320755,
"line_max": 96,
"alpha_frac": 0.6220802746,
"autogenerated": false,
"ratio": 4.700425722608111,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5822505997208112,
"avg_score": null,
"num_lines": null
} |
"""Active learning with uncertainty sampling.
Pool-based. Binary class labels.
Matthew Alger
The Australian National University
2016
"""
import numpy
from .sampler import Sampler
class ConfidenceUncertaintySampler(Sampler):
    """Pool-based active learning with uncertainty sampling, with uncertainty
    based on confidence."""

    def __init__(self, pool, labels, Classifier, classifier_params=None):
        """
        pool: (n_samples, n_features) array of partially labelled data points.
        labels: (n_samples,) masked array of binary labels.
        Classifier: Binary classifier class implementing a sklearn interface.
        classifier_params: Parameters to pass to Classifier. Default None.
        """
        super().__init__(pool, labels, Classifier,
                         classifier_params=classifier_params)
        self.compute_uncertainties()

    def compute_uncertainties(self):
        """Finds uncertainties for all objects in the pool."""
        # To keep things simple, I'll use the (negative) proximity to the
        # decision boundary as the uncertainty. Note that the uncertainties
        # array is masked such that labelled points have no uncertainty.
        probs = self.classifier.predict_proba(self.pool)[:, 1]
        self.uncertainties = numpy.ma.masked_array(-numpy.abs(probs - 0.5),
                                                   mask=~self.labels.mask)

    def sample_index(self):
        """Finds index of the least certain unlabelled point."""
        # argmax on a masked array ignores the masked (labelled) entries.
        return self.uncertainties.argmax()

    def sample_indicies(self, n):
        """Finds indices of the n least certain unlabelled points.

        (Misspelled name kept for backward compatibility; prefer
        ``sample_indices``.)
        """
        # Bug fix: masked-array argsort places masked (labelled) entries at
        # the END by default, so taking indices[-n:] used to return labelled
        # points. Filling masked entries with -inf sorts them FIRST, so the
        # last n entries really are the least certain unlabelled points.
        indices = self.uncertainties.argsort(fill_value=-numpy.inf)
        return indices[-n:]

    # Correctly spelled alias for new callers.
    sample_indices = sample_indicies

    def retrain(self):
        """Retrains the classifier."""
        super().retrain()
        self.compute_uncertainties()
| {
"repo_name": "chengsoonong/crowdastro",
"path": "crowdastro/active_learning/uncertainty_sampler.py",
"copies": "1",
"size": "1880",
"license": "mit",
"hash": -2139662655689205800,
"line_mean": 35.1538461538,
"line_max": 78,
"alpha_frac": 0.6505319149,
"autogenerated": false,
"ratio": 4.444444444444445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 52
} |
import web
from activerecord import ModelStorage
class Field(object):
    """Describes a single database column: its SQL type, length, key role,
    auto-increment behaviour, nullability and default value.

    All properties are supplied as keyword arguments; anything omitted (or
    passed as a falsy value) falls back to that property's setter default.
    """
    PROPERTIES = ['type', 'max_length', 'key', 'auto_increment', 'null', 'default']

    def __init__(self, **kwargs):
        # Route every known property through its set_* method so that
        # validation happens in one place per property.
        for prop in Field.PROPERTIES:
            setter = getattr(self, 'set_%s' % prop)
            value = kwargs.get(prop, None)
            if value:
                setter(value)
            else:
                setter()

    def set_type(self, type):
        # Only the SQL types below are recognised.
        allowed = ['varchar', 'tinyint', 'text', 'int', 'float', 'char', 'date', 'datetime', 'timestamp', 'enum', 'set', 'bool', 'binary']
        if type not in allowed:
            raise ValueError(type)
        self.type = type

    def set_max_length(self, max_length):
        # Coerce to int so string lengths from config files also work.
        self.max_length = int(max_length)

    def set_key(self, key=None):
        valid_keys = ['primary', 'index', 'unique']
        if key and key not in valid_keys:
            raise ValueError(key)
        self.key = key

    def set_auto_increment(self, value=False):
        if not isinstance(value, bool):
            raise ValueError(value)
        self.auto_increment = value

    def set_null(self, value=False):
        if not isinstance(value, bool):
            raise ValueError(value)
        self.null = value

    def set_default(self, value=None):
        self.default = value
class RelatedField(object):
    """Base descriptor linking a model attribute to another model.

    ``model`` may be given either as a model class or as its registered
    name (looked up via ModelStorage); ``field`` names the column holding
    the relation, or None to derive it later from the table name.
    """
    def __init__(self, model, field=None):
        if isinstance(model, basestring):
            # Resolve a registered model name to the actual class.
            model = ModelStorage.get(model)
        self.model = model
        self.field = field
class ForeignKeyField(RelatedField):
    """Descriptor for a many-to-one relation (this row points at one row
    of the related model)."""

    def __get__(self, instance, owner):
        # Accessed on the class: expose the related model itself.
        if not instance:
            return self.model
        # Derive the FK column name from the related table if not given.
        if not self.field:
            self.field = '%s_id' % self.model.Meta.table
        conditions = {self.model.Meta.pk: getattr(instance, self.field)}
        # Bug fix: the original returned dict(...)[0], which always raised
        # KeyError(0) because dicts are not indexable by position. Return
        # the model/conditions lookup dict, consistent with OneToManyField.
        return dict(model=self.model, conditions=conditions)
class OneToManyField(RelatedField):
    """Descriptor for a one-to-many relation (many rows of the related
    model point back at this row)."""

    def __get__(self, instance, owner):
        # Accessed on the class: expose the related model itself.
        if not instance:
            return self.model
        # Derive the FK column on the related model from this table's name.
        if not self.field:
            self.field = '%s_id' % instance.Meta.table
        lookup = {self.field: getattr(instance, instance.Meta.pk)}
        return dict(model=self.model, conditions=lookup)
class ManyToManyField(RelatedField):
    """Placeholder for many-to-many relations; no behaviour implemented yet."""
    pass
if __name__ == "__main__":
    # Running this module directly executes its doctests; the framework's
    # test flag is enabled first so web.py knows it is under test.
    web.config.set('istest', True)
    import doctest
    doctest.testmod()
| {
"repo_name": "fedecarg/webpy-activerecord",
"path": "src/fields.py",
"copies": "1",
"size": "3445",
"license": "mit",
"hash": -4597590910289222700,
"line_mean": 32.7745098039,
"line_max": 137,
"alpha_frac": 0.6386066763,
"autogenerated": false,
"ratio": 4.106078665077473,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.022822365661240344,
"num_lines": 102
} |
from __future__ import print_function
import collections
import functools
from itertools import ifilterfalse
from heapq import nsmallest
from operator import itemgetter
class Counter(dict):
    """A dict whose lookups report zero for unknown keys."""

    def __missing__(self, absent_key):
        # The zero is reported but not stored, so the mapping stays sparse.
        return 0
def lru_cache(maxsize=100):
    '''Least-recently-used cache decorator.

    Arguments to the cached function must be hashable.
    Cache performance statistics stored in f.hits and f.misses.
    Clear the cache with f.clear().
    http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
    '''
    # The recency queue may grow to 10x maxsize before it is compacted.
    maxqueue = maxsize * 10

    def decorating_function(user_function,
                            len=len, iter=iter, tuple=tuple, sorted=sorted, KeyError=KeyError):
        cache = {}                   # mapping of args to results
        queue = collections.deque()  # order that keys have been used
        refcount = Counter()         # times each key is in the queue
        sentinel = object()          # marker for looping around the queue
        kwd_mark = object()          # separate positional and keyword args

        # lookup optimizations (ugly but fast)
        queue_append, queue_popleft = queue.append, queue.popleft
        queue_appendleft, queue_pop = queue.appendleft, queue.pop

        @functools.wraps(user_function)
        def wrapper(*args, **kwds):
            # cache key records both positional and keyword args
            key = args
            if kwds:
                key += (kwd_mark,) + tuple(sorted(kwds.items()))

            # record recent use of this key
            queue_append(key)
            refcount[key] += 1

            # get cache entry or compute if not found
            try:
                result = cache[key]
                wrapper.hits += 1
            except KeyError:
                result = user_function(*args, **kwds)
                cache[key] = result
                wrapper.misses += 1

                # purge least recently used cache entry
                if len(cache) > maxsize:
                    key = queue_popleft()
                    refcount[key] -= 1
                    # Skip stale duplicate entries until we find a key whose
                    # refcount drops to zero -- that one is truly the LRU.
                    while refcount[key]:
                        key = queue_popleft()
                        refcount[key] -= 1
                    del cache[key], refcount[key]

            # periodically compact the queue by eliminating duplicate keys
            # while preserving order of most recent access
            # NOTE: relies on Python 2's itertools.ifilterfalse (module import).
            if len(queue) > maxqueue:
                refcount.clear()
                queue_appendleft(sentinel)
                for key in ifilterfalse(refcount.__contains__,
                                        iter(queue_pop, sentinel)):
                    queue_appendleft(key)
                    refcount[key] = 1
            return result

        def clear():
            # Reset cache contents and statistics.
            cache.clear()
            queue.clear()
            refcount.clear()
            wrapper.hits = wrapper.misses = 0

        wrapper.hits = wrapper.misses = 0
        wrapper.clear = clear
        return wrapper
    return decorating_function
def lfu_cache(maxsize=100):
    '''Least-frequently-used cache decorator.

    Arguments to the cached function must be hashable.
    Cache performance statistics stored in f.hits and f.misses.
    Clear the cache with f.clear().
    http://en.wikipedia.org/wiki/Least_Frequently_Used
    '''
    def decorating_function(user_function):
        cache = {}             # mapping of args to results
        use_count = Counter()  # times each key has been accessed
        kwd_mark = object()    # separate positional and keyword args

        @functools.wraps(user_function)
        def wrapper(*args, **kwds):
            # cache key records both positional and keyword args
            key = args
            if kwds:
                key += (kwd_mark,) + tuple(sorted(kwds.items()))
            use_count[key] += 1

            # get cache entry or compute if not found
            try:
                result = cache[key]
                wrapper.hits += 1
            except KeyError:
                result = user_function(*args, **kwds)
                cache[key] = result
                wrapper.misses += 1

                # purge least frequently used cache entry
                # (evicts the ~10% least-used keys in one sweep)
                # NOTE: relies on Python 2's dict.iteritems().
                if len(cache) > maxsize:
                    for key, _ in nsmallest(maxsize // 10,
                                            use_count.iteritems(),
                                            key=itemgetter(1)):
                        del cache[key], use_count[key]
            return result

        def clear():
            # Reset cache contents and statistics.
            cache.clear()
            use_count.clear()
            wrapper.hits = wrapper.misses = 0

        wrapper.hits = wrapper.misses = 0
        wrapper.clear = clear
        return wrapper
    return decorating_function
if __name__ == '__main__':
    # Smoke test / demo: exercise both caches on a tiny input domain so
    # most calls are repeats, then print the hit/miss statistics.
    @lru_cache(maxsize=20)
    def f(x, y):
        return 3*x+y

    domain = range(5)
    from random import choice
    for i in range(1000):
        r = f(choice(domain), choice(domain))

    print(f.hits, f.misses)

    @lfu_cache(maxsize=20)
    def f(x, y):
        return 3*x+y

    domain = range(5)
    from random import choice
    for i in range(1000):
        r = f(choice(domain), choice(domain))

    print(f.hits, f.misses)
| {
"repo_name": "zuphilip/ocropy",
"path": "ocrolib/extras/lru.py",
"copies": "3",
"size": "5375",
"license": "apache-2.0",
"hash": 1245717493039622000,
"line_mean": 31.1856287425,
"line_max": 82,
"alpha_frac": 0.5460465116,
"autogenerated": false,
"ratio": 4.47171381031614,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.651776032191614,
"avg_score": null,
"num_lines": null
} |
import collections
import functools
from itertools import ifilterfalse
from heapq import nsmallest
from operator import itemgetter
class Counter(dict):
    """Dictionary whose absent keys read as zero (a minimal tally map)."""

    def __missing__(self, key):
        # Reading a missing key yields 0 without inserting it into the dict.
        return 0
def lru_cache(maxsize=100):
    '''Least-recently-used cache decorator.

    Arguments to the cached function must be hashable.
    Cache performance statistics stored in f.hits and f.misses.
    Clear the cache with f.clear().
    http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
    '''
    # Allow the recency queue to grow to 10x the cache size before compacting.
    maxqueue = maxsize * 10

    def decorating_function(user_function,
            len=len, iter=iter, tuple=tuple, sorted=sorted, KeyError=KeyError):
        # The extra default arguments above bind builtins as fast locals.
        cache = {}                   # mapping of args to results
        queue = collections.deque()  # order that keys have been used
        refcount = Counter()         # times each key is in the queue
        sentinel = object()          # marker for looping around the queue
        kwd_mark = object()          # separate positional and keyword args

        # lookup optimizations (ugly but fast)
        queue_append, queue_popleft = queue.append, queue.popleft
        queue_appendleft, queue_pop = queue.appendleft, queue.pop

        @functools.wraps(user_function)
        def wrapper(*args, **kwds):
            # cache key records both positional and keyword args
            key = args
            if kwds:
                key += (kwd_mark,) + tuple(sorted(kwds.items()))

            # record recent use of this key
            queue_append(key)
            refcount[key] += 1

            # get cache entry or compute if not found
            try:
                result = cache[key]
                wrapper.hits += 1
            except KeyError:
                result = user_function(*args, **kwds)
                cache[key] = result
                wrapper.misses += 1

                # purge least recently used cache entry
                if len(cache) > maxsize:
                    key = queue_popleft()
                    refcount[key] -= 1
                    # skip over stale duplicate queue entries until we reach
                    # a key whose refcount has dropped to zero
                    while refcount[key]:
                        key = queue_popleft()
                        refcount[key] -= 1
                    del cache[key], refcount[key]

            # periodically compact the queue by eliminating duplicate keys
            # while preserving order of most recent access
            # NOTE(review): ifilterfalse is the Python 2 itertools name; on
            # Python 3 the module-level import would need filterfalse instead.
            if len(queue) > maxqueue:
                refcount.clear()
                queue_appendleft(sentinel)
                for key in ifilterfalse(refcount.__contains__,
                                        iter(queue_pop, sentinel)):
                    queue_appendleft(key)
                    refcount[key] = 1
            return result

        def clear():
            # Reset cache contents, the recency queue, and the statistics.
            cache.clear()
            queue.clear()
            refcount.clear()
            wrapper.hits = wrapper.misses = 0

        wrapper.hits = wrapper.misses = 0
        wrapper.clear = clear
        return wrapper
    return decorating_function
def lfu_cache(maxsize=100):
    '''Least-frequently-used cache decorator.

    Arguments to the cached function must be hashable.
    Cache performance statistics stored in f.hits and f.misses.
    Clear the cache with f.clear().
    http://en.wikipedia.org/wiki/Least_Frequently_Used
    '''
    def decorating_function(user_function):
        cache = {}              # mapping of args to results
        use_count = Counter()   # times each key has been accessed
        kwd_mark = object()     # separate positional and keyword args

        @functools.wraps(user_function)
        def wrapper(*args, **kwds):
            # cache key records both positional and keyword args
            key = args
            if kwds:
                key += (kwd_mark,) + tuple(sorted(kwds.items()))
            use_count[key] += 1
            # get cache entry or compute if not found
            try:
                result = cache[key]
                wrapper.hits += 1
            except KeyError:
                result = user_function(*args, **kwds)
                cache[key] = result
                wrapper.misses += 1
                # purge the ~10% least frequently used cache entries.
                # Bug fix: dict.iteritems() exists only on Python 2; items()
                # works on both 2 and 3 (the module's self-test already uses
                # the Python 3 print() function).
                if len(cache) > maxsize:
                    for key, _ in nsmallest(maxsize // 10,
                                            use_count.items(),
                                            key=itemgetter(1)):
                        del cache[key], use_count[key]
            return result

        def clear():
            '''Reset the cache contents and the hit/miss statistics.'''
            cache.clear()
            use_count.clear()
            wrapper.hits = wrapper.misses = 0

        wrapper.hits = wrapper.misses = 0
        wrapper.clear = clear
        return wrapper
    return decorating_function
if __name__ == '__main__':
    # Smoke test: hammer each decorator with random 2-argument calls drawn
    # from a small domain, then report the accumulated hit/miss counters.
    @lru_cache(maxsize=20)
    def f(x, y):
        return 3*x+y
    domain = range(5)
    from random import choice
    for i in range(1000):
        r = f(choice(domain), choice(domain))
    print(f.hits, f.misses)

    # Same exercise for the LFU variant (deliberately rebinds the name f).
    @lfu_cache(maxsize=20)
    def f(x, y):
        return 3*x+y
    domain = range(5)
    from random import choice
    for i in range(1000):
        r = f(choice(domain), choice(domain))
    print(f.hits, f.misses)
| {
"repo_name": "brobertson/ocropus-bgr",
"path": "ocropy/ocrolib/extras/lru.py",
"copies": "11",
"size": "5336",
"license": "apache-2.0",
"hash": -3571663257881831000,
"line_mean": 31.3393939394,
"line_max": 82,
"alpha_frac": 0.5446026987,
"autogenerated": false,
"ratio": 4.476510067114094,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""ActiveTick bar data."""
import activetickpy.convert as convert
class Bar:
    """One ActiveTick OHLCV bar.

    Attributes filled in by update() rather than the constructor:
    milliseconds -- time in milliseconds since the Epoch
    open_price   -- open price
    high_price   -- high price
    low_price    -- low price
    close_price  -- close price
    volume       -- volume
    """

    def __init__(self, symbol, history_type, intraday_minutes):
        """Create an empty bar for the given symbol.

        Keyword arguments:
        symbol -- ticker symbol
        history_type -- 0=intraday, 1=daily, 2=weekly
        intraday_minutes -- 1-60 if history_type=0; 0, otherwise
        """
        self.symbol = symbol
        self.history_type = history_type
        self.intraday_minutes = intraday_minutes
        # OHLCV fields start zeroed; update() populates them from CSV data.
        self.milliseconds = 0
        self.open_price = 0.0
        self.high_price = 0.0
        self.low_price = 0.0
        self.close_price = 0.0
        self.volume = 0

    def __repr__(self):
        """Return a compact printable representation of this bar."""
        return 'S:{0} HT:{1}|{2} MS:{3} O:{4} H:{5} L:{6} C:{7} V:{8}'.format(
            self.symbol, self.history_type, self.intraday_minutes,
            self.milliseconds, self.open_price, self.high_price,
            self.low_price, self.close_price, self.volume)

    def update(self, csv_string):
        """Refresh this bar from one line of a barData server response.

        Keyword arguments:
        csv_string -- data received from an ActiveTickFeed HTTP server barData
        request
        """
        fields = csv_string.split(',')
        # A non-positive first field leaves the bar untouched -- presumably a
        # placeholder/empty record; TODO confirm against the feed docs.
        if int(fields[0]) > 0:
            self.milliseconds = convert.at_time_to_ms(fields[0],
                                                      include_ms=False)
            prices = [float(value) for value in fields[1:5]]
            self.open_price, self.high_price = prices[0], prices[1]
            self.low_price, self.close_price = prices[2], prices[3]
            self.volume = int(fields[5])
| {
"repo_name": "larmer01/activetickpy",
"path": "bar.py",
"copies": "1",
"size": "2081",
"license": "apache-2.0",
"hash": -934181751546233000,
"line_mean": 32.0317460317,
"line_max": 78,
"alpha_frac": 0.5732820759,
"autogenerated": false,
"ratio": 3.956273764258555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5029555840158555,
"avg_score": null,
"num_lines": null
} |
"""ActiveTick quote data."""
import activetickpy.convert as convert
class Quote:
    """ActiveTick quote data.

    Attributes not specified in the constructor:
    symbol
    open_price
    previous_close_price
    close_price
    last_price
    bid_price
    ask_price
    high_price
    low_price
    day_high_price
    day_low_price
    pre_market_open_price
    extended_hours_last_price
    after_market_close_price
    bid_exchange
    ask_exchange
    last_exchange
    last_condition
    quote_condition
    last_trade_milliseconds
    last_quote_milliseconds
    day_high_milliseconds
    day_low_milliseconds
    last_size
    bid_size
    ask_size
    volume
    pre_market_volume
    after_market_volume
    trade_count
    pre_market_trade_count
    after_market_trade_count
    fundamental_equity_name
    fundamental_equity_primary_exchange
    """

    # ActiveTick field code -> attribute that stores the converted value
    # unchanged.  Replaces the original 30-branch if/elif chain.
    _PLAIN_FIELDS = {
        1: 'symbol',
        2: 'open_price',
        3: 'previous_close_price',
        4: 'close_price',
        5: 'last_price',
        6: 'bid_price',
        7: 'ask_price',
        8: 'high_price',
        9: 'low_price',
        10: 'day_high_price',
        11: 'day_low_price',
        12: 'pre_market_open_price',
        13: 'extended_hours_last_price',
        14: 'after_market_close_price',
        18: 'last_condition',
        19: 'quote_condition',
        20: 'last_trade_milliseconds',
        21: 'last_quote_milliseconds',
        22: 'day_high_milliseconds',
        23: 'day_low_milliseconds',
        24: 'last_size',
        25: 'bid_size',
        26: 'ask_size',
        27: 'volume',
        28: 'pre_market_volume',
        29: 'after_market_volume',
        30: 'trade_count',
        31: 'pre_market_trade_count',
        32: 'after_market_trade_count',
        33: 'fundamental_equity_name',
    }
    # Field codes whose value is additionally mapped through
    # convert.get_exchange().
    _EXCHANGE_FIELDS = {
        15: 'bid_exchange',
        16: 'ask_exchange',
        17: 'last_exchange',
        34: 'fundamental_equity_primary_exchange',
    }

    def __init__(self):
        """Initialize a new instance of a Quote with zeroed/empty fields."""
        self.symbol = ''
        self.open_price = 0.0
        self.previous_close_price = 0.0
        self.close_price = 0.0
        self.last_price = 0.0
        self.bid_price = 0.0
        self.ask_price = 0.0
        self.high_price = 0.0
        self.low_price = 0.0
        self.day_high_price = 0.0
        self.day_low_price = 0.0
        self.pre_market_open_price = 0.0
        self.extended_hours_last_price = 0.0
        self.after_market_close_price = 0.0
        self.bid_exchange = None
        self.ask_exchange = None
        self.last_exchange = None
        self.last_condition = None
        self.quote_condition = None
        self.last_trade_milliseconds = 0
        self.last_quote_milliseconds = 0
        self.day_high_milliseconds = 0
        self.day_low_milliseconds = 0
        self.last_size = 0
        self.bid_size = 0
        self.ask_size = 0
        self.volume = 0
        self.pre_market_volume = 0
        self.after_market_volume = 0
        self.trade_count = 0
        self.pre_market_trade_count = 0
        self.after_market_trade_count = 0
        self.fundamental_equity_name = ''
        self.fundamental_equity_primary_exchange = ''

    def __update_data__(self, field_code, type_code, data):
        """Update the specified field in this Quote.

        Keyword arguments:
        field_code -- integer representing the field to be updated
        type_code -- integer representing the target data type
        data -- data as a string
        """
        typed_data = convert.get_typed_data(type_code, data)
        if field_code in self._EXCHANGE_FIELDS:
            setattr(self, self._EXCHANGE_FIELDS[field_code],
                    convert.get_exchange(typed_data))
        elif field_code in self._PLAIN_FIELDS:
            setattr(self, self._PLAIN_FIELDS[field_code], typed_data)
        # Unknown field codes are silently ignored, matching the original
        # if/elif chain's fall-through behavior.

    def update(self, csv_string):
        """Update the attributes of this Quote with data from the specified CSV
        string.

        Keyword arguments:
        csv_string -- data received from an ActiveTickFeed HTTP server
        quoteData request

        Raises RuntimeError when the server reports a non-empty status for
        the symbol or for any individual field.
        """
        elements = csv_string.split(',')
        # General information
        self.symbol = elements[0]
        status = convert.get_status(int(elements[1]))
        if len(status) > 0:
            raise RuntimeError(status)
        # Quote fields arrive as repeating 4-tuples:
        # (field_code, status, type_code, data)
        for index in range(2, len(elements), 4):
            field_code = int(elements[index])
            status = convert.get_status(int(elements[index + 1]))
            if len(status) > 0:
                raise RuntimeError(status)
            type_code = int(elements[index + 2])
            data = elements[index + 3]
            self.__update_data__(field_code, type_code, data)
| {
"repo_name": "larmer01/activetickpy",
"path": "quote.py",
"copies": "1",
"size": "6159",
"license": "apache-2.0",
"hash": 2678332728569000400,
"line_mean": 32.1129032258,
"line_max": 79,
"alpha_frac": 0.5736320831,
"autogenerated": false,
"ratio": 3.903041825095057,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49766739081950573,
"avg_score": null,
"num_lines": null
} |
"""ActiveTick tick data."""
import activetickpy.convert as convert
class QuoteTick:
    """One tick of ActiveTick quote (bid/ask) data.

    Attributes filled in by update() rather than the constructor:
    milliseconds
    bid_price
    ask_price
    bid_size
    ask_size
    bid_exchange
    ask_exchange
    condition
    """

    def __init__(self, symbol):
        """Create an empty quote tick for the given symbol.

        Keyword arguments:
        symbol -- ticker symbol
        """
        self.symbol = symbol
        self.milliseconds = 0
        self.bid_price = 0.0
        self.ask_price = 0.0
        self.bid_size = 0
        self.ask_size = 0
        self.bid_exchange = ''
        self.ask_exchange = ''
        self.condition = ''

    def update(self, csv_string):
        """Refresh this tick from one line of a tickData server response.

        Keyword arguments:
        csv_string -- data received from an ActiveTickFeed HTTP server tickData
        request
        """
        fields = csv_string.split(',')
        # fields[0] is the record identifier ('Q') and is not stored.
        self.milliseconds = convert.at_time_to_ms(fields[1])
        self.bid_price, self.ask_price = float(fields[2]), float(fields[3])
        self.bid_size, self.ask_size = int(fields[4]), int(fields[5])
        self.bid_exchange = convert.get_exchange(fields[6])
        self.ask_exchange = convert.get_exchange(fields[7])
        self.condition = convert.get_trade_condition(int(fields[8]))
class TradeTick:
    """ActiveTick trade data for a single tick.
    Attributes not specified in the constructor:
    milliseconds
    last_price
    last_size
    last_exchange
    condition1
    condition2
    condition3
    condition4
    """
    def __init__(self, symbol):
        """Initialize a new instance of a TradeTick.
        Keyword arguments:
        symbol -- ticker symbol
        """
        self.symbol = symbol
        # Trade fields start zeroed/empty and are populated by update().
        self.milliseconds = 0
        self.last_price = 0.0
        self.last_size = 0
        self.last_exchange = ''
        self.condition1 = ''
        self.condition2 = ''
        self.condition3 = ''
        self.condition4 = ''
    def update(self, csv_string):
        """Update the attributes of this TradeTick with data from the specified
        CSV string.
        Keyword arguments:
        csv_string -- data received from an ActiveTickFeed HTTP server tickData
        request
        """
        elements = csv_string.split(',')
        # elements[0] is the record identifier ('T') and is not stored.
        #record_identifier = elements[0]
        self.milliseconds = convert.at_time_to_ms(elements[1])
        self.last_price = float(elements[2])
        self.last_size = int(elements[3])
        self.last_exchange = convert.get_exchange(elements[4])
        # Up to four condition codes accompany each trade in the feed.
        self.condition1 = convert.get_trade_condition(int(elements[5]))
        self.condition2 = convert.get_trade_condition(int(elements[6]))
        self.condition3 = convert.get_trade_condition(int(elements[7]))
        self.condition4 = convert.get_trade_condition(int(elements[8]))
def get_tick(symbol, csv_string):
    """Build the right tick object for one tickData CSV line.

    The first CSV field (the record identifier) selects the type:
    'T' yields a TradeTick, 'Q' yields a QuoteTick, and anything
    else yields None.

    Keyword arguments:
    symbol -- ticker symbol
    csv_string -- data received from an ActiveTickFeed HTTP server tickData
    request
    """
    record_identifier = csv_string.split(',', 1)[0]
    tick_classes = {'T': TradeTick, 'Q': QuoteTick}
    tick_class = tick_classes.get(record_identifier)
    if tick_class is None:
        return None
    tick = tick_class(symbol)
    tick.update(csv_string)
    return tick
| {
"repo_name": "larmer01/activetickpy",
"path": "tick.py",
"copies": "1",
"size": "3817",
"license": "apache-2.0",
"hash": -6860955851824296000,
"line_mean": 28.1374045802,
"line_max": 79,
"alpha_frac": 0.6106890228,
"autogenerated": false,
"ratio": 4.148913043478261,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5259602066278262,
"avg_score": null,
"num_lines": null
} |
'''activeurl django template library'''
from django import template
from classytags.core import Tag, Options
from classytags.arguments import MultiKeywordArgument
from ..utils import render_content, Configuration
# django template library
register = template.Library()
class ActiveUrl(Tag, Configuration):
    '''django template tag via django-classy-tags'''
    # tag name
    name = 'activeurl'
    # template tag arguments
    options = Options(
        # all key based arguments mapped to one dict
        MultiKeywordArgument('kwargs', required=False),
        # everything up to {% endactiveurl %} is captured as `nodelist`
        blocks=[('endactiveurl', 'nodelist')]
    )
    def render_tag(self, context, kwargs, nodelist):
        '''render content with "active" urls logic'''
        # load configuration from passed options
        self.load_configuration(kwargs)
        # get request from context
        # NOTE(review): assumes the request context processor is enabled so
        # that 'request' is present in the template context -- confirm.
        request = context['request']
        # get full path from request
        self.full_path = request.get_full_path()
        # render content of template tag
        # push/pop isolates any variables set while rendering the inner block
        context.push()
        content = nodelist.render(context)
        context.pop()
        # check content for "active" urls
        # parent_tag/css_class/menu presumably come from load_configuration()
        # in ..utils.Configuration -- verify there.
        content = render_content(
            content, self.full_path, self.parent_tag, self.css_class, self.menu
        )
        return content
# register new template tag
register.tag(ActiveUrl)
| {
"repo_name": "zakdoek/django-activeurl",
"path": "django_activeurl/templatetags/activeurl.py",
"copies": "1",
"size": "1349",
"license": "apache-2.0",
"hash": 1410020281034700000,
"line_mean": 25.4509803922,
"line_max": 79,
"alpha_frac": 0.661971831,
"autogenerated": false,
"ratio": 4.526845637583893,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 51
} |
"""actividades URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url, patterns
from django.contrib import admin
from plataforma_fadcanic.settings import MEDIA_ROOT, DEBUG
# Root URL routes: home/report/export views, AJAX endpoints, per-app includes
# and a short-link redirect view.
# NOTE(review): string view references and patterns() are Django <= 1.9 idioms.
urlpatterns = [
    #url(r'^admin/', include(admin.site.urls)),
    url(r'^$', 'actividades.contraparte.views.filtro_proyecto', name='home'),
    url(r'^xls/$', 'actividades.utils.save_as_xls', name='save_xls' ),
    url(r'^report/$', 'actividades.contraparte.views.generate_report', name='generate_report' ),
    url(r'^ajax/proyectos/$', 'actividades.contraparte.views.get_proyectos', name='get_proyectos' ),
    url(r'^ajax/salidas/$', 'actividades.contraparte.views.get_salidas', name='get_salidas' ),
    url(r'^actividades_pdf/(?P<id_actividad>[0-9]+)/$', 'actividades.views.actividad_pdf', name='actividad_pdf'),
    url(r'^fillout/', include('actividades.formutils.urls')),
    url(r'^proyecto/', include('actividades.contraparte.urls')),
    url(r'^programa/', include('actividades.fadcanic.urls')),
    url(r'^chaining/', include('smart_selects.urls')),
    #url(r'^admin/', include(admin.site.urls)),
    url(r'^i/(?P<hash>\w+)$', 'actividades.contraparte.views.shortview', name='shortview'),
]
urlpatterns += patterns('actividades.contraparte.views',
    url(r'^variables/$', 'variables', name='variables'),
    url(r'^variables/output/$', 'output', name='output'),
)
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns += staticfiles_urlpatterns()
# Serve uploaded media files directly only during development.
if DEBUG:
    urlpatterns += patterns('',
        (r'^uploads/(?P<path>.*)$', 'django.views.static.serve', {'document_root': MEDIA_ROOT}),
    )
| {
"repo_name": "shiminasai/plataforma_fadcanic",
"path": "actividades/urls.py",
"copies": "3",
"size": "2252",
"license": "mit",
"hash": -3270612031543439000,
"line_mean": 41.4905660377,
"line_max": 113,
"alpha_frac": 0.6722912966,
"autogenerated": false,
"ratio": 3.194326241134752,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.027492768374684957,
"num_lines": 53
} |
"""Activities model."""
from django.db import models
from ..templatetags.util import human_duration
class Itinerary(models.Model):
    """An itinerary contains multiple scheduled activities."""
    name = models.CharField(
        verbose_name="Name",
        help_text="Itinerary's name.",
        max_length=50,
        blank=False,
    )
    # M2M through ItineraryActivity so each link can carry ordering and
    # follow-* scheduling metadata.
    activities = models.ManyToManyField(
        "ScheduledActivity",
        through="ItineraryActivity",
        through_fields=('itinerary', 'scheduled_activity'),
        verbose_name="Activities",
        help_text="Activities under this itinerary.",
        related_name="itineraries",
        blank=True,
    )

    def __str__(self):
        """Return '<id> <name>[<activity count>]'."""
        # Bug fix: the original `self.activities.all.count()` accessed
        # `.count` on the unbound `all` method (AttributeError at runtime);
        # the related manager's count() queries the count directly.
        return '{id} {name}[{count}]'.format(
            id=self.id,
            name=self.name,
            count=self.activities.count(),
        )
class ItineraryActivity(models.Model):
    """An scheduled activity linked to an itinerary"""
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0, where
    # the default was CASCADE -- confirm before upgrading.
    itinerary = models.ForeignKey(
        Itinerary,
        help_text="Itinerary to where this activity is linked.",
        blank=True,
        null=True,
    )
    scheduled_activity = models.ForeignKey(
        "ScheduledActivity",
        help_text="The scheduled activity this link refers to.",
        related_name='scheduled_activity',
        blank=True,
        null=True,
    )
    # Manual position, used only when the activity has no start date/time.
    order = models.SmallIntegerField(
        verbose_name="Order",
        help_text=(
            "Use this order when no start date/time has been specified on the"
            " activity."
        ),
        blank=True,
        null=True,
    )
    preceding_activity = models.ForeignKey(
        "ScheduledActivity",
        help_text="Link the activity to this one.",
        related_name='preceding_scheduled_activity',
        blank=True,
        null=True,
    )
    # The follow_* flags refine how this activity is placed relative to
    # preceding_activity; NullBooleanField leaves room for an "unset" state.
    follow_immediately = models.NullBooleanField(
        verbose_name="Follow preceding activity immediately.",
        help_text=(
            "Whether the activity should follow immediately afterwards the"
            " preceding activity."
        ),
    )
    follow_same_day = models.NullBooleanField(
        verbose_name="Follow preceding activity on the same day.",
        help_text=(
            "Whether the activity should follow the preceding activity on the"
            " same day."
        ),
    )
    follow_next_day = models.NullBooleanField(
        verbose_name="Follow preceding activity on the next day.",
        help_text=(
            "Whether the activity should follow the preceding activity on the"
            " next day."
        ),
    )
    force_follow = models.NullBooleanField(
        verbose_name=(
            "Follow preceding activity regardless of the itinerary it belongs."
        ),
        help_text=(
            "Whether the activity should follow the preceding activity no"
            "matter the itinerary it belongs. It becomes itinerary headless."
        ),
    )

    def __str__(self):
        """Return '<itinerary> - <scheduled activity>'."""
        return '{itinerary} - {activity}'.format(
            itinerary=self.itinerary,
            activity=self.scheduled_activity,
        )
class Activity(models.Model):
    """An activity."""
    # Duration is stored in whole minutes; None means unspecified.
    duration = models.PositiveIntegerField(
        verbose_name="Duration",
        help_text="Activity's duration in minutes.",
        blank=True,
        null=True,
    )
    short_description = models.CharField(
        verbose_name="Short Description",
        help_text="Short description of this activity.",
        max_length=60,
        blank=False,
    )
    long_description = models.TextField(
        verbose_name="Long Description",
        help_text="Long description about this activity.",
        blank=True,
    )
    to_dos = models.ManyToManyField(
        "ToDo",
        verbose_name="To Dos",
        help_text="To dos related with this activity.",
        related_name="activities",
        blank=True,
    )
    places = models.ManyToManyField(
        "Place",
        verbose_name="Place",
        help_text="Places related with this activity",
        related_name='activities',
        blank=True,
    )

    def __str__(self):
        """Return '<short description> (<human-friendly duration>)'."""
        return '{desc} ({duration})'.format(
            desc=self.short_description,
            duration=self.friendly_duration,
        )

    @property
    def friendly_duration(self):
        """Return a human friendly duration."""
        # Delegates formatting to the shared human_duration template helper.
        return human_duration(self.duration)
class ScheduledActivity(models.Model):
    """An activity linked to a date/time."""
    # All scheduling fields are optional; an unscheduled activity can rely on
    # ItineraryActivity ordering instead.
    start_date = models.DateField(
        verbose_name="Start Date",
        help_text="Activity's start date.",
        blank=True,
        null=True,
    )
    end_date = models.DateField(
        verbose_name="End Date",
        help_text="Activity's end date.",
        blank=True,
        null=True,
    )
    start_time = models.TimeField(
        verbose_name="Start Time",
        help_text="Activity's start time",
        blank=True,
        null=True,
    )
    end_time = models.TimeField(
        verbose_name="End Time",
        help_text="Activity's end time",
        blank=True,
        null=True,
    )
    # Duration in whole minutes; presumably overrides the base Activity's
    # duration for this occurrence -- confirm with callers.
    duration = models.PositiveIntegerField(
        verbose_name="Duration",
        help_text="Activity's duration in minutes.",
        blank=True,
        null=True,
    )
    activity = models.ForeignKey(
        Activity,
        verbose_name="Activity",
        help_text="Activity.",
        null=False,
    )

    def __str__(self):
        """Display as the underlying activity's short description."""
        return '{activity}'.format(
            activity=self.activity.short_description,
        )
class ToDo(models.Model):
    """A ToDo item."""
    place = models.ForeignKey(
        "Place",
        verbose_name="Place",
        help_text="Place this to-do references.",
        related_name='to_dos',
        blank=True,
        null=True,
    )
    short_description = models.CharField(
        verbose_name="Short Description",
        help_text="Short description of this to-do.",
        max_length=60,
        blank=False,
    )
    long_description = models.TextField(
        verbose_name="Long Description",
        help_text="Long description about this to-do.",
        blank=True,
    )
    # Completion state; date_of_completion records when it happened.
    is_done = models.BooleanField(
        verbose_name="Done?",
        help_text="Indicates if this to-do have been accomplished.",
        default=False,
    )
    date_of_completion = models.DateField(
        verbose_name="Completion Date",
        help_text="Date when this to-do was accomplished.",
        blank=True,
        null=True,
    )

    def __str__(self):
        """Display as the short description."""
        return self.short_description
| {
"repo_name": "jricardo27/holiday_planner",
"path": "holiday_planner/holiday_place/models/activity.py",
"copies": "1",
"size": "6576",
"license": "bsd-3-clause",
"hash": -2652502387636516400,
"line_mean": 24.8897637795,
"line_max": 79,
"alpha_frac": 0.5843978102,
"autogenerated": false,
"ratio": 4.372340425531915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 254
} |
# activity/controllers.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .models import ActivityComment, ActivityNoticeSeed, ActivityManager, ActivityNotice, ActivityPost, \
NOTICE_ACTIVITY_POST_SEED, NOTICE_FRIEND_ACTIVITY_POSTS, \
NOTICE_FRIEND_ENDORSEMENTS, NOTICE_FRIEND_ENDORSEMENTS_SEED, \
NOTICE_VOTER_DAILY_SUMMARY, NOTICE_VOTER_DAILY_SUMMARY_SEED
from config.base import get_environment_variable
from django.utils.timezone import now
from friend.models import FriendManager
import json
from datetime import timedelta
from reaction.models import ReactionManager
from voter.models import \
NOTIFICATION_FRIEND_OPINIONS_OTHER_REGIONS_EMAIL, NOTIFICATION_FRIEND_OPINIONS_OTHER_REGIONS_SMS, \
NOTIFICATION_FRIEND_OPINIONS_YOUR_BALLOT_EMAIL, NOTIFICATION_FRIEND_OPINIONS_YOUR_BALLOT_SMS,\
NOTIFICATION_VOTER_DAILY_SUMMARY_EMAIL, NOTIFICATION_VOTER_DAILY_SUMMARY_SMS, \
VoterDeviceLinkManager, VoterManager
import wevote_functions.admin
from wevote_functions.functions import is_voter_device_id_valid, positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL")
def delete_activity_comments_for_voter(voter_to_delete_we_vote_id, from_organization_we_vote_id):
    """
    Permanently delete all ActivityComment entries created by the given voter,
    matching on either the commenter voter we_vote_id or the commenter
    organization we_vote_id.

    :param voter_to_delete_we_vote_id: we_vote_id of the voter being deleted
    :param from_organization_we_vote_id: we_vote_id of that voter's organization
    :return: dict with status, success, the incoming voter id, and the number
        of entries deleted
    """
    status = ''
    success = True
    activity_comment_entries_deleted = 0
    if not positive_value_exists(voter_to_delete_we_vote_id):
        status += "DELETE_ACTIVITY_COMMENTS-MISSING_EITHER_FROM_OR_TO_VOTER_WE_VOTE_ID "
        success = False
        results = {
            'status': status,
            'success': success,
            'voter_to_delete_we_vote_id': voter_to_delete_we_vote_id,
            'activity_comment_entries_deleted': activity_comment_entries_deleted,
        }
        return results
    # Bug fix: QuerySet.delete() returns a (total_deleted, per_model_counts)
    # tuple.  The original "+= ...delete()" added that tuple to an int, which
    # raised TypeError (swallowed by the broad except below) and left the
    # counter at zero even though the rows were deleted.
    # Delete based on commenter_voter_we_vote_id
    try:
        entries_deleted, _ = ActivityComment.objects\
            .filter(commenter_voter_we_vote_id__iexact=voter_to_delete_we_vote_id)\
            .delete()
        activity_comment_entries_deleted += entries_deleted
    except Exception as e:
        status += "FAILED-ACTIVITY_COMMENT_UPDATE-INCLUDING_ORG_UPDATE " + str(e) + " "
    # #############################################
    # Delete based on organization_we_vote_id
    try:
        entries_deleted, _ = ActivityComment.objects \
            .filter(commenter_organization_we_vote_id__iexact=from_organization_we_vote_id) \
            .delete()
        activity_comment_entries_deleted += entries_deleted
    except Exception as e:
        status += "FAILED-ACTIVITY_COMMENT_DELETE-FROM_ORG_WE_VOTE_ID " + str(e) + " "
    results = {
        'status': status,
        'success': success,
        'voter_to_delete_we_vote_id': voter_to_delete_we_vote_id,
        'activity_comment_entries_deleted': activity_comment_entries_deleted,
    }
    return results
def delete_activity_notices_for_voter(voter_to_delete_we_vote_id, from_organization_we_vote_id):
    """
    Permanently delete ActivityNoticeSeed and ActivityNotice entries tied to
    the given voter -- matching as speaker (by voter or organization
    we_vote_id) and as notice recipient.

    :param voter_to_delete_we_vote_id: we_vote_id of the voter being deleted
    :param from_organization_we_vote_id: we_vote_id of that voter's organization
    :return: dict with status, success, the incoming voter id, and deletion
        counts for seeds and notices
    """
    status = ''
    success = True
    activity_notice_seed_entries_deleted = 0
    activity_notice_entries_deleted = 0
    if not positive_value_exists(voter_to_delete_we_vote_id):
        status += "DELETE_ACTIVITY_NOTICE_SEEDS-MISSING_VOTER_WE_VOTE_ID "
        success = False
        results = {
            'status': status,
            'success': success,
            'voter_to_delete_we_vote_id': voter_to_delete_we_vote_id,
            'activity_notice_seed_entries_deleted': activity_notice_seed_entries_deleted,
            'activity_notice_entries_deleted': activity_notice_entries_deleted,
        }
        return results
    # Bug fix (all blocks below): QuerySet.delete() returns a
    # (total_deleted, per_model_counts) tuple.  The original "+= ...delete()"
    # added that tuple to an int, which raised TypeError (swallowed by the
    # broad excepts) and left the counters at zero even though rows were
    # deleted.
    # Delete seeds and notices where this voter is the speaker
    try:
        entries_deleted, _ = ActivityNoticeSeed.objects\
            .filter(speaker_voter_we_vote_id__iexact=voter_to_delete_we_vote_id)\
            .delete()
        activity_notice_seed_entries_deleted += entries_deleted
    except Exception as e:
        status += "FAILED-ACTIVITY_NOTICE_SEED_UPDATE-INCLUDING_ORG_UPDATE " + str(e) + " "
    try:
        entries_deleted, _ = ActivityNotice.objects\
            .filter(speaker_voter_we_vote_id__iexact=voter_to_delete_we_vote_id)\
            .delete()
        activity_notice_entries_deleted += entries_deleted
    except Exception as e:
        status += "FAILED-ACTIVITY_NOTICE_UPDATE-INCLUDING_ORG_UPDATE " + str(e) + " "
    # #############################################
    # Delete based on speaker_organization_we_vote_id
    try:
        entries_deleted, _ = ActivityNoticeSeed.objects \
            .filter(speaker_organization_we_vote_id__iexact=from_organization_we_vote_id) \
            .delete()
        activity_notice_seed_entries_deleted += entries_deleted
    except Exception as e:
        status += "FAILED-ACTIVITY_NOTICE_SEED_UPDATE-FROM_ORG_WE_VOTE_ID " + str(e) + " "
    try:
        entries_deleted, _ = ActivityNotice.objects \
            .filter(speaker_organization_we_vote_id__iexact=from_organization_we_vote_id) \
            .delete()
        activity_notice_entries_deleted += entries_deleted
    except Exception as e:
        status += "FAILED-ACTIVITY_NOTICE_UPDATE-FROM_ORG_WE_VOTE_ID " + str(e) + " "
    # Now delete ActivityNotice entries where this voter is the recipient
    try:
        entries_deleted, _ = ActivityNotice.objects \
            .filter(recipient_voter_we_vote_id__iexact=voter_to_delete_we_vote_id) \
            .delete()
        activity_notice_entries_deleted += entries_deleted
    except Exception as e:
        status += "FAILED-ACTIVITY_NOTICE_UPDATE-RECIPIENT " + str(e) + " "
    results = {
        'status': status,
        'success': success,
        'voter_to_delete_we_vote_id': voter_to_delete_we_vote_id,
        'activity_notice_seed_entries_deleted': activity_notice_seed_entries_deleted,
        'activity_notice_entries_deleted': activity_notice_entries_deleted,
    }
    return results
def delete_activity_posts_for_voter(voter_to_delete_we_vote_id, from_organization_we_vote_id):
    """
    Permanently delete all ActivityPost entries created by the given voter,
    matching on either the speaker voter we_vote_id or the speaker
    organization we_vote_id.

    :param voter_to_delete_we_vote_id: we_vote_id of the voter being deleted
    :param from_organization_we_vote_id: we_vote_id of that voter's organization
    :return: dict with status, success, the incoming voter id, and the number
        of entries deleted
    """
    status = ''
    success = True
    activity_post_entries_deleted = 0
    if not positive_value_exists(voter_to_delete_we_vote_id):
        status += "DELETE_ACTIVITY_POSTS-MISSING_EITHER_FROM_OR_TO_VOTER_WE_VOTE_ID "
        success = False
        results = {
            'status': status,
            'success': success,
            'voter_to_delete_we_vote_id': voter_to_delete_we_vote_id,
            'activity_post_entries_deleted': activity_post_entries_deleted,
        }
        return results
    # Bug fix: QuerySet.delete() returns a (total_deleted, per_model_counts)
    # tuple.  The original "+= ...delete()" added that tuple to an int, which
    # raised TypeError (swallowed by the broad except below) and left the
    # counter at zero even though the rows were deleted.
    # Delete based on speaker_voter_we_vote_id
    try:
        entries_deleted, _ = ActivityPost.objects\
            .filter(speaker_voter_we_vote_id__iexact=voter_to_delete_we_vote_id)\
            .delete()
        activity_post_entries_deleted += entries_deleted
    except Exception as e:
        status += "FAILED-ACTIVITY_POST_UPDATE-INCLUDING_ORG_UPDATE " + str(e) + " "
    # #############################################
    # Delete based on speaker_organization_we_vote_id
    try:
        entries_deleted, _ = ActivityPost.objects \
            .filter(speaker_organization_we_vote_id__iexact=from_organization_we_vote_id) \
            .delete()
        activity_post_entries_deleted += entries_deleted
    except Exception as e:
        status += "FAILED-ACTIVITY_POST_DELETE-FROM_ORG_WE_VOTE_ID " + str(e) + " "
    results = {
        'status': status,
        'success': success,
        'voter_to_delete_we_vote_id': voter_to_delete_we_vote_id,
        'activity_post_entries_deleted': activity_post_entries_deleted,
    }
    return results
def move_activity_comments_to_another_voter(
        from_voter_we_vote_id, to_voter_we_vote_id, from_organization_we_vote_id, to_organization_we_vote_id,
        to_voter=None):
    """
    Transfer ownership of ActivityComment entries from one voter to another (e.g. during an
    account merge), refreshing commenter_name and the profile image urls from the target voter.
    Individual bulk-update failures are appended to status but do not stop processing and do
    NOT set success to False.
    :param from_voter_we_vote_id: we_vote_id of the voter losing the comments (required)
    :param to_voter_we_vote_id: we_vote_id of the voter receiving the comments (required)
    :param from_organization_we_vote_id: we_vote_id of the organization linked to the from_voter
    :param to_organization_we_vote_id: we_vote_id of the organization linked to the to_voter;
        when empty, org-linked comments are left untouched and the org field is not rewritten
    :param to_voter: Voter object used as the source of the new name/photos; may be None,
        in which case the name stays '' and the photo urls stay None (see try/except below)
    :return: results dict with status, success, both voter ids, and the number of rows updated
    """
    status = ''
    success = True
    activity_comment_entries_moved = 0
    # Guard clause: both voter we_vote_ids are required
    if not positive_value_exists(from_voter_we_vote_id) or not positive_value_exists(to_voter_we_vote_id):
        status += "MOVE_ACTIVITY_COMMENTS-MISSING_EITHER_FROM_OR_TO_VOTER_WE_VOTE_ID "
        success = False
        results = {
            'status': status,
            'success': success,
            'from_voter_we_vote_id': from_voter_we_vote_id,
            'to_voter_we_vote_id': to_voter_we_vote_id,
            'activity_comment_entries_moved': activity_comment_entries_moved,
        }
        return results
    # Guard clause: moving a voter onto itself is a no-op and treated as an error
    if from_voter_we_vote_id == to_voter_we_vote_id:
        status += "MOVE_ACTIVITY_COMMENTS-FROM_AND_TO_VOTER_WE_VOTE_IDS_IDENTICAL "
        success = False
        results = {
            'status': status,
            'success': success,
            'from_voter_we_vote_id': from_voter_we_vote_id,
            'to_voter_we_vote_id': to_voter_we_vote_id,
            'activity_comment_entries_moved': activity_comment_entries_moved,
        }
        return results

    # ######################
    # Migrations
    # Pull the replacement display name and photos from the destination voter.
    # If to_voter is None (or lacks these attributes) the defaults below are kept.
    to_voter_commenter_name = ''
    commenter_profile_image_url_medium = None
    commenter_profile_image_url_tiny = None
    try:
        to_voter_commenter_name = to_voter.get_full_name()
        commenter_profile_image_url_medium = to_voter.we_vote_hosted_profile_image_url_medium
        commenter_profile_image_url_tiny = to_voter.we_vote_hosted_profile_image_url_tiny
    except Exception as e:
        status += "UNABLE_TO_GET_NAME_OR_PHOTOS: " + str(e) + " "
    if positive_value_exists(to_organization_we_vote_id):
        # Move based on commenter_voter_we_vote_id
        try:
            # QuerySet.update() returns the number of rows matched, which is accumulated here
            activity_comment_entries_moved += ActivityComment.objects\
                .filter(commenter_voter_we_vote_id__iexact=from_voter_we_vote_id)\
                .update(commenter_name=to_voter_commenter_name,
                        commenter_voter_we_vote_id=to_voter_we_vote_id,
                        commenter_organization_we_vote_id=to_organization_we_vote_id,
                        commenter_profile_image_url_medium=commenter_profile_image_url_medium,
                        commenter_profile_image_url_tiny=commenter_profile_image_url_tiny)
        except Exception as e:
            status += "FAILED-ACTIVITY_COMMENT_UPDATE-INCLUDING_ORG_UPDATE: " + str(e) + " "
        # #############################################
        # Move based on commenter_organization_we_vote_id
        # (rows updated above now carry to_organization_we_vote_id, so they no longer match)
        try:
            activity_comment_entries_moved += ActivityComment.objects \
                .filter(commenter_organization_we_vote_id__iexact=from_organization_we_vote_id) \
                .update(commenter_name=to_voter_commenter_name,
                        commenter_voter_we_vote_id=to_voter_we_vote_id,
                        commenter_organization_we_vote_id=to_organization_we_vote_id,
                        commenter_profile_image_url_medium=commenter_profile_image_url_medium,
                        commenter_profile_image_url_tiny=commenter_profile_image_url_tiny)
        except Exception as e:
            status += "FAILED-ACTIVITY_COMMENT_UPDATE-FROM_ORG_WE_VOTE_ID: " + str(e) + " "
    else:
        # No destination organization: only rewrite the voter-keyed fields
        try:
            activity_comment_entries_moved += ActivityComment.objects\
                .filter(commenter_voter_we_vote_id__iexact=from_voter_we_vote_id)\
                .update(commenter_name=to_voter_commenter_name,
                        commenter_voter_we_vote_id=to_voter_we_vote_id,
                        commenter_profile_image_url_medium=commenter_profile_image_url_medium,
                        commenter_profile_image_url_tiny=commenter_profile_image_url_tiny)
        except Exception as e:
            status += "FAILED-ACTIVITY_COMMENT_UPDATE-MISSING_ORG: " + str(e) + " "

    results = {
        'status': status,
        'success': success,
        'from_voter_we_vote_id': from_voter_we_vote_id,
        'to_voter_we_vote_id': to_voter_we_vote_id,
        'activity_comment_entries_moved': activity_comment_entries_moved,
    }
    return results
def move_activity_notices_to_another_voter(
        from_voter_we_vote_id, to_voter_we_vote_id, from_organization_we_vote_id, to_organization_we_vote_id,
        to_voter=None):
    """
    Transfer ActivityNoticeSeed and ActivityNotice entries from one voter to another
    (e.g. during an account merge). Rewrites both the "speaker" side (who generated the
    activity) and, for ActivityNotice, the "recipient" side (who is being notified).
    Individual bulk-update failures are appended to status but do not stop processing and
    do NOT set success to False.
    :param from_voter_we_vote_id: we_vote_id of the voter losing the entries (required)
    :param to_voter_we_vote_id: we_vote_id of the voter receiving the entries (required)
    :param from_organization_we_vote_id: we_vote_id of the organization linked to the from_voter
    :param to_organization_we_vote_id: we_vote_id of the organization linked to the to_voter;
        when empty, org-keyed entries are left untouched and the org field is not rewritten
    :param to_voter: Voter object used as the source of the new speaker name/photos; may be
        None, in which case the name stays '' and the photo urls stay None
    :return: results dict with status, success, both voter ids, and per-model moved counts
    """
    status = ''
    success = True
    activity_notice_seed_entries_moved = 0
    activity_notice_entries_moved = 0
    # Guard clause: both voter we_vote_ids are required
    if not positive_value_exists(from_voter_we_vote_id) or not positive_value_exists(to_voter_we_vote_id):
        status += "MOVE_ACTIVITY_NOTICE_SEEDS-MISSING_EITHER_FROM_OR_TO_VOTER_WE_VOTE_ID "
        success = False
        results = {
            'status': status,
            'success': success,
            'from_voter_we_vote_id': from_voter_we_vote_id,
            'to_voter_we_vote_id': to_voter_we_vote_id,
            'activity_notice_seed_entries_moved': activity_notice_seed_entries_moved,
            'activity_notice_entries_moved': activity_notice_entries_moved,
        }
        return results
    # Guard clause: moving a voter onto itself is a no-op and treated as an error
    if from_voter_we_vote_id == to_voter_we_vote_id:
        status += "MOVE_ACTIVITY_NOTICE_SEEDS-FROM_AND_TO_VOTER_WE_VOTE_IDS_IDENTICAL "
        success = False
        results = {
            'status': status,
            'success': success,
            'from_voter_we_vote_id': from_voter_we_vote_id,
            'to_voter_we_vote_id': to_voter_we_vote_id,
            'activity_notice_seed_entries_moved': activity_notice_seed_entries_moved,
            'activity_notice_entries_moved': activity_notice_entries_moved,
        }
        return results

    # ######################
    # Migrations
    # Pull the replacement display name and photos from the destination voter.
    # If to_voter is None (or lacks these attributes) the defaults below are kept.
    to_voter_speaker_name = ''
    speaker_profile_image_url_medium = None
    speaker_profile_image_url_tiny = None
    try:
        to_voter_speaker_name = to_voter.get_full_name()
        speaker_profile_image_url_medium = to_voter.we_vote_hosted_profile_image_url_medium
        speaker_profile_image_url_tiny = to_voter.we_vote_hosted_profile_image_url_tiny
    except Exception as e:
        status += "UNABLE_TO_GET_NAME_OR_PHOTOS: " + str(e) + " "
    if positive_value_exists(to_organization_we_vote_id):
        # Move based on speaker_voter_we_vote_id
        try:
            # QuerySet.update() returns the number of rows matched, accumulated per model
            activity_notice_seed_entries_moved += ActivityNoticeSeed.objects\
                .filter(speaker_voter_we_vote_id__iexact=from_voter_we_vote_id)\
                .update(speaker_name=to_voter_speaker_name,
                        speaker_voter_we_vote_id=to_voter_we_vote_id,
                        speaker_organization_we_vote_id=to_organization_we_vote_id,
                        speaker_profile_image_url_medium=speaker_profile_image_url_medium,
                        speaker_profile_image_url_tiny=speaker_profile_image_url_tiny)
        except Exception as e:
            status += "FAILED-ACTIVITY_NOTICE_SEED_UPDATE-INCLUDING_ORG_UPDATE: " + str(e) + " "
        try:
            activity_notice_entries_moved += ActivityNotice.objects\
                .filter(speaker_voter_we_vote_id__iexact=from_voter_we_vote_id) \
                .update(speaker_name=to_voter_speaker_name,
                        speaker_voter_we_vote_id=to_voter_we_vote_id,
                        speaker_organization_we_vote_id=to_organization_we_vote_id,
                        speaker_profile_image_url_medium=speaker_profile_image_url_medium,
                        speaker_profile_image_url_tiny=speaker_profile_image_url_tiny)
        except Exception as e:
            status += "FAILED-ACTIVITY_NOTICE_UPDATE-INCLUDING_ORG_UPDATE " + str(e) + " "
        # #############################################
        # Move based on speaker_organization_we_vote_id
        # (rows updated above now carry to_organization_we_vote_id, so they no longer match)
        try:
            activity_notice_seed_entries_moved += ActivityNoticeSeed.objects \
                .filter(speaker_organization_we_vote_id__iexact=from_organization_we_vote_id) \
                .update(speaker_name=to_voter_speaker_name,
                        speaker_voter_we_vote_id=to_voter_we_vote_id,
                        speaker_organization_we_vote_id=to_organization_we_vote_id,
                        speaker_profile_image_url_medium=speaker_profile_image_url_medium,
                        speaker_profile_image_url_tiny=speaker_profile_image_url_tiny)
        except Exception as e:
            status += "FAILED-ACTIVITY_NOTICE_SEED_UPDATE-FROM_ORG_WE_VOTE_ID: " + str(e) + " "
        try:
            activity_notice_entries_moved += ActivityNotice.objects \
                .filter(speaker_organization_we_vote_id__iexact=from_organization_we_vote_id) \
                .update(speaker_name=to_voter_speaker_name,
                        speaker_voter_we_vote_id=to_voter_we_vote_id,
                        speaker_organization_we_vote_id=to_organization_we_vote_id,
                        speaker_profile_image_url_medium=speaker_profile_image_url_medium,
                        speaker_profile_image_url_tiny=speaker_profile_image_url_tiny)
        except Exception as e:
            status += "FAILED-ACTIVITY_NOTICE_UPDATE-FROM_ORG_WE_VOTE_ID: " + str(e) + " "
    else:
        # No destination organization: only rewrite the voter-keyed fields
        try:
            activity_notice_seed_entries_moved += ActivityNoticeSeed.objects\
                .filter(speaker_voter_we_vote_id__iexact=from_voter_we_vote_id)\
                .update(speaker_name=to_voter_speaker_name,
                        speaker_voter_we_vote_id=to_voter_we_vote_id,
                        speaker_profile_image_url_medium=speaker_profile_image_url_medium,
                        speaker_profile_image_url_tiny=speaker_profile_image_url_tiny)
        except Exception as e:
            status += "FAILED-ACTIVITY_NOTICE_SEED_UPDATE-MISSING_ORG: " + str(e) + " "
        try:
            activity_notice_entries_moved += ActivityNotice.objects\
                .filter(speaker_voter_we_vote_id__iexact=from_voter_we_vote_id) \
                .update(speaker_name=to_voter_speaker_name,
                        speaker_voter_we_vote_id=to_voter_we_vote_id,
                        speaker_profile_image_url_medium=speaker_profile_image_url_medium,
                        speaker_profile_image_url_tiny=speaker_profile_image_url_tiny)
        except Exception as e:
            status += "FAILED-ACTIVITY_NOTICE_UPDATE-MISSING_ORG: " + str(e) + " "

    # Now move ActivityNotice recipient_voter_we_vote_id
    # NOTE(review): this also rewrites speaker_name on notices where the merged voter was
    # only the recipient, not the speaker -- presumably intentional; confirm with the callers
    try:
        activity_notice_entries_moved += ActivityNotice.objects \
            .filter(recipient_voter_we_vote_id__iexact=from_voter_we_vote_id) \
            .update(speaker_name=to_voter_speaker_name,
                    recipient_voter_we_vote_id=to_voter_we_vote_id)
    except Exception as e:
        status += "FAILED-ACTIVITY_NOTICE_UPDATE-RECIPIENT: " + str(e) + " "

    results = {
        'status': status,
        'success': success,
        'from_voter_we_vote_id': from_voter_we_vote_id,
        'to_voter_we_vote_id': to_voter_we_vote_id,
        'activity_notice_seed_entries_moved': activity_notice_seed_entries_moved,
        'activity_notice_entries_moved': activity_notice_entries_moved,
    }
    return results
def move_activity_posts_to_another_voter(
        from_voter_we_vote_id, to_voter_we_vote_id, from_organization_we_vote_id, to_organization_we_vote_id,
        to_voter=None):
    """
    Transfer ownership of ActivityPost entries from one voter to another (e.g. during an
    account merge), refreshing speaker_name and the profile image urls from the target voter.
    Individual bulk-update failures are appended to status but do not stop processing and
    do not set success to False.
    :param from_voter_we_vote_id: we_vote_id of the voter losing the posts (required)
    :param to_voter_we_vote_id: we_vote_id of the voter receiving the posts (required)
    :param from_organization_we_vote_id: we_vote_id of the organization linked to the from_voter
    :param to_organization_we_vote_id: we_vote_id of the organization linked to the to_voter;
        when empty, org-keyed posts are left untouched and the org field is not rewritten
    :param to_voter: Voter object supplying the new name/photos; may be None
    :return: results dict with status, success, both voter ids, and the number of rows updated
    """
    status = ''
    success = True
    activity_post_entries_moved = 0

    def assemble_results():
        # Every exit path returns a dict of this exact shape
        return {
            'status': status,
            'success': success,
            'from_voter_we_vote_id': from_voter_we_vote_id,
            'to_voter_we_vote_id': to_voter_we_vote_id,
            'activity_post_entries_moved': activity_post_entries_moved,
        }

    # Guard clauses
    if not positive_value_exists(from_voter_we_vote_id) or not positive_value_exists(to_voter_we_vote_id):
        status += "MOVE_ACTIVITY_POSTS-MISSING_EITHER_FROM_OR_TO_VOTER_WE_VOTE_ID "
        success = False
        return assemble_results()
    if from_voter_we_vote_id == to_voter_we_vote_id:
        status += "MOVE_ACTIVITY_POSTS-FROM_AND_TO_VOTER_WE_VOTE_IDS_IDENTICAL "
        success = False
        return assemble_results()

    # ######################
    # Migrations
    # Pull the replacement display name and photos from the destination voter; if to_voter
    # is None (or lacks these attributes) the defaults below are kept.
    new_speaker_name = ''
    new_image_url_medium = None
    new_image_url_tiny = None
    try:
        new_speaker_name = to_voter.get_full_name()
        new_image_url_medium = to_voter.we_vote_hosted_profile_image_url_medium
        new_image_url_tiny = to_voter.we_vote_hosted_profile_image_url_tiny
    except Exception as e:
        status += "UNABLE_TO_GET_NAME_OR_PHOTOS: " + str(e) + " "

    # Field values applied by every update below
    shared_field_updates = dict(
        speaker_name=new_speaker_name,
        speaker_voter_we_vote_id=to_voter_we_vote_id,
        speaker_profile_image_url_medium=new_image_url_medium,
        speaker_profile_image_url_tiny=new_image_url_tiny)

    if positive_value_exists(to_organization_we_vote_id):
        # Move based on speaker_voter_we_vote_id
        try:
            activity_post_entries_moved += ActivityPost.objects \
                .filter(speaker_voter_we_vote_id__iexact=from_voter_we_vote_id) \
                .update(speaker_organization_we_vote_id=to_organization_we_vote_id, **shared_field_updates)
        except Exception as e:
            status += "FAILED-ACTIVITY_POST_UPDATE-INCLUDING_ORG_UPDATE " + str(e) + " "
        # #############################################
        # Move based on speaker_organization_we_vote_id
        try:
            activity_post_entries_moved += ActivityPost.objects \
                .filter(speaker_organization_we_vote_id__iexact=from_organization_we_vote_id) \
                .update(speaker_organization_we_vote_id=to_organization_we_vote_id, **shared_field_updates)
        except Exception as e:
            status += "FAILED-ACTIVITY_POST_UPDATE-FROM_ORG_WE_VOTE_ID: " + str(e) + " "
    else:
        # No destination organization: only rewrite the voter-keyed fields
        try:
            activity_post_entries_moved += ActivityPost.objects \
                .filter(speaker_voter_we_vote_id__iexact=from_voter_we_vote_id) \
                .update(**shared_field_updates)
        except Exception as e:
            status += "FAILED-ACTIVITY_POST_UPDATE-MISSING_ORG: " + str(e) + " "

    return assemble_results()
def notice_friend_endorsements_send(
        speaker_voter_we_vote_id='',
        recipient_voter_we_vote_id='',
        invitation_message='',
        activity_tidbit_we_vote_id='',
        position_name_list=None):
    """
    We are sending an email to the speaker's friends who are
    subscribed to NOTIFICATION_FRIEND_OPINIONS_YOUR_BALLOT or NOTIFICATION_FRIEND_OPINIONS_OTHER_REGIONS
    :param speaker_voter_we_vote_id: voter who created the new endorsement(s)
    :param recipient_voter_we_vote_id: the friend being notified (must have a verified email)
    :param invitation_message: free-text message included in the email body
    :param activity_tidbit_we_vote_id: used to build the "view new endorsements" link
    :param position_name_list: names of the ballot items endorsed; drives the subject line
    :return: results dict with status and success
    """
    from email_outbound.controllers import schedule_email_with_email_outbound_description
    from email_outbound.models import EmailManager, NOTICE_FRIEND_ENDORSEMENTS_TEMPLATE
    status = ""
    success = True
    # Avoid the shared mutable-default-argument pitfall; None means "no positions named"
    if position_name_list is None:
        position_name_list = []
    voter_manager = VoterManager()
    voter_results = voter_manager.retrieve_voter_by_we_vote_id(speaker_voter_we_vote_id)
    from organization.controllers import transform_web_app_url
    web_app_root_url_verified = transform_web_app_url('')  # Change to client URL if needed
    if not voter_results['voter_found']:
        error_results = {
            'status': "SPEAKER_VOTER_NOT_FOUND ",
            'success': False,
        }
        return error_results
    speaker_voter = voter_results['voter']
    recipient_voter_results = voter_manager.retrieve_voter_by_we_vote_id(recipient_voter_we_vote_id)
    if not recipient_voter_results['voter_found']:
        error_results = {
            'status': "RECIPIENT_VOTER_NOT_FOUND ",
            'success': False,
        }
        return error_results
    recipient_voter = recipient_voter_results['voter']
    email_manager = EmailManager()

    # Retrieve the email address of the original_sender (which is the person we are sending this notification to)
    recipient_email_we_vote_id = ""
    recipient_email = ""
    recipient_email_subscription_secret_key = ""
    if recipient_voter.has_email_with_verified_ownership():
        results = email_manager.retrieve_primary_email_with_ownership_verified(recipient_voter_we_vote_id)
        success = results['success']
        if results['email_address_object_found']:
            recipient_email_object = results['email_address_object']
            recipient_email_we_vote_id = recipient_email_object.we_vote_id
            recipient_email = recipient_email_object.normalized_email_address
            if positive_value_exists(recipient_email_object.subscription_secret_key):
                recipient_email_subscription_secret_key = recipient_email_object.subscription_secret_key
            else:
                # Generate a key so the unsubscribe link in the email works
                recipient_email_subscription_secret_key = \
                    email_manager.update_email_address_with_new_subscription_secret_key(
                        email_we_vote_id=recipient_email_we_vote_id)
    else:
        # The recipient must have a valid email
        status += "RECIPIENT_VOTER_DOES_NOT_HAVE_VALID_EMAIL "
        success = True  # Not an error condition -- there is simply no one to email
        results = {
            'success': success,
            'status': status,
        }
        return results

    # Retrieve the email address of the speaker_voter - used in invitation to help the recipient understand who sent
    speaker_voter_email = ""
    speaker_voter_we_vote_id = speaker_voter.we_vote_id
    if speaker_voter.has_email_with_verified_ownership():
        results = email_manager.retrieve_primary_email_with_ownership_verified(speaker_voter_we_vote_id)
        if results['email_address_object_found']:
            speaker_voter_email_object = results['email_address_object']
            speaker_voter_email = speaker_voter_email_object.normalized_email_address
    else:
        # Not having an email is ok now, since the speaker_voter could have signed in with SMS or Twitter
        status += "SPEAKER_VOTER_DOES_NOT_HAVE_VALID_EMAIL "

    if positive_value_exists(recipient_email_we_vote_id):
        recipient_voter_we_vote_id = recipient_voter.we_vote_id

        # Template variables
        real_name_only = True
        recipient_name = recipient_voter.get_full_name(real_name_only)
        speaker_voter_name = speaker_voter.get_full_name(real_name_only)
        speaker_voter_photo = speaker_voter.voter_photo_url()
        speaker_voter_description = ""
        speaker_voter_network_details = ""

        if positive_value_exists(speaker_voter_name):
            subject = speaker_voter_name
        else:
            subject = "Your friend"
        # The same phrase is appended (after the name) to the subject and used as the
        # activity description, naming up to three of the endorsed ballot items
        if len(position_name_list) == 1:
            activity_phrase = "added opinion about " + position_name_list[0]
        elif len(position_name_list) == 2:
            activity_phrase = "added opinions about " + position_name_list[0] \
                + " and " + position_name_list[1]
        elif len(position_name_list) >= 3:
            activity_phrase = "added opinions about " + position_name_list[0] \
                + ", " + position_name_list[1] + " and " + position_name_list[2]
        else:
            activity_phrase = "has added new opinion"
        subject += " " + activity_phrase
        activity_description = activity_phrase

        # Variables used by templates/email_outbound/email_templates/notice_friend_endorsements.txt and .html
        template_variables_for_json = {
            "activity_description": activity_description,
            "subject": subject,
            "invitation_message": invitation_message,
            "sender_name": speaker_voter_name,
            "sender_photo": speaker_voter_photo,
            "sender_email_address": speaker_voter_email,  # Does not affect the "From" email header
            "sender_description": speaker_voter_description,
            "sender_network_details": speaker_voter_network_details,
            "recipient_name": recipient_name,
            "recipient_voter_email": recipient_email,
            "recipient_unsubscribe_url": web_app_root_url_verified + "/settings/notifications/esk/" +
            recipient_email_subscription_secret_key,
            "email_open_url": WE_VOTE_SERVER_ROOT_URL + "/apis/v1/emailOpen?email_key=1234",
            "view_new_endorsements_url": web_app_root_url_verified + "/news/a/" + activity_tidbit_we_vote_id,
            "view_your_ballot_url": web_app_root_url_verified + "/ballot",
        }
        template_variables_in_json = json.dumps(template_variables_for_json, ensure_ascii=True)

        # Create the outbound email description, then schedule it
        kind_of_email_template = NOTICE_FRIEND_ENDORSEMENTS_TEMPLATE
        outbound_results = email_manager.create_email_outbound_description(
            sender_voter_we_vote_id=speaker_voter_we_vote_id,
            sender_voter_email=speaker_voter_email,
            sender_voter_name=speaker_voter_name,
            recipient_voter_we_vote_id=recipient_voter_we_vote_id,
            recipient_email_we_vote_id=recipient_email_we_vote_id,
            recipient_voter_email=recipient_email,
            template_variables_in_json=template_variables_in_json,
            kind_of_email_template=kind_of_email_template)
        status += outbound_results['status'] + " "
        success = outbound_results['success']
        if outbound_results['email_outbound_description_saved']:
            email_outbound_description = outbound_results['email_outbound_description']
            schedule_results = schedule_email_with_email_outbound_description(email_outbound_description)
            status += schedule_results['status'] + " "
            success = schedule_results['success']
            if schedule_results['email_scheduled_saved']:
                email_scheduled = schedule_results['email_scheduled']
                send_results = email_manager.send_scheduled_email(email_scheduled)
                status += send_results['status']
                success = send_results['success']

    results = {
        'success': success,
        'status': status,
    }
    return results
def assemble_voter_daily_summary(
        assemble_activity_start_date=None,
        recipient_voter_we_vote_id='',
        number_of_friends_to_display=3):
    """
    Collect highlights of recent friend activity for one voter, and build the subject line
    and introduction sentence for the daily summary email. One highlight (the highest
    priority_score post) is kept per friend, and highlights are returned sorted by
    priority_score, highest first.
    :param assemble_activity_start_date: only consider posts created since this date
    :param recipient_voter_we_vote_id: voter whose friends' activity is summarized
    :param number_of_friends_to_display: max number of friend names woven into the text
    :return: results dict with status, success, friend_activity_dict_list (sorted),
        introduction_line and subject
    """
    status = ''
    success = True
    activity_manager = ActivityManager()
    friend_manager = FriendManager()
    friend_activity_dict_list = []
    reaction_manager = ReactionManager()
    # Generic fallbacks, replaced below once we know which friends were active
    subject = 'Discussion(s) have been added'
    introduction_line = 'At least one friend has added a discussion.'

    # Collect all of the data about activity in this voter's network since the last daily_summary
    current_friends_results = friend_manager.retrieve_friends_we_vote_id_list(recipient_voter_we_vote_id)
    success = current_friends_results['success']
    status += current_friends_results['status']
    if not current_friends_results['friends_we_vote_id_list_found']:
        # No friends means nothing to summarize
        status += "ASSEMBLE_VOTER_DAILY_SUMMARY-NO_FRIENDS_FOUND "
        results = {
            'success': success,
            'status': status,
            'friend_activity_dict_list': friend_activity_dict_list,
            'introduction_line': introduction_line,
            'subject': subject,
        }
        return results
    friends_we_vote_id_list = current_friends_results['friends_we_vote_id_list']

    # ##########################
    # Each activity post, with name, first line, # of comments and # of likes
    highest_priority_by_friend_we_vote_id = {}
    raw_list_by_friend_we_vote_id = {}
    post_results = activity_manager.retrieve_activity_post_list(
        speaker_voter_we_vote_id_list=friends_we_vote_id_list,
        since_date=assemble_activity_start_date)
    if post_results['success']:
        friends_post_list = post_results['activity_post_list']
        for one_post in friends_post_list:
            number_of_comments = activity_manager.fetch_number_of_comments(one_post.we_vote_id)
            number_of_likes = reaction_manager.fetch_number_of_likes(one_post.we_vote_id)
            # Higher priority score makes it more likely this post is at top of list
            priority_score = 0
            if not one_post.speaker_name or one_post.speaker_name.startswith('Voter-'):
                # Penalize posts from voters without a real display name
                priority_score -= 20
            if one_post.speaker_profile_image_url_medium and len(one_post.speaker_profile_image_url_medium) > 1:
                priority_score += 10
            if number_of_comments > 0:
                priority_score += number_of_comments * 3
            if number_of_likes > 0:
                priority_score += number_of_likes * 1
            highlight_item_dict = {
                # 'date_created': one_post.date_created.strftime('%Y-%m-%d %H:%M:%S'),
                'number_of_comments': number_of_comments,
                'number_of_likes': number_of_likes,
                'priority_score': priority_score,
                'speaker_name': one_post.speaker_name,
                'speaker_profile_image_url_medium': one_post.speaker_profile_image_url_medium,
                'speaker_voter_we_vote_id': one_post.speaker_voter_we_vote_id,
                'statement_text': one_post.statement_text,
                'we_vote_id': one_post.we_vote_id,
            }
            # Keep only the highest-scoring highlight per friend (ties: later post wins)
            previous_best = highest_priority_by_friend_we_vote_id.get(one_post.speaker_voter_we_vote_id)
            if previous_best is None or priority_score >= previous_best:
                raw_list_by_friend_we_vote_id[one_post.speaker_voter_we_vote_id] = highlight_item_dict
                highest_priority_by_friend_we_vote_id[one_post.speaker_voter_we_vote_id] = priority_score

    # ##########################
    # Now that we know raw_list_by_friend_we_vote_id only has one highlight_item_dict per friend,
    # sort the highlights by priority_score, highest first.
    # BUG FIX: the original called sorted(...) and discarded the result, so the list was
    # never actually ordered (and stayed a dict_values view); assign the sorted list.
    friend_activity_dict_list = sorted(
        raw_list_by_friend_we_vote_id.values(), key=lambda item: item['priority_score'], reverse=True)
    friend_name_list_in_order = []
    for one_activity_dict in friend_activity_dict_list[:number_of_friends_to_display]:
        friend_name_list_in_order.append(one_activity_dict['speaker_name'])

    if len(friend_name_list_in_order) > 0:
        introduction_line = ''
        subject = ''
        if len(friend_name_list_in_order) == 1:
            subject += friend_name_list_in_order[0]
            subject += " added a discussion"
            introduction_line += "Your friend "
            introduction_line += friend_name_list_in_order[0]
            introduction_line += " has added one or more discussion."
        elif len(friend_name_list_in_order) == 2:
            subject += friend_name_list_in_order[0]
            subject += " and "
            subject += friend_name_list_in_order[1]
            subject += " added discussions"
            introduction_line += "Your friends "
            introduction_line += friend_name_list_in_order[0]
            introduction_line += " and "
            introduction_line += friend_name_list_in_order[1]
            introduction_line += " have added discussions."
        elif len(friend_name_list_in_order) >= 3:
            subject += friend_name_list_in_order[0]
            subject += ", "
            subject += friend_name_list_in_order[1]
            subject += " and "
            subject += friend_name_list_in_order[2]
            subject += " added discussions"
            introduction_line += "Your friends "
            introduction_line += friend_name_list_in_order[0]
            introduction_line += ", "
            introduction_line += friend_name_list_in_order[1]
            introduction_line += " and "
            introduction_line += friend_name_list_in_order[2]
            introduction_line += " have added discussions."

    results = {
        'success': success,
        'status': status,
        'friend_activity_dict_list': friend_activity_dict_list,
        'introduction_line': introduction_line,
        'subject': subject,
    }
    return results
def notice_voter_daily_summary_send(  # NOTICE_VOTER_DAILY_SUMMARY
        recipient_voter_we_vote_id='',
        friend_activity_dict_list=[],
        introduction_line='',
        subject=''):
    """
    Send the daily summary email to one voter, built from highlights of their friends'
    activity (typically produced by assemble_voter_daily_summary). The recipient must have
    an email address with verified ownership; otherwise this returns without sending.
    :param recipient_voter_we_vote_id: voter to send the summary to
    :param friend_activity_dict_list: highlight dicts; each must contain a 'we_vote_id' key.
        NOTE: the dicts are mutated in place (a 'view_activity_tidbit_url' key is added).
    :param introduction_line: opening sentence for the email body
    :param subject: email subject; a generic default is used when empty
    :return: results dict with status and success
    """
    from email_outbound.controllers import schedule_email_with_email_outbound_description
    from email_outbound.models import EmailManager, NOTICE_VOTER_DAILY_SUMMARY_TEMPLATE
    status = ""
    voter_manager = VoterManager()
    from organization.controllers import transform_web_app_url
    web_app_root_url_verified = transform_web_app_url('')  # Change to client URL if needed
    recipient_voter_results = voter_manager.retrieve_voter_by_we_vote_id(recipient_voter_we_vote_id)
    if not recipient_voter_results['voter_found']:
        error_results = {
            'status': "RECIPIENT_VOTER_NOT_FOUND ",
            'success': False,
        }
        return error_results
    recipient_voter = recipient_voter_results['voter']
    email_manager = EmailManager()

    # Retrieve the email address of the original_sender (which is the person we are sending this notification to)
    recipient_email_we_vote_id = ""
    recipient_email = ""
    recipient_email_subscription_secret_key = ""
    if recipient_voter.has_email_with_verified_ownership():
        results = email_manager.retrieve_primary_email_with_ownership_verified(recipient_voter_we_vote_id)
        # 'success' is first defined here; the early-return branch below defines it separately
        success = results['success']
        if results['email_address_object_found']:
            recipient_email_object = results['email_address_object']
            recipient_email_we_vote_id = recipient_email_object.we_vote_id
            recipient_email = recipient_email_object.normalized_email_address
            if positive_value_exists(recipient_email_object.subscription_secret_key):
                recipient_email_subscription_secret_key = recipient_email_object.subscription_secret_key
            else:
                # Generate a key so the unsubscribe link in the email works
                recipient_email_subscription_secret_key = \
                    email_manager.update_email_address_with_new_subscription_secret_key(
                        email_we_vote_id=recipient_email_we_vote_id)
    else:
        # The recipient must have a valid email
        status += "RECIPIENT_VOTER_DOES_NOT_HAVE_VALID_EMAIL "
        success = True  # Not an error condition -- there is simply no one to email
        results = {
            'success': success,
            'status': status,
        }
        return results

    if positive_value_exists(recipient_email_we_vote_id):
        recipient_voter_we_vote_id = recipient_voter.we_vote_id

        # Trim down friend_activity_dict_list to only x items
        number_of_highlights_to_show = 3
        number_shown = 0
        friend_activity_dict_list_modified = []
        for highlight_dict in friend_activity_dict_list:
            if number_shown < number_of_highlights_to_show:
                # Mutates the caller's dict: adds the deep link into the web app
                highlight_dict['view_activity_tidbit_url'] = \
                    web_app_root_url_verified + "/news/a/" + highlight_dict['we_vote_id']
                friend_activity_dict_list_modified.append(highlight_dict)
                number_shown += 1

        # Template variables
        real_name_only = True
        recipient_name = recipient_voter.get_full_name(real_name_only)
        # speaker_voter_name = speaker_voter.get_full_name(real_name_only)
        # speaker_voter_photo = speaker_voter.voter_photo_url()
        # speaker_voter_description = ""
        # speaker_voter_network_details = ""

        # Variables used by templates/email_outbound/email_templates/friend_accepted_invitation.txt and .html
        if not positive_value_exists(subject):
            subject = "Your friends have commented"
        template_variables_for_json = {
            "introduction_line": introduction_line,
            "subject": subject,
            "friend_activity_dict_list": friend_activity_dict_list_modified,
            # "sender_name": speaker_voter_name,
            # "sender_photo": speaker_voter_photo,
            # "sender_email_address": speaker_voter_email,  # Does not affect the "From" email header
            # "sender_description": speaker_voter_description,
            # "sender_network_details": speaker_voter_network_details,
            "recipient_name": recipient_name,
            "recipient_voter_email": recipient_email,
            "recipient_unsubscribe_url": web_app_root_url_verified + "/settings/notifications/esk/" +
            recipient_email_subscription_secret_key,
            "email_open_url": WE_VOTE_SERVER_ROOT_URL + "/apis/v1/emailOpen?email_key=1234",
            "view_main_discussion_page_url": web_app_root_url_verified + "/news",
            "view_your_ballot_url": web_app_root_url_verified + "/ballot",
        }
        template_variables_in_json = json.dumps(template_variables_for_json, ensure_ascii=True)
        from_email_for_daily_summary = "We Vote <info@WeVote.US>"  # TODO DALE Make system variable

        # Create the outbound email description, then schedule it
        kind_of_email_template = NOTICE_VOTER_DAILY_SUMMARY_TEMPLATE
        outbound_results = email_manager.create_email_outbound_description(
            sender_voter_we_vote_id=recipient_voter_we_vote_id,
            sender_voter_email=from_email_for_daily_summary,
            sender_voter_name='',
            recipient_voter_we_vote_id=recipient_voter_we_vote_id,
            recipient_email_we_vote_id=recipient_email_we_vote_id,
            recipient_voter_email=recipient_email,
            template_variables_in_json=template_variables_in_json,
            kind_of_email_template=kind_of_email_template)
        status += outbound_results['status'] + " "
        success = outbound_results['success']
        if outbound_results['email_outbound_description_saved']:
            email_outbound_description = outbound_results['email_outbound_description']
            schedule_results = schedule_email_with_email_outbound_description(email_outbound_description)
            status += schedule_results['status'] + " "
            success = schedule_results['success']
            if schedule_results['email_scheduled_saved']:
                # messages_to_send.append(schedule_results['email_scheduled_id'])
                email_scheduled = schedule_results['email_scheduled']
                send_results = email_manager.send_scheduled_email(email_scheduled)
                email_scheduled_sent = send_results['email_scheduled_sent']  # NOTE(review): assigned but unused
                status += send_results['status']
                success = send_results['success']

    results = {
        'success': success,
        'status': status,
    }
    return results
def process_activity_notice_seeds_triggered_by_batch_process():
    """
    Work through all ActivityNoticeSeed entries that still need processing, in four passes:
    1) update existing ActivityNotice entries from changed seeds (only on minutes divisible by 5),
    2) create new ActivityNotice entries (the header notification drop-down),
    3) create NOTICE_VOTER_DAILY_SUMMARY_SEED entries from seeds that belong in a daily summary, and
    4) schedule/send the email notifications.
    We assume only one of this function is running at any time.
    :return: dict with 'success', 'status', 'activity_notice_seed_count', 'activity_notice_count'
    """
    status = ''
    success = True
    activity_notice_seed_count = 0  # Number of ActivityNoticeSeed entries touched across all passes
    activity_notice_count = 0  # Number of ActivityNotice entries created in pass 2
    # Retrieve ActivityNoticeSeeds that need to have some processing done, including ActivityNotice entries created
    activity_manager = ActivityManager()
    # We want this process to stop before it has run for 5 minutes, so that we don't collide with another process
    # starting. Please also see: activity_notice_processing_time_out_duration & checked_out_expiration_time
    # We adjust timeout for ACTIVITY_NOTICE_PROCESS in retrieve_batch_process_list
    longest_activity_notice_processing_run_time_allowed = 270 # 4.5 minutes * 60 seconds
    when_process_must_stop = now() + timedelta(seconds=longest_activity_notice_processing_run_time_allowed)
    # ----- Pass 1 -----
    # Update existing ActivityNoticeSeed entries (notices_to_be_updated=True)
    # Only run this when the minutes are divisible by "5"
    # Note: Because of other processes running we cannot count on every entry updating every 5 minutes -- there
    # is some randomness to when they get updated
    update_interval = 5
    time_now = now()
    if time_now.minute % update_interval == 0:
        continue_retrieving_notices_to_be_updated = True
        # Track seeds already seen this run so retrieve_next_... never returns the same one twice
        activity_notice_seed_id_already_reviewed_list = []
        safety_valve_count = 0
        while continue_retrieving_notices_to_be_updated and \
                safety_valve_count < 1000 and \
                when_process_must_stop > now():
            safety_valve_count += 1
            results = activity_manager.retrieve_next_activity_notice_seed_to_process(
                notices_to_be_updated=True,
                activity_notice_seed_id_already_reviewed_list=activity_notice_seed_id_already_reviewed_list)
            if results['activity_notice_seed_found']:
                # We retrieve from these seed types: NOTICE_ACTIVITY_POST_SEED, NOTICE_FRIEND_ENDORSEMENTS_SEED
                # We do not update NOTICE_VOTER_DAILY_SUMMARY_SEED
                activity_notice_seed = results['activity_notice_seed']
                activity_notice_seed_id_already_reviewed_list.append(activity_notice_seed.id)
                activity_notice_seed_count += 1
                update_activity_notices = False
                if activity_notice_seed.kind_of_seed == NOTICE_FRIEND_ENDORSEMENTS_SEED:
                    # Only update if the number of positions has changed
                    update_seed_results = update_activity_notice_seed_with_positions(activity_notice_seed)
                    if update_seed_results['success'] and \
                            update_seed_results['activity_notice_seed_changed'] and not \
                            update_seed_results['date_of_notice_earlier_than_update_window']:
                        activity_notice_seed = update_seed_results['activity_notice_seed']
                        update_activity_notices = True
                elif activity_notice_seed.kind_of_seed == NOTICE_ACTIVITY_POST_SEED:
                    # We are storing number_of_comments and number_of_likes in NOTICE_ACTIVITY_POST_SEED, so we need
                    # to update in case there have been changes.
                    update_activity_notices = True
                if update_activity_notices:
                    # Update the activity drop down in each voter touched (friends of the voter acting)
                    update_results = update_or_create_activity_notices_from_seed(activity_notice_seed)
                    # NOTE(review): failures only accumulate status; 'success' stays True -- confirm intended
                    if not update_results['success']:
                        status += update_results['status']
            else:
                continue_retrieving_notices_to_be_updated = False
    # ----- Pass 2 -----
    # Create new ActivityNotice entries, which appear in header notification menu (notices_to_be_created=True)
    continue_retrieving_notices_to_be_created = True
    activity_notice_seed_id_already_reviewed_list = []  # Reset
    safety_valve_count = 0
    while continue_retrieving_notices_to_be_created and safety_valve_count < 1000 and when_process_must_stop > now():
        safety_valve_count += 1
        results = activity_manager.retrieve_next_activity_notice_seed_to_process(
            notices_to_be_created=True,
            activity_notice_seed_id_already_reviewed_list=activity_notice_seed_id_already_reviewed_list)
        if results['activity_notice_seed_found']:
            # We retrieve from these seed types: NOTICE_ACTIVITY_POST_SEED, NOTICE_FRIEND_ENDORSEMENTS_SEED
            activity_notice_seed = results['activity_notice_seed']
            activity_notice_seed_id_already_reviewed_list.append(activity_notice_seed.id)
            activity_notice_seed_count += 1
            # Create the activity drop down in each voter's header for each voter touched (friends of the voter acting)
            create_results = update_or_create_activity_notices_from_seed(activity_notice_seed)
            # activity_notice_seed.activity_notices_created = True  # Marked in function immediately above
            activity_notice_count += create_results['activity_notice_count']
            # NOTE: Since the daily summary is only sent once per day, wait to create NOTICE_VOTER_DAILY_SUMMARY_SEED
            # in the update step above
        else:
            continue_retrieving_notices_to_be_created = False
    # ----- Pass 3 -----
    # Create NOTICE_VOTER_DAILY_SUMMARY_SEED entries for any other SEED that needs to go into the DAILY_SUMMARY
    # We retrieve from these seed types: NOTICE_ACTIVITY_POST_SEED, NOTICE_FRIEND_ENDORSEMENTS_SEED
    continue_retrieving_to_be_added_to_voter_summary = True
    activity_notice_seed_id_already_reviewed_list = []
    safety_valve_count = 0
    while continue_retrieving_to_be_added_to_voter_summary and \
            safety_valve_count < 1000 and \
            when_process_must_stop > now():
        safety_valve_count += 1
        results = activity_manager.retrieve_next_activity_notice_seed_to_process(
            to_be_added_to_voter_daily_summary=True,
            activity_notice_seed_id_already_reviewed_list=activity_notice_seed_id_already_reviewed_list)
        if results['activity_notice_seed_found']:
            # We retrieve from these seed types: NOTICE_ACTIVITY_POST_SEED, NOTICE_FRIEND_ENDORSEMENTS_SEED
            activity_notice_seed = results['activity_notice_seed']
            activity_notice_seed_id_already_reviewed_list.append(activity_notice_seed.id)
            activity_notice_seed_count += 1
            # Create the seeds (one for each voter touched) which will be used to send a daily summary
            # to each voter touched. So we end up with new NOTICE_VOTER_DAILY_SUMMARY_SEED entries for the friends
            # of the creators of these seeds: NOTICE_ACTIVITY_POST_SEED, NOTICE_FRIEND_ENDORSEMENTS_SEED
            update_results = update_or_create_voter_daily_summary_seeds_from_seed(activity_notice_seed)
            if not update_results['success']:
                status += update_results['status']
        else:
            continue_retrieving_to_be_added_to_voter_summary = False
    # ----- Pass 4 -----
    # Send email notifications (notices_to_be_scheduled=True)
    continue_retrieving_notices_to_be_scheduled = True
    activity_notice_seed_id_already_reviewed_list = []  # Reset
    safety_valve_count = 0
    while continue_retrieving_notices_to_be_scheduled and safety_valve_count < 1000 and when_process_must_stop > now():
        safety_valve_count += 1
        results = activity_manager.retrieve_next_activity_notice_seed_to_process(
            notices_to_be_scheduled=True,
            activity_notice_seed_id_already_reviewed_list=activity_notice_seed_id_already_reviewed_list)
        if results['activity_notice_seed_found']:
            # We retrieve from these seed types: NOTICE_FRIEND_ENDORSEMENTS_SEED, NOTICE_VOTER_DAILY_SUMMARY_SEED
            activity_notice_seed = results['activity_notice_seed']
            activity_notice_seed_id_already_reviewed_list.append(activity_notice_seed.id)
            # activity_notice_seed_count += 1
            schedule_results = schedule_activity_notices_from_seed(activity_notice_seed)
            # activity_notice_seed.activity_notices_scheduled = True  # Marked in function immediately above
            if not schedule_results['success']:
                status += schedule_results['status']
            # activity_notice_count += create_results['activity_notice_count']
        else:
            continue_retrieving_notices_to_be_scheduled = False
    results = {
        'success': success,
        'status': status,
        'activity_notice_seed_count': activity_notice_seed_count,
        'activity_notice_count': activity_notice_count,
    }
    return results
def update_or_create_activity_notices_from_seed(activity_notice_seed):
    """
    Fan one ActivityNoticeSeed out into ActivityNotice entries (header drop-down items) for each
    current friend of the acting (speaker) voter, branching on the seed's kind_of_seed, then mark
    the seed's activity_notices_created flag.
    :param activity_notice_seed: the seed describing what the speaker voter did
    :return: dict with 'success', 'status' and 'activity_notice_count' (notices created/updated)
    """
    status = ''
    success = True
    activity_notice_count = 0
    activity_manager = ActivityManager()
    friend_manager = FriendManager()
    reaction_manager = ReactionManager()
    # Create or update ActivityNotice entries
    # Who needs to see a notice?
    audience = 'FRIENDS'
    # audience = 'ONE_FRIEND'
    if audience == 'FRIENDS':
        # Retrieve all friends of activity_notice_seed.speaker_voter_we_vote_id
        status += "KIND_OF_LIST-CURRENT_FRIENDS "
        retrieve_current_friends_as_voters_results = \
            friend_manager.retrieve_current_friends_as_voters(activity_notice_seed.speaker_voter_we_vote_id)
        success = retrieve_current_friends_as_voters_results['success']
        status += retrieve_current_friends_as_voters_results['status']
        if retrieve_current_friends_as_voters_results['friend_list_found']:
            current_friend_list = retrieve_current_friends_as_voters_results['friend_list']
            if activity_notice_seed.kind_of_seed == NOTICE_FRIEND_ENDORSEMENTS_SEED:
                kind_of_notice = NOTICE_FRIEND_ENDORSEMENTS
                # Names for quick summaries
                # Merge the friends-only and public position-name lists into one serialized list
                position_name_list = []
                if positive_value_exists(activity_notice_seed.position_names_for_friends_serialized):
                    position_name_list_for_friends = \
                        json.loads(activity_notice_seed.position_names_for_friends_serialized)
                    position_name_list += position_name_list_for_friends
                if positive_value_exists(activity_notice_seed.position_names_for_public_serialized):
                    position_name_list_for_public = \
                        json.loads(activity_notice_seed.position_names_for_public_serialized)
                    position_name_list += position_name_list_for_public
                position_name_list_serialized = json.dumps(position_name_list)
                # We Vote Ids for full position display
                position_we_vote_id_list = []
                if positive_value_exists(activity_notice_seed.position_we_vote_ids_for_friends_serialized):
                    position_we_vote_id_list_for_friends = \
                        json.loads(activity_notice_seed.position_we_vote_ids_for_friends_serialized)
                    position_we_vote_id_list += position_we_vote_id_list_for_friends
                if positive_value_exists(activity_notice_seed.position_we_vote_ids_for_public_serialized):
                    position_we_vote_id_list_for_public = \
                        json.loads(activity_notice_seed.position_we_vote_ids_for_public_serialized)
                    position_we_vote_id_list += position_we_vote_id_list_for_public
                position_we_vote_id_list_serialized = json.dumps(position_we_vote_id_list)
                for friend_voter in current_friend_list:
                    # Add switch for NOTICE_FRIEND_ACTIVITY_POSTS here
                    # Decide whether to send email or sms based on friend's notification settings
                    # We will need to figure out if this endorsement is on this voter's ballot
                    # NOTIFICATION_FRIEND_OPINIONS_OTHER_REGIONS_EMAIL
                    # NOTIFICATION_FRIEND_OPINIONS_YOUR_BALLOT_EMAIL
                    send_to_email = friend_voter.is_notification_status_flag_set(
                        NOTIFICATION_FRIEND_OPINIONS_OTHER_REGIONS_EMAIL)
                    # NOTIFICATION_FRIEND_OPINIONS_YOUR_BALLOT_SMS
                    # NOTIFICATION_FRIEND_OPINIONS_OTHER_REGIONS_SMS
                    send_to_sms = friend_voter.is_notification_status_flag_set(
                        NOTIFICATION_FRIEND_OPINIONS_OTHER_REGIONS_SMS)
                    # ###########################
                    # This is the entry that goes in the header drop-down
                    activity_results = update_or_create_activity_notice_for_friend_endorsements(
                        activity_notice_seed_id=activity_notice_seed.id,
                        activity_tidbit_we_vote_id=activity_notice_seed.we_vote_id,
                        kind_of_seed=activity_notice_seed.kind_of_seed,
                        kind_of_notice=kind_of_notice,
                        position_name_list_serialized=position_name_list_serialized,
                        position_we_vote_id_list_serialized=position_we_vote_id_list_serialized,
                        recipient_voter_we_vote_id=friend_voter.we_vote_id,
                        send_to_email=send_to_email,
                        send_to_sms=send_to_sms,
                        speaker_name=activity_notice_seed.speaker_name,
                        speaker_organization_we_vote_id=activity_notice_seed.speaker_organization_we_vote_id,
                        speaker_voter_we_vote_id=activity_notice_seed.speaker_voter_we_vote_id,
                        speaker_profile_image_url_medium=activity_notice_seed.speaker_profile_image_url_medium,
                        speaker_profile_image_url_tiny=activity_notice_seed.speaker_profile_image_url_tiny)
                    if activity_results['success']:
                        activity_notice_count += 1
                    else:
                        status += activity_results['status']
            elif activity_notice_seed.kind_of_seed == NOTICE_ACTIVITY_POST_SEED:
                # Pop the last activity_tidbit_we_vote_id
                # Prefer the friends-only list; fall back to the public list if none found there
                activity_tidbit_we_vote_id = ''
                if positive_value_exists(activity_notice_seed.activity_tidbit_we_vote_ids_for_friends_serialized):
                    activity_tidbit_we_vote_id_list_for_friends = \
                        json.loads(activity_notice_seed.activity_tidbit_we_vote_ids_for_friends_serialized)
                    if len(activity_tidbit_we_vote_id_list_for_friends) > 0:
                        activity_tidbit_we_vote_id = activity_tidbit_we_vote_id_list_for_friends.pop()
                if not positive_value_exists(activity_tidbit_we_vote_id):
                    if positive_value_exists(activity_notice_seed.activity_tidbit_we_vote_ids_for_public_serialized):
                        activity_tidbit_we_vote_id_list_for_public = \
                            json.loads(activity_notice_seed.activity_tidbit_we_vote_ids_for_public_serialized)
                        if len(activity_tidbit_we_vote_id_list_for_public) > 0:
                            activity_tidbit_we_vote_id = activity_tidbit_we_vote_id_list_for_public.pop()
                if positive_value_exists(activity_tidbit_we_vote_id):
                    # Current comment/like counts are stored on each notice so the drop-down can show them
                    number_of_comments = activity_manager.fetch_number_of_comments(
                        parent_we_vote_id=activity_tidbit_we_vote_id)
                    number_of_likes = reaction_manager.fetch_number_of_likes(activity_tidbit_we_vote_id)
                    kind_of_notice = NOTICE_FRIEND_ACTIVITY_POSTS
                    for friend_voter in current_friend_list:
                        # ###########################
                        # NOTE: We call update_or_create_voter_daily_summary_seeds_from_seed from the same place
                        # (process_activity_notice_seeds_triggered_by_batch_process) we call the function
                        # we are currently in. We don't do it here.
                        # ###########################
                        # This is the entry that goes in the header drop-down
                        activity_results = update_or_create_activity_notice_for_friend_posts(
                            activity_notice_seed_id=activity_notice_seed.id,
                            activity_tidbit_we_vote_id=activity_tidbit_we_vote_id,
                            kind_of_seed=activity_notice_seed.kind_of_seed,
                            kind_of_notice=kind_of_notice,
                            number_of_comments=number_of_comments,
                            number_of_likes=number_of_likes,
                            recipient_voter_we_vote_id=friend_voter.we_vote_id,
                            send_to_email=False,
                            send_to_sms=False,
                            speaker_name=activity_notice_seed.speaker_name,
                            speaker_organization_we_vote_id=activity_notice_seed.speaker_organization_we_vote_id,
                            speaker_voter_we_vote_id=activity_notice_seed.speaker_voter_we_vote_id,
                            speaker_profile_image_url_medium=activity_notice_seed.speaker_profile_image_url_medium,
                            speaker_profile_image_url_tiny=activity_notice_seed.speaker_profile_image_url_tiny,
                            statement_text_preview=activity_notice_seed.statement_text_preview)
                        if activity_results['success']:
                            activity_notice_count += 1
                        else:
                            status += activity_results['status']
            elif activity_notice_seed.kind_of_seed == NOTICE_VOTER_DAILY_SUMMARY_SEED:
                # kind_of_notice = NOTICE_VOTER_DAILY_SUMMARY
                # DALE Sept 6 2020: We don't want to create an ActivityNotice. We just send email directly from SEED
                pass
    else:
        status += "CREATE_ACTIVITY_NOTICES_FROM_SEED-NO_FRIENDS "
    # Mark the seed processed even if no notices were created -- NOTE(review): this also runs when
    #  the friend retrieval above failed (success False); confirm that is intended
    try:
        activity_notice_seed.activity_notices_created = True
        activity_notice_seed.save()
        status += "CREATE_ACTIVITY_NOTICES_FROM_SEED-MARKED_CREATED "
    except Exception as e:
        status += "CREATE_ACTIVITY_NOTICES_FROM_SEED-CANNOT_MARK_NOTICES_CREATED: " + str(e) + " "
        success = False
    results = {
        'success': success,
        'status': status,
        'activity_notice_count': activity_notice_count,
    }
    return results
def update_or_create_voter_daily_summary_seed(
        recipient_name='',
        recipient_voter_we_vote_id='',
        send_to_email=False,
        send_to_sms=False,
        speaker_organization_we_vote_id='',
        speaker_voter_we_vote_id='',
        update_only=False):
    """
    Make sure one NOTICE_VOTER_DAILY_SUMMARY_SEED exists for this recipient voter, creating it if
    needed. If a recent seed already exists we leave it untouched: when the daily summary email is
    generated we query all activity since the last summary was sent, so the seed does not need to
    accumulate speaker data. (The in-place update code disabled Sept 2020 has been removed -- see
    version control history if it is ever needed again.)
    :param recipient_name: display name of the voter who will receive the summary
    :param recipient_voter_we_vote_id: the voter who will receive the summary
    :param send_to_email: whether this voter has daily-summary email turned on
    :param send_to_sms: whether this voter has daily-summary sms turned on
    :param speaker_organization_we_vote_id: The person's organization who has done something
    :param speaker_voter_we_vote_id: The person who has done something
    :param update_only: when True, never create a new seed
    :return: dict with 'success' and 'status'
    """
    status = ''
    success = True
    activity_manager = ActivityManager()
    results = activity_manager.retrieve_recent_activity_notice_seed_from_listener(
        kind_of_seed=NOTICE_VOTER_DAILY_SUMMARY_SEED,
        recipient_voter_we_vote_id=recipient_voter_we_vote_id,
    )
    if results['activity_notice_seed_found']:
        # A recent daily summary seed already exists for this voter -- nothing to do
        status += "WE_DO_NOT_NEED_TO_UPDATE_NOTICE_VOTER_DAILY_SUMMARY_SEED "
    elif update_only:
        status += "DID_NOT_CREATE_SEED-UPDATE_ONLY_MODE "
    elif results['success']:
        if positive_value_exists(send_to_email) or positive_value_exists(send_to_sms):
            date_of_notice = now()
            speaker_organization_we_vote_ids = [speaker_organization_we_vote_id]
            speaker_organization_we_vote_ids_serialized = json.dumps(speaker_organization_we_vote_ids)
            speaker_voter_we_vote_ids = [speaker_voter_we_vote_id]
            speaker_voter_we_vote_ids_serialized = json.dumps(speaker_voter_we_vote_ids)
            create_results = activity_manager.create_activity_notice_seed(
                date_of_notice=date_of_notice,
                kind_of_seed=NOTICE_VOTER_DAILY_SUMMARY_SEED,
                recipient_name=recipient_name,
                recipient_voter_we_vote_id=recipient_voter_we_vote_id,
                send_to_email=send_to_email,
                send_to_sms=send_to_sms,
                speaker_organization_we_vote_ids_serialized=speaker_organization_we_vote_ids_serialized,
                speaker_voter_we_vote_ids_serialized=speaker_voter_we_vote_ids_serialized)
            status += create_results['status']
            # Bug fix: a failed create previously still reported success=True
            success = create_results['success']
        else:
            # Neither notification channel is on, so a seed would never be sent
            status += "NOT_SENDING-NEITHER_SEND_TO_EMAIL_NOR_SMS_SET "
    else:
        # The retrieve itself failed -- propagate the failure (previously success stayed True)
        status += results['status']
        success = False
    results = {
        'success': success,
        'status': status,
    }
    return results
def update_or_create_voter_daily_summary_seeds_from_seed(activity_notice_seed):
    """
    Fan a seed (e.g. NOTICE_ACTIVITY_POST_SEED) out into one NOTICE_VOTER_DAILY_SUMMARY_SEED per
    current friend of the acting (speaker) voter, then mark the incoming seed as added to the
    daily summary.
    :param activity_notice_seed: the seed describing what the speaker voter did
    :return: dict with 'success', 'status' and 'activity_notice_count'
    """
    status = ''
    success = True
    activity_notice_count = 0
    friend_manager = FriendManager()
    # These seed kinds always earn the friend a daily summary seed
    always_summarized_seed_types = [NOTICE_ACTIVITY_POST_SEED]
    # Who needs to see a notice?
    audience = 'FRIENDS'
    # audience = 'ONE_FRIEND'
    if audience == 'FRIENDS':
        # Every current friend of the speaker is considered for a daily summary seed
        status += "KIND_OF_LIST-CURRENT_FRIENDS "
        friends_results = \
            friend_manager.retrieve_current_friends_as_voters(activity_notice_seed.speaker_voter_we_vote_id)
        success = friends_results['success']
        status += friends_results['status']
        if friends_results['friend_list_found']:
            for friend_voter in friends_results['friend_list']:
                should_create_seed = False
                update_only = False
                if activity_notice_seed.kind_of_seed == NOTICE_FRIEND_ENDORSEMENTS_SEED:
                    # Friend endorsements belong in the daily summary (NOTICE_VOTER_DAILY_SUMMARY)
                    should_create_seed = True
                    opinions_email_turned_on = friend_voter.is_notification_status_flag_set(
                        NOTIFICATION_FRIEND_OPINIONS_OTHER_REGIONS_EMAIL)
                    if positive_value_exists(opinions_email_turned_on):
                        # This friend already gets a direct notice about the speaker's endorsements,
                        #  so don't create a daily summary *just* for them -- updating one is fine
                        update_only = True
                elif activity_notice_seed.kind_of_seed in always_summarized_seed_types:
                    should_create_seed = True
                # The friend's own notification settings decide the delivery channels
                send_to_email = friend_voter.is_notification_status_flag_set(
                    NOTIFICATION_VOTER_DAILY_SUMMARY_EMAIL)
                send_to_sms = friend_voter.is_notification_status_flag_set(
                    NOTIFICATION_VOTER_DAILY_SUMMARY_SMS)
                if should_create_seed:
                    seed_results = update_or_create_voter_daily_summary_seed(
                        recipient_name=friend_voter.get_full_name(real_name_only=True),
                        recipient_voter_we_vote_id=friend_voter.we_vote_id,
                        send_to_email=send_to_email,
                        send_to_sms=send_to_sms,
                        speaker_organization_we_vote_id=activity_notice_seed.speaker_organization_we_vote_id,
                        speaker_voter_we_vote_id=activity_notice_seed.speaker_voter_we_vote_id,
                        update_only=update_only,
                    )
                    status += seed_results['status']
    else:
        status += "CREATE_DAILY_SUMMARY_FROM_SEED-NO_FRIENDS "
    # Mark the incoming seed so it is not picked up again by this pass
    try:
        activity_notice_seed.added_to_voter_daily_summary = True
        activity_notice_seed.save()
        status += "MARKED_ADDED_TO_VOTER_DAILY_SUMMARY "
    except Exception as e:
        status += "ADDED_TO_VOTER_DAILY_SUMMARY-CANNOT_MARK_CREATED: " + str(e) + " "
        success = False
    results = {
        'success': success,
        'status': status,
        'activity_notice_count': activity_notice_count,
    }
    return results
def schedule_activity_notices_from_seed(activity_notice_seed):
    """
    Send the outbound notifications for one ActivityNoticeSeed and mark it scheduled.
    NOTICE_FRIEND_ENDORSEMENTS_SEED: send one email per un-sent ActivityNotice attached to the seed.
    NOTICE_VOTER_DAILY_SUMMARY_SEED: assemble the last 24 hours of friend activity and send a single
    daily summary email directly from the seed (no ActivityNotice entries involved).
    :param activity_notice_seed:
    :return: dict with 'success', 'status' and 'activity_notice_count' (emails sent)
    """
    status = ''
    success = True
    activity_notice_count = 0
    activity_manager = ActivityManager()
    # This is a switch with different branches for NOTICE_FRIEND_ENDORSEMENTS_SEED
    # and NOTICE_VOTER_DAILY_SUMMARY_SEED
    if activity_notice_seed.kind_of_seed in [NOTICE_FRIEND_ENDORSEMENTS_SEED]:
        # Schedule/send emails
        # For these kind of seeds, we just send an email notification for the activity_notice (that is displayed
        # to each voter in the header bar
        continue_retrieving = True
        activity_notice_id_already_reviewed_list = []
        safety_valve_count = 0
        # 500 batches x retrieve_count_limit 100 per batch
        while continue_retrieving and safety_valve_count < 500:  # Current limit of 5,000 friends
            safety_valve_count += 1
            results = activity_manager.retrieve_activity_notice_list(
                activity_notice_seed_id=activity_notice_seed.id,
                to_be_sent_to_email=True,
                retrieve_count_limit=100,
                activity_notice_id_already_reviewed_list=activity_notice_id_already_reviewed_list,
            )
            if not results['success']:
                # NOTE(review): on a retrieval failure we keep looping until the safety valve (500)
                #  trips; confirm whether the loop should stop here instead
                status += results['status']
            elif results['activity_notice_list_found']:
                # Merge friends-only and public position names for the email body
                position_name_list = []
                if positive_value_exists(activity_notice_seed.position_names_for_friends_serialized):
                    position_name_list_for_friends = \
                        json.loads(activity_notice_seed.position_names_for_friends_serialized)
                    position_name_list += position_name_list_for_friends
                if positive_value_exists(activity_notice_seed.position_names_for_public_serialized):
                    position_name_list_for_public = \
                        json.loads(activity_notice_seed.position_names_for_public_serialized)
                    position_name_list += position_name_list_for_public
                activity_notice_list = results['activity_notice_list']
                for activity_notice in activity_notice_list:
                    send_results = notice_friend_endorsements_send(
                        speaker_voter_we_vote_id=activity_notice.speaker_voter_we_vote_id,
                        recipient_voter_we_vote_id=activity_notice.recipient_voter_we_vote_id,
                        activity_tidbit_we_vote_id=activity_notice_seed.we_vote_id,
                        position_name_list=position_name_list)
                    activity_notice_id_already_reviewed_list.append(activity_notice.id)
                    if send_results['success']:
                        try:
                            activity_notice.scheduled_to_email = True
                            activity_notice.sent_to_email = True
                            activity_notice.scheduled_to_sms = True
                            activity_notice.sent_to_sms = True
                            activity_notice.save()
                            activity_notice_count += 1
                            # We'll want to create a routine that connects up to the SendGrid API to tell us
                            # when the message was received or bounced
                        except Exception as e:
                            status += "FAILED_SAVING_ACTIVITY_NOTICE: " + str(e) + " "
                    else:
                        # NOTE(review): send failures accumulate status but leave success True
                        status += send_results['status']
            else:
                continue_retrieving = False
        # Mark the seed scheduled so it is not picked up by this pass again
        try:
            activity_notice_seed.activity_notices_scheduled = True
            activity_notice_seed.save()
            status += "SCHEDULE_ACTIVITY_NOTICES_FROM_SEED-MARKED_CREATED "
        except Exception as e:
            status += "SCHEDULE_ACTIVITY_NOTICES_FROM_SEED-CANNOT_MARK_NOTICES_CREATED: " + str(e) + " "
            success = False
    elif activity_notice_seed.kind_of_seed == NOTICE_VOTER_DAILY_SUMMARY_SEED:
        # Make this either when the last SEED was created OR 24 hours ago
        assemble_activity_start_date = now() - timedelta(hours=24)
        assemble_results = assemble_voter_daily_summary(
            assemble_activity_start_date=assemble_activity_start_date,
            recipient_voter_we_vote_id=activity_notice_seed.recipient_voter_we_vote_id,
        )
        send_results = notice_voter_daily_summary_send(
            recipient_voter_we_vote_id=activity_notice_seed.recipient_voter_we_vote_id,
            friend_activity_dict_list=assemble_results['friend_activity_dict_list'],
            introduction_line=assemble_results['introduction_line'],
            subject=assemble_results['subject'])
        if send_results['success']:
            try:
                activity_notice_seed.activity_notices_scheduled = True
                activity_notice_seed.scheduled_to_email = True
                activity_notice_seed.sent_to_email = True
                # activity_notice_seed.scheduled_to_sms = True
                # activity_notice_seed.sent_to_sms = True
                activity_notice_seed.save()
                activity_notice_count += 1
                # We'll want to create a routine that connects up to the SendGrid API to tell us
                # when the message was received or bounced
            except Exception as e:
                status += "FAILED_SAVING_ACTIVITY_NOTICE_SEED: " + str(e) + " "
                pass
        else:
            status += send_results['status']
    # # Schedule/send sms
    # results = activity_manager.retrieve_activity_notice_list(
    #     activity_notice_seed_id=activity_notice_seed.id,
    #     to_be_sent_to_sms=True,
    # )
    results = {
        'success': success,
        'status': status,
        'activity_notice_count': activity_notice_count,
    }
    return results
def update_or_create_activity_notice_for_friend_endorsements(
        activity_notice_seed_id=0,
        activity_tidbit_we_vote_id='',
        kind_of_seed='',
        kind_of_notice='',
        position_name_list_serialized='',
        position_we_vote_id_list_serialized='',
        recipient_voter_we_vote_id='',
        send_to_email=False,
        send_to_sms=False,
        speaker_name='',
        speaker_organization_we_vote_id='',
        speaker_voter_we_vote_id='',
        speaker_profile_image_url_medium='',
        speaker_profile_image_url_tiny=''):
    """
    Refresh the recent ActivityNotice (header drop-down entry) for this speaker/recipient pair with
    the latest endorsement lists, or create one if no recent notice exists.
    :param activity_notice_seed_id: seed this notice belongs to
    :param activity_tidbit_we_vote_id: the activity tidbit this notice points at
    :param kind_of_seed: / :param kind_of_notice: classification constants stored on the notice
    :param position_name_list_serialized: JSON list of position names (friends + public combined)
    :param position_we_vote_id_list_serialized: JSON list of position we_vote_ids
    :param recipient_voter_we_vote_id: the friend who will see the notice
    :param send_to_email: / :param send_to_sms: delivery flags, only used when creating
    :param speaker_name: / speaker_*: details of the voter who made the endorsements
    :return: dict with 'success' and 'status'
    """
    status = ''
    success = True
    activity_manager = ActivityManager()
    results = activity_manager.retrieve_recent_activity_notice_from_speaker_and_recipient(
        activity_notice_seed_id=activity_notice_seed_id,
        kind_of_notice=kind_of_notice,
        recipient_voter_we_vote_id=recipient_voter_we_vote_id,
        speaker_organization_we_vote_id=speaker_organization_we_vote_id,
        speaker_voter_we_vote_id=speaker_voter_we_vote_id,
    )
    # Combine friends and public into single position_we_vote_id_list_serialized
    if results['activity_notice_found']:
        try:
            activity_notice = results['activity_notice']
            # Only write back when something actually changed -- matches
            #  update_or_create_activity_notice_for_friend_posts and avoids redundant DB saves
            change_found = False
            if activity_notice.position_name_list_serialized != position_name_list_serialized:
                activity_notice.position_name_list_serialized = position_name_list_serialized
                change_found = True
            if activity_notice.position_we_vote_id_list_serialized != position_we_vote_id_list_serialized:
                activity_notice.position_we_vote_id_list_serialized = position_we_vote_id_list_serialized
                change_found = True
            if positive_value_exists(activity_tidbit_we_vote_id) and \
                    activity_tidbit_we_vote_id != activity_notice.activity_tidbit_we_vote_id:
                activity_notice.activity_tidbit_we_vote_id = activity_tidbit_we_vote_id
                change_found = True
            if change_found:
                activity_notice.save()
        except Exception as e:
            status += "FAILED_ACTIVITY_NOTICE_SAVE: " + str(e) + ' '
        status += results['status']
    elif results['success']:
        date_of_notice = now()
        create_results = activity_manager.create_activity_notice(
            activity_notice_seed_id=activity_notice_seed_id,
            activity_tidbit_we_vote_id=activity_tidbit_we_vote_id,
            date_of_notice=date_of_notice,
            kind_of_notice=kind_of_notice,
            kind_of_seed=kind_of_seed,
            position_name_list_serialized=position_name_list_serialized,
            position_we_vote_id_list_serialized=position_we_vote_id_list_serialized,
            recipient_voter_we_vote_id=recipient_voter_we_vote_id,
            send_to_email=send_to_email,
            send_to_sms=send_to_sms,
            speaker_name=speaker_name,
            speaker_organization_we_vote_id=speaker_organization_we_vote_id,
            speaker_voter_we_vote_id=speaker_voter_we_vote_id,
            speaker_profile_image_url_medium=speaker_profile_image_url_medium,
            speaker_profile_image_url_tiny=speaker_profile_image_url_tiny)
        status += create_results['status']
    else:
        status += results['status']
    results = {
        'success': success,
        'status': status,
    }
    return results
def update_or_create_activity_notice_for_friend_posts(
        activity_notice_seed_id=0,
        activity_tidbit_we_vote_id='',
        kind_of_seed='',
        kind_of_notice='',
        number_of_comments=0,
        number_of_likes=0,
        recipient_voter_we_vote_id='',
        send_to_email=False,
        send_to_sms=False,
        speaker_name='',
        speaker_organization_we_vote_id='',
        speaker_voter_we_vote_id='',
        speaker_profile_image_url_medium='',
        speaker_profile_image_url_tiny='',
        statement_text_preview=''):
    """
    Refresh the recent ActivityNotice (header drop-down entry) for this speaker/recipient pair with
    the latest post details, or create one if no recent notice exists. The existing notice is only
    saved when at least one incoming value is set and differs from what is stored.
    :return: dict with 'success' and 'status'
    """
    status = ''
    success = True
    activity_manager = ActivityManager()
    retrieve_results = activity_manager.retrieve_recent_activity_notice_from_speaker_and_recipient(
        activity_notice_seed_id=activity_notice_seed_id,
        kind_of_notice=kind_of_notice,
        recipient_voter_we_vote_id=recipient_voter_we_vote_id,
        speaker_organization_we_vote_id=speaker_organization_we_vote_id,
        speaker_voter_we_vote_id=speaker_voter_we_vote_id,
    )
    if retrieve_results['activity_notice_found']:
        try:
            activity_notice = retrieve_results['activity_notice']
            # Copy over each incoming value that is set and differs from the stored one
            incoming_values = (
                ('activity_tidbit_we_vote_id', activity_tidbit_we_vote_id),
                ('number_of_comments', number_of_comments),
                ('number_of_likes', number_of_likes),
                ('speaker_name', speaker_name),
                ('statement_text_preview', statement_text_preview),
            )
            change_found = False
            for attribute_name, incoming_value in incoming_values:
                if positive_value_exists(incoming_value) \
                        and incoming_value != getattr(activity_notice, attribute_name):
                    setattr(activity_notice, attribute_name, incoming_value)
                    change_found = True
            if change_found:
                activity_notice.save()
        except Exception as e:
            status += "FAILED_ACTIVITY_NOTICE_SAVE: " + str(e) + ' '
        status += retrieve_results['status']
    elif retrieve_results['success']:
        # No recent notice for this pair -- create a fresh one
        create_results = activity_manager.create_activity_notice(
            activity_notice_seed_id=activity_notice_seed_id,
            activity_tidbit_we_vote_id=activity_tidbit_we_vote_id,
            date_of_notice=now(),
            kind_of_notice=kind_of_notice,
            kind_of_seed=kind_of_seed,
            number_of_comments=number_of_comments,
            number_of_likes=number_of_likes,
            recipient_voter_we_vote_id=recipient_voter_we_vote_id,
            send_to_email=send_to_email,
            send_to_sms=send_to_sms,
            speaker_name=speaker_name,
            speaker_organization_we_vote_id=speaker_organization_we_vote_id,
            speaker_voter_we_vote_id=speaker_voter_we_vote_id,
            speaker_profile_image_url_medium=speaker_profile_image_url_medium,
            speaker_profile_image_url_tiny=speaker_profile_image_url_tiny,
            statement_text_preview=statement_text_preview)
        status += create_results['status']
    else:
        status += retrieve_results['status']
    return {
        'success': success,
        'status': status,
    }
def update_or_create_activity_notice_seed_for_activity_posts(
        activity_post_we_vote_id='',
        visibility_is_public=False,
        speaker_name='',
        speaker_organization_we_vote_id='',
        speaker_voter_we_vote_id='',
        speaker_profile_image_url_medium='',
        speaker_profile_image_url_tiny='',
        statement_text=''):
    """
    Update the speaker's recent NOTICE_ACTIVITY_POST_SEED, or create a new one.
    When a recent seed already exists for this speaker, re-gather ALL of their
    recent ActivityPost entries (friends-only and public) and refresh the
    seed's serialized tidbit lists, speaker data and statement_text_preview.
    Otherwise, create a fresh seed containing just this one post.
    NOTE: This is tied to ANY activity_posts
    :param activity_post_we_vote_id: Not used for updates
    :param visibility_is_public: Not used for updates
    :param speaker_name:
    :param speaker_organization_we_vote_id:
    :param speaker_voter_we_vote_id:
    :param speaker_profile_image_url_medium:
    :param speaker_profile_image_url_tiny:
    :param statement_text:
    :return: dict with 'success' and accumulated 'status' text
    """
    status = ''
    success = True
    activity_manager = ActivityManager()
    results = activity_manager.retrieve_recent_activity_notice_seed_from_speaker(
        kind_of_seed=NOTICE_ACTIVITY_POST_SEED,
        speaker_organization_we_vote_id=speaker_organization_we_vote_id,
        speaker_voter_we_vote_id=speaker_voter_we_vote_id,
    )
    if results['activity_notice_seed_found']:
        activity_notice_seed = results['activity_notice_seed']
        try:
            # This SEED might have multiple ActivityPost entries associated with it
            most_recent_activity_post = None
            most_recent_activity_post_date = None
            # Since the activity is being saved microseconds before the activity_notice_seed is stored, we want to
            # "rewind" the date_of_notice by 60 seconds
            since_date = activity_notice_seed.date_of_notice - timedelta(seconds=60)
            # First pass: friends-only posts made since the seed's window opened
            post_results = activity_manager.retrieve_activity_post_list(
                speaker_voter_we_vote_id_list=[speaker_voter_we_vote_id],
                since_date=since_date,
                limit_to_visibility_is_friends_only=True)
            activity_tidbit_we_vote_ids_for_friends = []
            activity_tidbit_we_vote_ids_for_friends_serialized = None
            if post_results['success']:
                friends_post_list = post_results['activity_post_list']
                for one_post in friends_post_list:
                    activity_tidbit_we_vote_ids_for_friends.append(one_post.we_vote_id)
                    # Track the newest post so its statement_text can feed the preview below
                    if not one_post.date_created:
                        pass
                    elif most_recent_activity_post_date and one_post.date_created < most_recent_activity_post_date:
                        pass
                    else:
                        most_recent_activity_post_date = one_post.date_created
                        most_recent_activity_post = one_post
                activity_tidbit_we_vote_ids_for_friends_serialized = json.dumps(activity_tidbit_we_vote_ids_for_friends)
            # Second pass: public posts over the same window
            post_results = activity_manager.retrieve_activity_post_list(
                speaker_voter_we_vote_id_list=[speaker_voter_we_vote_id],
                since_date=since_date,
                limit_to_visibility_is_public=True)
            activity_tidbit_we_vote_ids_for_public = []
            activity_tidbit_we_vote_ids_for_public_serialized = None
            if post_results['success']:
                public_post_list = post_results['activity_post_list']
                for one_post in public_post_list:
                    activity_tidbit_we_vote_ids_for_public.append(one_post.we_vote_id)
                    if not one_post.date_created:
                        pass
                    elif most_recent_activity_post_date and one_post.date_created < most_recent_activity_post_date:
                        pass
                    else:
                        most_recent_activity_post_date = one_post.date_created
                        most_recent_activity_post = one_post
                activity_tidbit_we_vote_ids_for_public_serialized = json.dumps(activity_tidbit_we_vote_ids_for_public)
            # Refresh the seed with the re-gathered lists and the latest speaker info
            activity_notice_seed.activity_tidbit_we_vote_ids_for_friends_serialized = \
                activity_tidbit_we_vote_ids_for_friends_serialized
            activity_notice_seed.activity_tidbit_we_vote_ids_for_public_serialized = \
                activity_tidbit_we_vote_ids_for_public_serialized
            activity_notice_seed.speaker_name = speaker_name
            activity_notice_seed.speaker_profile_image_url_medium = speaker_profile_image_url_medium
            activity_notice_seed.speaker_profile_image_url_tiny = speaker_profile_image_url_tiny
            if most_recent_activity_post and most_recent_activity_post.statement_text:
                activity_notice_seed.statement_text_preview = most_recent_activity_post.statement_text[0:75]
            activity_notice_seed.save()
        except Exception as e:
            # NOTE(review): 'success' stays True even when this save fails — only
            # 'status' records the problem; confirm callers rely on status text only
            status += "COULD_NOT_UPDATE_ACTIVITY_NOTICE_SEED_FOR_POSTS: " + str(e) + " "
        status += results['status']
    elif results['success']:
        # No recent seed for this speaker — create one containing just this post
        date_of_notice = now()
        activity_tidbit_we_vote_ids_for_friends = []
        activity_tidbit_we_vote_ids_for_friends_serialized = None
        activity_tidbit_we_vote_ids_for_public = []
        activity_tidbit_we_vote_ids_for_public_serialized = None
        if positive_value_exists(visibility_is_public):
            activity_tidbit_we_vote_ids_for_public.append(activity_post_we_vote_id)
            activity_tidbit_we_vote_ids_for_public_serialized = json.dumps(activity_tidbit_we_vote_ids_for_public)
        else:
            activity_tidbit_we_vote_ids_for_friends.append(activity_post_we_vote_id)
            activity_tidbit_we_vote_ids_for_friends_serialized = json.dumps(activity_tidbit_we_vote_ids_for_friends)
        if positive_value_exists(statement_text):
            statement_text_preview = statement_text[0:75]
        else:
            statement_text_preview = ''
        create_results = activity_manager.create_activity_notice_seed(
            activity_notices_scheduled=True,  # Set this to true so it gets ignored by the email-sending routine
            activity_tidbit_we_vote_ids_for_friends_serialized=activity_tidbit_we_vote_ids_for_friends_serialized,
            activity_tidbit_we_vote_ids_for_public_serialized=activity_tidbit_we_vote_ids_for_public_serialized,
            date_of_notice=date_of_notice,
            kind_of_seed=NOTICE_ACTIVITY_POST_SEED,
            speaker_name=speaker_name,
            speaker_organization_we_vote_id=speaker_organization_we_vote_id,
            speaker_voter_we_vote_id=speaker_voter_we_vote_id,
            speaker_profile_image_url_medium=speaker_profile_image_url_medium,
            speaker_profile_image_url_tiny=speaker_profile_image_url_tiny,
            statement_text_preview=statement_text_preview)
        status += create_results['status']
    else:
        status += results['status']
    results = {
        'success': success,
        'status': status,
    }
    return results
def update_or_create_activity_notice_seed_for_voter_position(
        position_ballot_item_display_name='',
        position_we_vote_id='',
        is_public_position=False,
        speaker_name='',
        speaker_organization_we_vote_id='',
        speaker_voter_we_vote_id='',
        speaker_profile_image_url_medium='',
        speaker_profile_image_url_tiny=''):
    """
    Update the speaker's recent NOTICE_FRIEND_ENDORSEMENTS_SEED, or create one.
    When a recent seed exists, re-query all of the voter's positions since
    just before the seed was stored and refresh the seed's serialized
    friends/public name and we_vote_id lists plus speaker data. Otherwise,
    create a new seed holding just this one position.
    :param position_ballot_item_display_name: Not used for updates
    :param position_we_vote_id: Not used for updates
    :param is_public_position: Not used for updates
    :param speaker_name:
    :param speaker_organization_we_vote_id:
    :param speaker_voter_we_vote_id:
    :param speaker_profile_image_url_medium:
    :param speaker_profile_image_url_tiny:
    :return: dict with 'success' and accumulated 'status' text
    """
    status = ''
    success = True
    activity_manager = ActivityManager()
    # Imported here (not at module top) — presumably to avoid a circular import; verify
    from position.models import PositionListManager
    position_list_manager = PositionListManager()
    results = activity_manager.retrieve_recent_activity_notice_seed_from_speaker(
        kind_of_seed=NOTICE_FRIEND_ENDORSEMENTS_SEED,
        speaker_organization_we_vote_id=speaker_organization_we_vote_id,
        speaker_voter_we_vote_id=speaker_voter_we_vote_id,
    )
    if results['activity_notice_seed_found']:
        activity_notice_seed = results['activity_notice_seed']
        try:
            # Since the position is being saved microseconds before the activity_notice_seed is stored, we want to
            # "rewind" the date_of_notice by 60 seconds
            since_date = activity_notice_seed.date_of_notice - timedelta(seconds=60)
            position_results = position_list_manager.retrieve_all_positions_for_voter(
                voter_we_vote_id=speaker_voter_we_vote_id,
                since_date=since_date)
            if position_results['success']:
                # Rebuild the friends-only lists from the freshly retrieved positions
                friends_positions_list = position_results['friends_positions_list']
                position_name_list_for_friends = []
                position_we_vote_id_list_for_friends = []
                for one_position in friends_positions_list:
                    position_name_list_for_friends.append(one_position.ballot_item_display_name)
                    position_we_vote_id_list_for_friends.append(one_position.we_vote_id)
                position_names_for_friends_serialized = json.dumps(position_name_list_for_friends)
                position_we_vote_ids_for_friends_serialized = json.dumps(position_we_vote_id_list_for_friends)
                # And the public lists
                public_positions_list = position_results['public_positions_list']
                position_name_list_for_public = []
                position_we_vote_id_list_for_public = []
                for one_position in public_positions_list:
                    position_name_list_for_public.append(one_position.ballot_item_display_name)
                    position_we_vote_id_list_for_public.append(one_position.we_vote_id)
                position_names_for_public_serialized = json.dumps(position_name_list_for_public)
                position_we_vote_ids_for_public_serialized = json.dumps(position_we_vote_id_list_for_public)
            else:
                # If here, there was a problem retrieving positions since the activity_notice_seed was saved,
                # so we just work with the one position_we_vote_id
                if is_public_position:
                    position_names_for_friends_serialized = None
                    position_name_list_for_public = [position_ballot_item_display_name]
                    position_names_for_public_serialized = json.dumps(position_name_list_for_public)
                    position_we_vote_ids_for_friends_serialized = None
                    position_we_vote_id_list_for_public = [position_we_vote_id]
                    position_we_vote_ids_for_public_serialized = json.dumps(position_we_vote_id_list_for_public)
                else:
                    position_name_list_for_friends = [position_ballot_item_display_name]
                    position_names_for_friends_serialized = json.dumps(position_name_list_for_friends)
                    position_names_for_public_serialized = None
                    position_we_vote_id_list_for_friends = [position_we_vote_id]
                    position_we_vote_ids_for_friends_serialized = json.dumps(position_we_vote_id_list_for_friends)
                    position_we_vote_ids_for_public_serialized = None
            # Store the refreshed lists and latest speaker info back onto the seed
            activity_notice_seed.position_names_for_friends_serialized = position_names_for_friends_serialized
            activity_notice_seed.position_names_for_public_serialized = position_names_for_public_serialized
            activity_notice_seed.position_we_vote_ids_for_friends_serialized = \
                position_we_vote_ids_for_friends_serialized
            activity_notice_seed.position_we_vote_ids_for_public_serialized = \
                position_we_vote_ids_for_public_serialized
            activity_notice_seed.speaker_name = speaker_name
            activity_notice_seed.speaker_profile_image_url_medium = speaker_profile_image_url_medium
            activity_notice_seed.speaker_profile_image_url_tiny = speaker_profile_image_url_tiny
            activity_notice_seed.save()
        except Exception as e:
            # NOTE(review): 'success' stays True even when this save fails — only
            # 'status' records the problem; confirm callers rely on status text only
            status += "COULD_NOT_UPDATE_SPEAKER_IMAGES " + str(e) + " "
        status += results['status']
    elif results['success']:
        # No recent seed — create one from just this position
        date_of_notice = now()
        if is_public_position:
            position_name_list_for_public = [position_ballot_item_display_name]
            position_names_for_public_serialized = json.dumps(position_name_list_for_public)
            position_names_for_friends_serialized = None
            position_we_vote_id_list_for_public = [position_we_vote_id]
            position_we_vote_ids_for_public_serialized = json.dumps(position_we_vote_id_list_for_public)
            position_we_vote_ids_for_friends_serialized = None
        else:
            position_name_list_for_friends = [position_ballot_item_display_name]
            position_names_for_friends_serialized = json.dumps(position_name_list_for_friends)
            position_names_for_public_serialized = None
            position_we_vote_id_list_for_friends = [position_we_vote_id]
            position_we_vote_ids_for_friends_serialized = json.dumps(position_we_vote_id_list_for_friends)
            position_we_vote_ids_for_public_serialized = None
        create_results = activity_manager.create_activity_notice_seed(
            date_of_notice=date_of_notice,
            kind_of_seed=NOTICE_FRIEND_ENDORSEMENTS_SEED,
            position_names_for_friends_serialized=position_names_for_friends_serialized,
            position_names_for_public_serialized=position_names_for_public_serialized,
            position_we_vote_ids_for_friends_serialized=position_we_vote_ids_for_friends_serialized,
            position_we_vote_ids_for_public_serialized=position_we_vote_ids_for_public_serialized,
            speaker_name=speaker_name,
            speaker_organization_we_vote_id=speaker_organization_we_vote_id,
            speaker_voter_we_vote_id=speaker_voter_we_vote_id,
            speaker_profile_image_url_medium=speaker_profile_image_url_medium,
            speaker_profile_image_url_tiny=speaker_profile_image_url_tiny)
        status += create_results['status']
    else:
        status += results['status']
    results = {
        'success': success,
        'status': status,
    }
    return results
def update_activity_notice_seed_with_positions(activity_notice_seed):
    """Bring a friend-endorsements seed up to date with the voter's latest positions.

    Seeds older than the seed kind's lifespan are flagged
    (date_of_notice_earlier_than_update_window) and left otherwise untouched.
    Newer seeds have their serialized friends/public position name and
    we_vote_id lists re-queried and saved back if anything differs.

    :param activity_notice_seed: seed object to refresh (saved in place)
    :return: dict with success/status, the seed, whether it changed, and
        whether it fell outside the update window
    """
    status = ''
    success = True
    activity_notice_seed_changed = False
    from activity.models import get_lifespan_of_seed
    kind_of_seed = NOTICE_FRIEND_ENDORSEMENTS_SEED
    lifespan_of_seed_in_seconds = get_lifespan_of_seed(kind_of_seed)  # In seconds
    earliest_date_of_notice = now() - timedelta(seconds=lifespan_of_seed_in_seconds)
    # Too old to update? Flag it and bail out early.
    if activity_notice_seed.date_of_notice < earliest_date_of_notice:
        try:
            activity_notice_seed.date_of_notice_earlier_than_update_window = True
            activity_notice_seed.save()
            activity_notice_seed_changed = True
        except Exception as e:
            status += "COULD_NOT_UPDATE-date_of_notice_earlier_than_update_window: " + str(e) + ' '
        return {
            'success': success,
            'status': status,
            'activity_notice_seed': activity_notice_seed,
            'activity_notice_seed_changed': activity_notice_seed_changed,
            'date_of_notice_earlier_than_update_window': True,
        }

    def deserialize_list(serialized):
        # A stored JSON list, with empty/None treated as an empty list.
        return json.loads(serialized) if positive_value_exists(serialized) else []

    # What values currently exist? We deserialize so we can compare with latest positions
    stored_friend_names = deserialize_list(activity_notice_seed.position_names_for_friends_serialized)
    stored_public_names = deserialize_list(activity_notice_seed.position_names_for_public_serialized)
    stored_friend_ids = deserialize_list(activity_notice_seed.position_we_vote_ids_for_friends_serialized)
    stored_public_ids = deserialize_list(activity_notice_seed.position_we_vote_ids_for_public_serialized)
    from position.models import PositionListManager
    position_list_manager = PositionListManager()
    # "Rewind" 60 seconds so positions saved just before the seed are included
    since_date = activity_notice_seed.date_of_notice - timedelta(seconds=60)
    position_results = position_list_manager.retrieve_all_positions_for_voter(
        voter_we_vote_id=activity_notice_seed.speaker_voter_we_vote_id,
        since_date=since_date)
    if position_results['success']:
        latest_friend_names = []
        latest_friend_ids = []
        for one_position in position_results['friends_positions_list']:
            latest_friend_names.append(one_position.ballot_item_display_name)
            latest_friend_ids.append(one_position.we_vote_id)
        latest_public_names = []
        latest_public_ids = []
        for one_position in position_results['public_positions_list']:
            latest_public_names.append(one_position.ballot_item_display_name)
            latest_public_ids.append(one_position.we_vote_id)
        # Compare as sets: order is irrelevant, only membership matters
        anything_changed = (
            set(stored_friend_names) != set(latest_friend_names)
            or set(stored_public_names) != set(latest_public_names)
            or set(stored_friend_ids) != set(latest_friend_ids)
            or set(stored_public_ids) != set(latest_public_ids))
        if anything_changed:
            try:
                activity_notice_seed.position_names_for_friends_serialized = \
                    json.dumps(latest_friend_names)
                activity_notice_seed.position_names_for_public_serialized = \
                    json.dumps(latest_public_names)
                activity_notice_seed.position_we_vote_ids_for_friends_serialized = \
                    json.dumps(latest_friend_ids)
                activity_notice_seed.position_we_vote_ids_for_public_serialized = \
                    json.dumps(latest_public_ids)
                activity_notice_seed.save()
                activity_notice_seed_changed = True
            except Exception as e:
                success = False
                status += "COULD_NOT_SAVE: " + str(e) + ' '
    return {
        'success': success,
        'status': status,
        'activity_notice_seed': activity_notice_seed,
        'activity_notice_seed_changed': activity_notice_seed_changed,
        'date_of_notice_earlier_than_update_window': False,
    }
def voter_activity_notice_list_retrieve_for_api(voter_device_id):  # voterActivityNoticeListRetrieve
    """Build the voterActivityNoticeListRetrieve API response for this device id.

    See: activity_notice_list_retrieve_view in apis_v1/views/views_activity.py
    :param voter_device_id:
    :return: json-serializable dict for the API response
    """
    status = ""
    success = True
    activity_notice_list_found = False
    activity_notice_list_augmented = []

    def error_payload(error_status):
        # Shared shape for the early-exit error responses.
        return {
            'status': error_status,
            'success': False,
            'voter_device_id': voter_device_id,
            'activity_notice_list_found': False,
            'activity_notice_list': [],
        }

    # If a voter_device_id is passed in that isn't valid, we want to throw an error
    device_id_results = is_voter_device_id_valid(voter_device_id)
    if not device_id_results['success']:
        return error_payload(device_id_results['status'])
    voter_results = VoterManager().retrieve_voter_from_voter_device_id(voter_device_id)
    if not voter_results['voter_found']:
        status += "VOTER_NOT_FOUND_FROM_VOTER_DEVICE_ID "
        return error_payload(status)
    voter = voter_results['voter']
    voter_we_vote_id = voter.we_vote_id  # retained for the pending implementation below
    # TODO: retrieval, duplicate-healing and augmentation of the notice list is
    # not implemented yet; an empty list is returned with success=True.
    return {
        'status': status,
        'success': success,
        'voter_device_id': voter_device_id,
        'activity_notice_list_found': activity_notice_list_found,
        'activity_notice_list': activity_notice_list_augmented,
    }
| {
"repo_name": "wevote/WeVoteServer",
"path": "activity/controllers.py",
"copies": "1",
"size": "109685",
"license": "mit",
"hash": 584912442541290900,
"line_mean": 51.106888361,
"line_max": 120,
"alpha_frac": 0.6082326663,
"autogenerated": false,
"ratio": 3.7671726885561205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48754053548561205,
"avg_score": null,
"num_lines": null
} |
"""Activity data from the Fitbit API.
A note on "creds" parameters:
The dict is expected to have these keys:
- client_id
- client_secret
- authorization_code
It may also have these keys, and they may be updated during function calls:
- access_token
- refresh_token
"""
import logging
from base64 import b64encode
from datetime import datetime
import httpx
_logger = logging.getLogger(__name__)
class CredentialsError(Exception):
    """Credentials are invalid (e.g. empty, or expired and no longer refreshable)."""
async def get_activity(creds: dict, for_date: datetime) -> dict:
    """Return the Fitbit daily activity summary for *for_date*."""
    return await _api_request(creds, f"activities/date/{for_date:%Y-%m-%d}.json")
async def _api_request(creds: dict, url_path: str) -> dict:
    """GET a Fitbit resource, obtaining/refreshing the OAuth token as needed.

    On a 401 the access token is refreshed once and the request retried; a 401
    on the retry means the refresh token is bad too, so CredentialsError is
    raised. Other HTTP errors propagate as httpx.HTTPStatusError.

    :param creds: credentials dict (see module docstring); tokens may be updated
    :param url_path: path under the user's resource root, e.g. "activities/...json"
    :raises CredentialsError: if *creds* is falsy or the tokens cannot be renewed
    """
    if not creds:
        raise CredentialsError
    async with httpx.AsyncClient() as client:
        if not creds.get("access_token"):
            await _get_access_token(client, creds)
        url = "https://api.fitbit.com/1/user/-/" + url_path
        try:
            return await _do_resource_get(client, creds, url)
        except httpx.HTTPStatusError as ex:
            if ex.response.status_code != 401:
                raise
            try:
                await _refresh_access_token(client, creds)
                return await _do_resource_get(client, creds, url)
            # Distinct name: previously the inner exception shadowed the outer `ex`,
            # which made the flow hard to follow and hid the original failure.
            except httpx.HTTPStatusError as retry_ex:
                if retry_ex.response.status_code == 401:
                    raise CredentialsError from retry_ex
                raise
async def _get_access_token(client, creds: dict) -> None:
    """Trade the authorization code in *creds* for access and refresh tokens.

    https://dev.fitbit.com/build/reference/web-api/oauth2/#access_token-request
    """
    token_data = await _do_auth_post(
        client,
        creds,
        {
            "grant_type": "authorization_code",
            "code": creds["authorization_code"],
            "redirect_uri": "http://localhost:5000/fitbit",
        },
    )
    creds["access_token"] = token_data["access_token"]
    creds["refresh_token"] = token_data["refresh_token"]
async def _refresh_access_token(client, creds: dict) -> None:
    """Trade the refresh token in *creds* for a new access/refresh token pair.

    https://dev.fitbit.com/build/reference/web-api/oauth2/#refreshing-tokens
    """
    token_data = await _do_auth_post(
        client,
        creds,
        {
            "grant_type": "refresh_token",
            "refresh_token": creds["refresh_token"],
        },
    )
    creds["access_token"] = token_data["access_token"]
    creds["refresh_token"] = token_data["refresh_token"]
async def _do_resource_get(client, creds: dict, url) -> dict:
    """GET *url* from the resource server using the bearer token in *creds*."""
    response = await client.get(
        url, headers={"Authorization": f"Bearer {creds['access_token']}"}
    )
    response.raise_for_status()
    return response.json()
async def _do_auth_post(client, creds: dict, post_data: dict) -> dict:
    """POST *post_data* to the token endpoint using HTTP basic client auth."""
    basic = b64encode(
        f"{creds['client_id']}:{creds['client_secret']}".encode()
    ).decode()
    response = await client.post(
        "https://api.fitbit.com/oauth2/token",
        headers={"Authorization": f"Basic {basic}"},
        data=post_data,
    )
    if response.is_error:
        # Token errors carry useful detail in the body; log it before raising.
        _logger.error(response.content)
    response.raise_for_status()
    return response.json()
| {
"repo_name": "genericmoniker/mirror",
"path": "backend/src/mirror/plugins/activity/fitbit.py",
"copies": "1",
"size": "3452",
"license": "mit",
"hash": 4975721914366331000,
"line_mean": 33.1782178218,
"line_max": 85,
"alpha_frac": 0.6355735805,
"autogenerated": false,
"ratio": 3.7399783315276274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9874976273068383,
"avg_score": 0.00011512779184895233,
"num_lines": 101
} |
"""Activity forms"""
from django import forms
from django.contrib.admin.widgets import FilteredSelectMultiple
from crispy_forms.helper import FormHelper
from . import BaseSuperHappyForm
from ..models.activity import Activity, ToDo
from ..models.place import Place
class ActivityForm(BaseSuperHappyForm):
    """Activity form"""

    class Meta(object):
        model = Activity
        fields = [
            'id',
            'duration',
            'short_description',
            'long_description',
            'to_dos',
            'places',
        ]

    class Media(object):
        """Static assets to use with the parent form."""
        # Django also includes a few javascript files necessary
        # for the operation of this form element. You need to
        # include <script src="/admin/jsi18n"></script>
        # in the template.
        css = {
            'all': ('admin/css/widgets.css',)
        }

    def __init__(self, *args, **kwargs):
        super(ActivityForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_tag = False

        def stacked_multi_select(model_field, queryset):
            # Non-required multi-select backed by the admin's
            # FilteredSelectMultiple widget, for the given model field.
            return forms.ModelMultipleChoiceField(
                required=False,
                help_text=model_field.help_text,
                queryset=queryset,
                widget=FilteredSelectMultiple(
                    model_field.verbose_name,
                    is_stacked=True,
                ),
            )

        self.fields['to_dos'] = stacked_multi_select(
            Activity.to_dos.field,
            ToDo.objects.all().order_by('short_description'),
        )
        self.fields['places'] = stacked_multi_select(
            Activity.places.field,
            Place.objects.all().order_by('name'),
        )
| {
"repo_name": "jricardo27/holiday_planner",
"path": "holiday_planner/holiday_place/forms/activity.py",
"copies": "1",
"size": "1835",
"license": "bsd-3-clause",
"hash": -6901805244765087000,
"line_mean": 28.5967741935,
"line_max": 70,
"alpha_frac": 0.5776566757,
"autogenerated": false,
"ratio": 4.358669833729216,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5436326509429216,
"avg_score": null,
"num_lines": null
} |
import base64,hashlib,hmac,time,urllib2
from urllib import urlencode, urlopen
from bs4 import BeautifulSoup
from datetime import datetime
class Amazon():
def BrowseNodeExtraction():
# Amazon Access Keys
AWS_ACCESS_KEY_ID = "" //Enter your access key id
AWS_SECRET_ACCESS_KEY = "" //Enter your secret access key
AWS_ASSOCIATE_TAG = "" //Enter your associate tag
# Base URL
base_url = "http://webservices.amazon.in/onca/xml"
NodeDict = {"Books" : "976389031",
"DVD" : "976416031",
"Electronics" : "976419031",
"Home&Kitchen" : "976442031",
"Jewelry" : "1951048031",
"PC HardWare" : "976392031",
"Toys" : "1350380031",
"Watches" : "1350387031"
}
for k,v in NodeDict.items():
print k
CategoryNodeID = raw_input("Enter one of the category: ")
if CategoryNodeID in NodeDict:
nodeID = NodeDict[CategoryNodeID]
else:
"Please check the category you entered"
# for more options check here http://docs.amazonwebservices.com/AWSECommerceService/latest/DG/index.html?SummaryofA2SOperations.html
url_params = {'AWSAccessKeyId':AWS_ACCESS_KEY_ID,
'AssociateTag':AWS_ASSOCIATE_TAG,
'BrowseNodeId':nodeID,
'Operation':"BrowseNodeLookup",
'Service':"AWSECommerceService",
'Timestamp': time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime()),
'Version':"2011-08-01",
'Availability':"Available",
'Condition':"All",
'ResponseGroup':"BrowseNodeInfo",
}
# Sort the URL parameters by key
keys = url_params.keys()
keys.sort()
# Get the values in the same order of the sorted keys
values = map(url_params.get, keys)
# Reconstruct the URL paramters and encode them
url_string = urlencode(zip(keys,values))
url_string = url_string.replace('+',"%20")
url_string = url_string.replace(':',"%3A")
#Construct the string to sign
string_to_sign = """GET
webservices.amazon.in
/onca/xml
%s""" % url_string
# Sign the request
signature = hmac.new(
key=AWS_SECRET_ACCESS_KEY,
msg=string_to_sign,
digestmod=hashlib.sha256).digest()
# Base64 encode the signature
signature = base64.encodestring(signature)
# Make the signature URL safe
signature = urlencode({'Signature': signature})
signature = signature.replace('+','%2B')
signature = signature.replace('=',"%3D")
params = signature
url_string += "&Signature=" + params
url_open = "%s?%s" % (base_url,url_string)
Content = urllib2.urlopen(url_open).read()
soup = BeautifulSoup(Content)
soupContent = soup.find_all("browsenode")
num = 0
category = {}
for tags in soupContent:
if num > 0:
if tags.browsenodeid:
ID = tags.browsenodeid.string
if tags.find_all("name"):
Name = tags.find_all("name")[0].string
category[ID] = Name
num += 1
for key,value in category.items():
print key,":",value
def ItemSearchExtraction():
# Amazon Access Keys
AWS_ACCESS_KEY_ID = ""
AWS_SECRET_ACCESS_KEY = ""
AWS_ASSOCIATE_TAG = ""
# Base URL
base_url = "http://webservices.amazon.in/onca/xml"
ItemDict = ["Books" , "DVD" , "Electronics" , "Jewelry" , "Toys" , "Watches" ]
for values in ItemDict:
print values
CategoryName = raw_input("Enter one of the category: ")
if CategoryName in ItemDict:
category = CategoryName
category = category.replace(" ","")
# for more options check here http://docs.amazonwebservices.com/AWSECommerceService/latest/DG/index.html?SummaryofA2SOperations.html
itemNumber = 0
for x in xrange(2):
pagenumber = str(x)
url_params = {'AWSAccessKeyId':AWS_ACCESS_KEY_ID,
'AssociateTag':AWS_ASSOCIATE_TAG,
'Keywords': category,
'Operation':"ItemSearch",
'Service':"AWSECommerceService",
'Timestamp': time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime()),
'Version':"2011-08-01",
'Availability':"Available",
'Condition':"All",
'ResponseGroup':"ItemAttributes, Images",
'SearchIndex':category,
'ItemPage': pagenumber
}
# Sort the URL parameters by key
keys = url_params.keys()
keys.sort()
# Get the values in the same order of the sorted keys
values = map(url_params.get, keys)
# Reconstruct the URL paramters and encode them
url_string = urlencode(zip(keys,values))
url_string = url_string.replace('+',"%20")
url_string = url_string.replace(':',"%3A")
#Construct the string to sign
string_to_sign = """GET
webservices.amazon.in
/onca/xml
%s""" % url_string
# Sign the request
signature = hmac.new(
key=AWS_SECRET_ACCESS_KEY,
msg=string_to_sign,
digestmod=hashlib.sha256).digest()
# Base64 encode the signature
signature = base64.encodestring(signature)
# Make the signature URL safe
#signature = urlencode({'Signature': signature})
signature = signature.replace('+','%2B')
signature = signature.replace('=',"%3D")
params = signature
url_string += "&Signature=" + params
url_open = "%s?%s" % (base_url,url_string)
Content = urllib2.urlopen(url_open).read()
soup = BeautifulSoup(Content)
if soup.totalresults:
print "Total Items: ",soup.totalresults.string
if soup.totalpages:
print "Total Pages: ",soup.totalpages.string
soupContent = soup.find_all("item")
print "*******************************************"
if category == "Books":
for tags in soupContent:
itemNumber += 1
if tags.asin:
bookAsin = tags.asin.string
else:
bookAsin = "NA"
if tags.detailpageurl:
bookURL = tags.detailpageurl.string
else:
bookURL = "NA"
if tags.author:
bookAuthor = tags.author.string
else:
bookAuthor = "NA"
if tags.smallimage.url:
bookSmallImage = tags.smallimage.url.string
else:
bookSmallImage = "NA"
if tags.mediumimage.url:
bookMediumImage = tags.mediumimage.url.string
else:
bookMediumImage = "NA"
if tags.largeimage.url:
bookLargeImage = tags.largeimage.url.string
else:
bookLargeImage = "NA"
if tags.binding:
bookBinding = tags.binding.string
else:
bookBinding = "NA"
if tags.brand:
bookBrand = tags.brand.string
else:
bookBrand = "NA"
if tags.ean:
bookEan = tags.ean.string
else:
bookEan = "NA"
if tags.eanlist:
if tags.eanlist.eanlistelement:
bookEanlistelement = tags.eanlist.eanlistelement.string
else:
bookEanlistelement = "NA"
if tags.edition:
bookEdition = tags.edition.string
else:
bookEdition = "NA"
if tags.format:
bookFormat = tags.format.string
else:
bookFormat = "NA"
if tags.isbn:
bookIsbn = tags.isbn.string
elif tags.eisbn:
bookIsbn = tags.eisbn.string
else:
bookIsbn = "NA"
if tags.itemdimensions:
if tags.itemdimensions.height:
bookHeight = tags.itemdimensions.height.string
bookHeight = int(bookHeight)/100
else:
bookHeight = "NA"
if tags.itemdimensions:
if tags.itemdimensions.length:
bookLength = tags.itemdimensions.length.string
bookLength = int(bookLength)/100
else:
bookLength = "NA"
if tags.itemdimensions:
if tags.itemdimensions.weight:
bookWeight = tags.itemdimensions.weight.string
bookWeight = int(bookWeight)/100
else:
bookWeight = "NA"
if tags.itemdimensions:
if tags.itemdimensions.width:
bookWidth = tags.itemdimensions.width.string
bookWidth = int(bookWidth)/100
else:
bookWidth = "NA"
if tags.label:
bookLabel = tags.label.string
bookLabel = bookLabel.replace("amp;","")
else:
bookLabel = "NA"
if tags.languages:
if tags.languages.language.find("name"):
bookLanguage = tags.languages.language.find("name").string
else:
bookLanguage = "NA"
if tags.listprice:
if tags.listprice.formattedprice:
price = tags.listprice.formattedprice.string
price = price.split(" ")
bookCurrency = price[0]
bookPrice = price[1]
else:
bookCurrency = "NA"
bookPrice = "NA"
if tags.manufacturer:
bookManufacturer = tags.manufacturer.string
else:
bookManufacturer = "NA"
if tags.numberofpages:
bookNoOfPages = tags.numberofpages.string
else:
bookNoOfPages = "NA"
if tags.productgroup:
bookProductGroup = tags.productgroup.string
else:
bookProductGroup = "NA"
if tags.producttypename:
bookProductTypeName = tags.producttypename.string
else:
bookProductTypeName = "NA"
if tags.publicationdate:
bookPublicationDate = tags.publicationdate.string
else:
bookPublicationDate = "NA"
if tags.publisher:
bookPublisher = tags.publisher.string
bookPublisher = bookPublisher.replace("amp;","")
else:
bookPublisher = "NA"
if tags.studio:
bookStudio = tags.studio.string
bookStudio = bookStudio.replace("amp;","")
else:
bookStudio = "NA"
if tags.title:
bookTitle = tags.title.string
bookTitle = bookTitle.replace("amp;","")
else:
bookTitle = "NA"
print "ItemNumber : ", itemNumber
print "Title : ", bookTitle
print "ASIN : ", bookAsin
print "URL : ", bookURL
print "Author : ", bookAuthor
print "Binding : ", bookBinding
print "EAN : ", bookEan
print "EanListelement: ", bookEanlistelement
print "Edition : ", bookEdition
print "Format : ", bookFormat
print "ISBN : ", bookIsbn
print "Height : ", bookHeight
print "Length : ", bookLength
print "Width : ", bookWidth
print "Weight : ", bookWeight
print "Label : ", bookLabel
print "Language : ", bookLanguage
print "Price : ", bookPrice
print "Currency : ", bookCurrency
print "Manufacturer : ", bookManufacturer
print "Pages : ", bookNoOfPages
print "Product Group : ", bookProductGroup
print "Product Type : ", bookProductTypeName
print "Published Date: ", bookPublicationDate
print "Publisher : ", bookPublisher
print "Studio : ", bookStudio
print "Small Image : ", bookSmallImage
print "Medium Image : ", bookMediumImage
print "Large Image : ", bookLargeImage
print "********************************************"
elif category == "DVD":
for tags in soupContent:
itemNumber += 1
if tags.asin:
dvdAsin = tags.asin.string
else:
dvdAsin = "NA"
if tags.detailpageurl:
dvdURL = tags.detailpageurl.string
else:
dvdURL = "NA"
if tags.find_all("actor"):
dvdActor = [ ]
for subtags in tags.find_all("actor"):
dvdActor.append(subtags.string)
else:
dvdActor = "NA"
if tags.smallimage.url:
dvdSmallImage = tags.smallimage.url.string
else:
dvdSmallImage = "NA"
if tags.mediumimage.url:
dvdMediumImage = tags.mediumimage.url.string
else:
dvdMediumImage = "NA"
if tags.nooftracks:
dvdNoOfTracks = tags.nooftracks.string
else:
dvdNoOfTracks = "NA"
if tags.largeimage.url:
dvdLargeImage = tags.largeimage.url.string
else:
dvdLargeImage = "NA"
if tags.binding:
dvdBinding = tags.binding.string
else:
dvdBinding = "NA"
if tags.director:
dvdDirector = tags.director.string
else:
dvdDirector = "NA"
if tags.ean:
dvdEan = tags.ean.string
else:
dvdEan = "NA"
if tags.eanlist:
if tags.eanlist.eanlistelement:
dvdEanlistelement = tags.eanlist.eanlistelement.string
else:
dvdEanlistelement = "NA"
if tags.format:
dvdFormat = tags.format.string
else:
dvdFormat = "NA"
if tags.genre:
dvdGenre = tags.genre.string
else:
dvdGenre = "NA"
if tags.isAutographed:
dvdAutographed = "True"
else:
dvdAutographed = "False"
if tags.issuesperyear:
dvdIssuesperYear = tags.issuesperyear.string
else:
dvdIssuesperYear = "NA"
if tags.itemdimensions:
if tags.itemdimensions.weight:
dvdWeight = tags.itemdimensions.weight.string
dvdWeight = int(dvdWeight)/100
else:
dvdWeight = "NA"
if tags.label:
dvdLabel = tags.label.string
dvdLabel = dvdLabel.replace("amp;","")
else:
dvdLabel = "NA"
if tags.languages:
if tags.languages.language.find("name"):
dvdLanguage = tags.languages.language.find("name").string
else:
dvdLanguage = "NA"
if tags.listprice:
if tags.listprice.formattedprice:
price = tags.listprice.formattedprice.string
price = price.split(" ")
dvdCurrency = price[0]
dvdPrice = price[1]
else:
dvdCurrency = "NA"
dvdPrice = "NA"
if tags.manufacturer:
dvdManufacturer = tags.manufacturer.string
else:
dvdManufacturer = "NA"
if tags.mediatype:
dvdMediaType = tags.mediatype.string
else:
dvdMediaType = "NA"
if tags.numberofdiscs:
dvdNoOfDiscs = tags.numberofdiscs.string
else:
dvdNoOfDiscs = "NA"
if tags.numberofitems:
dvdNoOfItems = tags.numberofitems.string
else:
dvdNoOfItems = "NA"
if tags.productgroup:
dvdProductGroup = tags.productgroup.string
else:
dvdProductGroup = "NA"
if tags.producttypename:
dvdProductTypeName = tags.producttypename.string
else:
dvdProductTypeName = "NA"
if tags.publicationdate:
dvdPublicationDate = tags.publicationdate.string
else:
dvdPublicationDate = "NA"
if tags.publisher:
dvdPublisher = tags.publisher.string
dvdPublisher = dvdPublisher.replace("amp;","")
else:
dvdPublisher = "NA"
if tags.releasedate:
dvdReleaseDate = tags.releasedate.string
else:
dvdReleaseDate = "NA"
if tags.runningtime:
dvdRunningTime = tags.runningtime.string
else:
dvdRunningTime = "NA"
if tags.studio:
dvdStudio = tags.studio.string
dvdStudio = dvdStudio.replace("amp;","")
else:
dvdStudio = "NA"
if tags.title:
dvdTitle = tags.title.string
dvdTitle = dvdTitle.replace("amp;","")
else:
dvdTitle = "NA"
print "ItemNumber : ", itemNumber
print "Title : ", dvdTitle
print "ASIN : ", dvdAsin
print "URL : ", dvdURL
print "Director : ", dvdDirector
print "Binding : ", dvdBinding
print "Autographed : ", dvdAutographed
print "No of Issues : ", dvdIssuesperYear
print "EAN : ", dvdEan
print "EanListelement: ", dvdEanlistelement
print "Actors : ", dvdActor
print "Format : ", dvdFormat
print "Genre : ", dvdGenre
print "Weight : ", dvdWeight
print "Label : ", dvdLabel
print "Language : ", dvdLanguage
print "Price : ", dvdPrice
print "Currency : ", dvdCurrency
print "Manufacturer : ", dvdManufacturer
print "MediaType : ", dvdMediaType
print "Product Group : ", dvdProductGroup
print "Product Type : ", dvdProductTypeName
print "Published Date: ", dvdPublicationDate
print "Release Date : ", dvdReleaseDate
print "No of Items : ", dvdNoOfItems
print "No of Discs : ", dvdNoOfDiscs
print "No of Tracks : ", dvdNoOfTracks
print "Run Time : ", dvdRunningTime
print "Publisher : ", dvdPublisher
print "Studio : ", dvdStudio
print "Small Image : ", dvdSmallImage
print "Medium Image : ", dvdMediumImage
print "Large Image : ", dvdLargeImage
print "********************************************"
elif category == "Electronics" or "Jewelry" or "Toys" or "Watches" or "PC HardWare":
for tags in soupContent:
itemNumber += 1
if tags.asin:
itemAsin = tags.asin.string
else:
itemAsin = "NA"
if tags.detailpageurl:
itemURL = tags.detailpageurl.string
else:
itemURL = "NA"
if tags.smallimage.url:
itemSmallImage = tags.smallimage.url.string
else:
itemSmallImage = "NA"
if tags.mediumimage.url:
itemMediumImage = tags.mediumimage.url.string
else:
itemMediumImage = "NA"
if tags.largeimage.url:
itemLargeImage = tags.largeimage.url.string
else:
itemLargeImage = "NA"
if tags.binding:
itemBinding = tags.binding.string
else:
itemBinding = "NA"
if tags.find_all("feature"):
itemFeat = tags.find_all("feature")
itemFeature = []
for subtags in itemFeat:
itemFeature.append(subtags.string)
else:
itemFeature = "NA"
if tags.itemdimensions:
if tags.itemdimensions.weight:
itemWeight = tags.itemdimensions.weight.string
itemWeight = float(itemWeight)/100
else:
itemWeight = "NA"
if tags.itemdimensions.height:
itemHeight = tags.itemdimensions.height.string
itemHeight = float(itemHeight)/100
else:
itemHeight = "NA"
if tags.itemdimensions.length:
itemLength = tags.itemdimensions.length.string
itemLength = float(itemLength)/100
else:
itemLength = "NA"
if tags.itemdimensions.width:
itemWidth = tags.itemdimensions.width.string
itemWidth = float(itemWidth)/100
else:
itemWidth = "NA"
else:
itemWeight = "NA"
itemHeight = "NA"
itemLength = "NA"
itemWidth = "NA"
if tags.label:
itemLabel = tags.label.string
itemLabel = itemLabel.replace("amp;","")
else:
itemlabel = "NA"
if tags.listprice:
if tags.listprice.formattedprice:
price = tags.listprice.formattedprice.string
price = price.split(" ")
itemCurrency = price[0]
itemPrice = price[1]
else:
itemCurrency = "NA"
itemPrice = "NA"
if tags.manufacturer:
itemManufacturer = tags.manufacturer.string
else:
itemManufacturer = "NA"
if tags.model:
itemModel = tags.model.string
else:
itemModel = "NA"
if tags.mpn:
itemMpn = tags.mpn.string
else:
itemMpn = "NA"
if tags.partnumber:
itemPartNumber = tags.partnumber.string
else:
itemPartNumber = "NA"
if tags.productgroup:
itemProductGroup = tags.productgroup.string
else:
itemProductGroup = "NA"
if tags.producttypename:
itemProductTypeName = tags.producttypename.string
else:
itemProductTypeName = "NA"
if tags.publicationdate:
itemPublicationDate = tags.publicationdate.string
else:
itemPublicationDate = "NA"
if tags.publisher:
itemPublisher = tags.publisher.string
itemPublisher = itemPublisher.replace("amp;","")
else:
itemPublisher = "NA"
if tags.releasedate:
itemReleaseDate = tags.releasedate.string
else:
itemReleaseDate = "NA"
if tags.studio:
itemStudio = tags.studio.string
itemStudio = itemStudio.replace("amp;","")
else:
itemStudio = "NA"
if tags.title:
itemTitle = tags.title.string
itemTitle = itemTitle.replace("amp;","")
else:
itemTitle = "NA"
if tags.brand:
itemBrand = tags.brand.string
itemBrand = itemBrand.replace("amp;","")
else:
itemBrand = "NA"
if tags.legaldisclaimer:
itemLegalDisclaimer = tags.legaldisclaimer.string
else:
itemLegalDisclaimer = "NA"
if tags.color:
itemColor = tags.color.string
else:
itemColor = "NA"
if tags.department:
itemDepartment = tags.department.string
else:
itemDepartment = "NA"
print "ItemNumber : ", itemNumber
print "Title : ", itemTitle
print "ASIN : ", itemAsin
print "URL : ", itemURL
print "Binding : ", itemBinding
print "Brand : ", itemBrand
print "Features : ", itemFeature
print "Color : ", itemColor
print "Department : ", itemDepartment
print "Weight : ", itemWeight
print "Length : ", itemLength
print "Height : ", itemHeight
print "Width : ", itemWidth
print "Label : ", itemLabel
print "Disclaimer : ", itemLegalDisclaimer
print "Price : ", itemPrice
print "Currency : ", itemCurrency
print "Manufacturer : ", itemManufacturer
print "Model : ", itemModel
print "MPN : ", itemMpn
print "Product Group : ", itemProductGroup
print "Product Type : ", itemProductTypeName
print "Published Date: ", itemPublicationDate
print "Release Date : ", itemReleaseDate
print "Publisher : ", itemPublisher
print "Studio : ", itemStudio
print "Small Image : ", itemSmallImage
print "Medium Image : ", itemMediumImage
print "Large Image : ", itemLargeImage
print "********************************************"
else:
print "Category parsing algorithm not present"
else:
print "Please check the category you entered and try again"
if __name__ == "__main__":
startTime = datetime.now()
print "Start Time:",startTime
#BrowseNodeExtraction()
ItemSearchExtraction()
endTime = datetime.now()
print "End Time:",endTime
totalTimeMicroseconds = endTime.microsecond - startTime.microsecond
totalTimeSeconds = endTime.second - startTime.second
totalTimeMinutes = endTime.minute - startTime.minute
totalTimeHours = endTime.hour - startTime.hour
if totalTimeHours < 0:
totalTimeHours = 24 + totalTimeHours
if totalTimeMinutes < 0:
totalTimeMinutes = 60 + totalTimeMinutes
totalTimeHours -= 1
if totalTimeSeconds < 0:
totalTimeSeconds = 60 + totalTimeSeconds
totalTimeMinutes -= 1
if totalTimeMicroseconds < 0:
totalTimeMicroseconds = 1000000 + totalTimeMicroseconds
totalTimeSeconds -= 1
totalTimeMilliseconds = totalTimeMicroseconds/1000
print "Total Time:",totalTimeHours,"H:",totalTimeMinutes,"M:",totalTimeSeconds,"S.",totalTimeMilliseconds,"ms"
| {
"repo_name": "TejaBeta/Spiders",
"path": "amazon.py",
"copies": "1",
"size": "35681",
"license": "mit",
"hash": -6159251653344000000,
"line_mean": 39.5927189989,
"line_max": 144,
"alpha_frac": 0.3925338415,
"autogenerated": false,
"ratio": 5.768024571613321,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6660558413113321,
"avg_score": null,
"num_lines": null
} |
# activity/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.db import models
from django.db.models import Q
from django.utils.timezone import now
from datetime import timedelta
import json
from wevote_functions.functions import positive_value_exists
from wevote_settings.models import fetch_next_we_vote_id_activity_notice_seed_integer, \
fetch_next_we_vote_id_activity_comment_integer, fetch_next_we_vote_id_activity_post_integer, \
fetch_site_unique_id_prefix
# Kind of Seeds
NOTICE_ACTIVITY_POST_SEED = 'NOTICE_ACTIVITY_POST_SEED'
NOTICE_FRIEND_ENDORSEMENTS_SEED = 'NOTICE_FRIEND_ENDORSEMENTS_SEED'
NOTICE_VOTER_DAILY_SUMMARY_SEED = 'NOTICE_VOTER_DAILY_SUMMARY_SEED' # Activity that touches each voter, for each day
# Kind of Notices
NOTICE_FRIEND_ACTIVITY_POSTS = 'NOTICE_FRIEND_ACTIVITY_POSTS' # Notice shown in header menu, no email sent
NOTICE_FRIEND_ENDORSEMENTS = 'NOTICE_FRIEND_ENDORSEMENTS'
NOTICE_VOTER_DAILY_SUMMARY = 'NOTICE_VOTER_DAILY_SUMMARY' # Email sent, not shown in header menu
FRIENDS_ONLY = 'FRIENDS_ONLY'
SHOW_PUBLIC = 'SHOW_PUBLIC'
class ActivityComment(models.Model):
    """
    A voter-created comment on another item (like an ActivityPost)
    """
    # The ultimate parent of all comments
    parent_we_vote_id = models.CharField(max_length=255, default=None, null=True, db_index=True)
    # The comment that is the parent of this comment (only used when a comment on a comment)
    parent_comment_we_vote_id = models.CharField(max_length=255, default=None, null=True, db_index=True)
    # Denormalized commenter details, cached on the comment row itself
    commenter_name = models.CharField(max_length=255, default=None, null=True)
    commenter_organization_we_vote_id = models.CharField(max_length=255, default=None, null=True)
    commenter_twitter_followers_count = models.PositiveIntegerField(default=None, null=True)
    commenter_twitter_handle = models.CharField(max_length=255, default=None, null=True)
    commenter_voter_we_vote_id = models.CharField(max_length=255, default=None, null=True)
    commenter_profile_image_url_medium = models.TextField(blank=True, null=True)
    commenter_profile_image_url_tiny = models.TextField(blank=True, null=True)
    date_created = models.DateTimeField(null=True)
    date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True)
    # Soft-delete flag; retrieval code elsewhere filters on deleted=False
    deleted = models.BooleanField(default=False)
    # The text of the comment itself
    statement_text = models.TextField(null=True, blank=True)
    # False = visible to friends only; True = publicly visible
    visibility_is_public = models.BooleanField(default=False)
    we_vote_id = models.CharField(max_length=255, default=None, null=True, unique=True, db_index=True)

    # We override the save function so we can auto-generate we_vote_id
    def save(self, *args, **kwargs):
        """Normalize we_vote_id (or generate one when missing), then save."""
        # Even if this data came from another source we still need a unique we_vote_id
        if self.we_vote_id:
            self.we_vote_id = self.we_vote_id.strip().lower()
        if self.we_vote_id == "" or self.we_vote_id is None:  # If there isn't a value...
            # ...generate a new id
            site_unique_id_prefix = fetch_site_unique_id_prefix()
            next_local_integer = fetch_next_we_vote_id_activity_comment_integer()
            # "wv" = We Vote
            # site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
            # "comment" = tells us this is a unique id for an ActivityComment
            # next_integer = a unique, sequential integer for this server - not necessarily tied to database id
            self.we_vote_id = "wv{site_unique_id_prefix}comment{next_integer}".format(
                site_unique_id_prefix=site_unique_id_prefix,
                next_integer=next_local_integer,
            )
        super(ActivityComment, self).save(*args, **kwargs)
class ActivityManager(models.Manager):
    """Manager with helpers for creating and retrieving activity records
    (ActivityNotice, ActivityNoticeSeed, ActivityPost, ActivityComment)."""

    def __unicode__(self):
        return "ActivityManager"
    def create_activity_notice(
            self,
            activity_notice_seed_id=0,
            activity_tidbit_we_vote_id='',
            date_of_notice=None,
            kind_of_notice=None,
            kind_of_seed=None,
            number_of_comments=0,
            number_of_likes=0,
            position_name_list_serialized=None,
            position_we_vote_id_list_serialized=None,
            recipient_voter_we_vote_id='',
            send_to_email=False,
            send_to_sms=False,
            speaker_name='',
            speaker_organization_we_vote_id='',
            speaker_voter_we_vote_id='',
            speaker_profile_image_url_medium='',
            speaker_profile_image_url_tiny='',
            statement_text_preview=''):
        """Create and save one ActivityNotice row for one recipient voter.

        Requires speaker_organization_we_vote_id; returns a results dict with
        'success', 'status', 'activity_notice_saved' and 'activity_notice'
        (None when creation failed or the speaker org id was missing).
        """
        status = ''
        if not positive_value_exists(speaker_organization_we_vote_id):
            # Without a speaker organization we cannot attribute the notice
            activity_notice = None
            results = {
                'success': False,
                'status': "ACTIVITY_NOTICE_MISSING_SPEAKER_ORG_ID ",
                'activity_notice_saved': False,
                'activity_notice': activity_notice,
            }
            return results
        try:
            # new_positions_entered_count is derived from the serialized
            # position id list, not passed in directly
            new_positions_entered_count = 0
            if positive_value_exists(position_we_vote_id_list_serialized):
                position_we_vote_id_list = json.loads(position_we_vote_id_list_serialized)
                new_positions_entered_count += len(position_we_vote_id_list)
            activity_notice = ActivityNotice.objects.create(
                activity_notice_seed_id=activity_notice_seed_id,
                activity_tidbit_we_vote_id=activity_tidbit_we_vote_id,
                date_of_notice=date_of_notice,
                kind_of_notice=kind_of_notice,
                kind_of_seed=kind_of_seed,
                new_positions_entered_count=new_positions_entered_count,
                number_of_comments=number_of_comments,
                number_of_likes=number_of_likes,
                position_name_list_serialized=position_name_list_serialized,
                position_we_vote_id_list_serialized=position_we_vote_id_list_serialized,
                recipient_voter_we_vote_id=recipient_voter_we_vote_id,
                send_to_email=send_to_email,
                send_to_sms=send_to_sms,
                speaker_name=speaker_name,
                speaker_organization_we_vote_id=speaker_organization_we_vote_id,
                speaker_voter_we_vote_id=speaker_voter_we_vote_id,
                speaker_profile_image_url_medium=speaker_profile_image_url_medium,
                speaker_profile_image_url_tiny=speaker_profile_image_url_tiny,
                statement_text_preview=statement_text_preview
            )
            activity_notice_saved = True
            success = True
            status += "ACTIVITY_NOTICE_CREATED "
        except Exception as e:
            activity_notice_saved = False
            activity_notice = None
            success = False
            status += "ACTIVITY_NOTICE_NOT_CREATED: " + str(e) + ' '
        results = {
            'success': success,
            'status': status,
            'activity_notice_saved': activity_notice_saved,
            'activity_notice': activity_notice,
        }
        return results
    def create_activity_notice_seed(
            self,
            activity_notices_created=False,
            activity_notices_scheduled=False,
            activity_tidbit_we_vote_ids_for_friends_serialized='',
            activity_tidbit_we_vote_ids_for_public_serialized='',
            date_of_notice=None,
            kind_of_seed=None,
            position_names_for_friends_serialized='',
            position_names_for_public_serialized='',
            position_we_vote_ids_for_friends_serialized='',
            position_we_vote_ids_for_public_serialized='',
            recipient_name='',
            recipient_voter_we_vote_id='',
            send_to_email=False,  # For VOTER_DAILY_SUMMARY
            send_to_sms=False,  # For VOTER_DAILY_SUMMARY
            speaker_name='',
            speaker_organization_we_vote_id='',
            speaker_organization_we_vote_ids_serialized=None,
            speaker_voter_we_vote_id='',
            speaker_voter_we_vote_ids_serialized=None,
            speaker_profile_image_url_medium='',
            speaker_profile_image_url_tiny='',
            statement_text_preview=''):
        """Create and save one ActivityNoticeSeed row.

        A seed is the source event from which per-voter ActivityNotice rows
        are later fanned out. Requires either a speaker organization id or a
        recipient voter id. Returns a results dict with 'success', 'status',
        'activity_notice_seed_saved' and 'activity_notice_seed'.
        """
        status = ''
        if not positive_value_exists(speaker_organization_we_vote_id) \
                and not positive_value_exists(recipient_voter_we_vote_id):
            # A seed must be anchored to a speaker or a recipient
            activity_notice_seed = None
            results = {
                'success': False,
                'status': "ACTIVITY_NOTICE_SEED_MISSING_SPEAKER_AND_LISTENER ",
                'activity_notice_seed_saved': False,
                'activity_notice_seed': activity_notice_seed,
            }
            return results
        try:
            activity_notice_seed = ActivityNoticeSeed.objects.create(
                activity_notices_created=activity_notices_created,
                activity_notices_scheduled=activity_notices_scheduled,
                activity_tidbit_we_vote_ids_for_friends_serialized=activity_tidbit_we_vote_ids_for_friends_serialized,
                activity_tidbit_we_vote_ids_for_public_serialized=activity_tidbit_we_vote_ids_for_public_serialized,
                date_of_notice=date_of_notice,
                kind_of_seed=kind_of_seed,
                position_names_for_friends_serialized=position_names_for_friends_serialized,
                position_names_for_public_serialized=position_names_for_public_serialized,
                position_we_vote_ids_for_friends_serialized=position_we_vote_ids_for_friends_serialized,
                position_we_vote_ids_for_public_serialized=position_we_vote_ids_for_public_serialized,
                recipient_name=recipient_name,
                recipient_voter_we_vote_id=recipient_voter_we_vote_id,
                send_to_email=send_to_email,
                send_to_sms=send_to_sms,
                speaker_name=speaker_name,
                speaker_organization_we_vote_id=speaker_organization_we_vote_id,
                speaker_organization_we_vote_ids_serialized=speaker_organization_we_vote_ids_serialized,
                speaker_voter_we_vote_id=speaker_voter_we_vote_id,
                speaker_voter_we_vote_ids_serialized=speaker_voter_we_vote_ids_serialized,
                speaker_profile_image_url_medium=speaker_profile_image_url_medium,
                speaker_profile_image_url_tiny=speaker_profile_image_url_tiny,
                statement_text_preview=statement_text_preview
            )
            activity_notice_seed_saved = True
            success = True
            status += "ACTIVITY_NOTICE_SEED_CREATED "
        except Exception as e:
            activity_notice_seed_saved = False
            activity_notice_seed = None
            success = False
            status += "ACTIVITY_NOTICE_SEED_NOT_CREATED: " + str(e) + ' '
        results = {
            'success': success,
            'status': status,
            'activity_notice_seed_saved': activity_notice_seed_saved,
            'activity_notice_seed': activity_notice_seed,
        }
        return results
def create_activity_post(
self,
sender_voter_we_vote_id,
sender_voter_sms,
recipient_voter_we_vote_id='',
recipient_sms_we_vote_id='',
recipient_voter_sms='',
template_variables_in_json='',
kind_of_sms_template=''):
status = ""
success = True
try:
activity_post = ActivityPost.objects.create(
kind_of_sms_template=kind_of_sms_template,
recipient_voter_we_vote_id=recipient_voter_we_vote_id,
recipient_sms_we_vote_id=recipient_sms_we_vote_id,
recipient_voter_sms=recipient_voter_sms,
sender_voter_we_vote_id=sender_voter_we_vote_id,
sender_voter_sms=sender_voter_sms,
template_variables_in_json=template_variables_in_json,
)
activity_post_saved = True
success = True
status += "SMS_DESCRIPTION_CREATED "
except Exception as e:
activity_post_saved = False
activity_post = ActivityPost()
success = False
status += "SMS_DESCRIPTION_NOT_CREATED " + str(e) + ' '
results = {
'success': success,
'status': status,
'activity_post_saved': activity_post_saved,
'activity_post': activity_post,
}
return results
def fetch_number_of_comments(self, parent_we_vote_id='', parent_comment_we_vote_id=''):
results = self.retrieve_number_of_comments(
parent_we_vote_id=parent_we_vote_id,
parent_comment_we_vote_id=parent_comment_we_vote_id)
return results['number_of_comments']
def retrieve_number_of_comments(self, parent_we_vote_id='', parent_comment_we_vote_id=''):
"""
:param parent_we_vote_id:
:param parent_comment_we_vote_id:
:return:
"""
status = ""
success = True
if not positive_value_exists(parent_we_vote_id) and not positive_value_exists(parent_comment_we_vote_id):
success = False
status += 'VALID_PARENT_OR_PARENT_COMMENT_WE_VOTE_ID_MISSING-NUMBER_OF_COMMENTS '
results = {
'success': success,
'status': status,
'parent_we_vote_id': parent_we_vote_id,
'parent_comment_we_vote_id': parent_comment_we_vote_id,
'number_of_comments': 0,
}
return results
number_of_comments = 0
try:
if positive_value_exists(parent_comment_we_vote_id):
queryset = ActivityComment.objects.all()
queryset = queryset.filter(
parent_comment_we_vote_id__iexact=parent_comment_we_vote_id,
deleted=False
)
else:
queryset = ActivityComment.objects.all()
queryset = queryset.filter(
parent_we_vote_id__iexact=parent_we_vote_id,
deleted=False
)
# Don't retrieve entries where there is a value for parent_comment_we_vote_id
queryset = queryset.filter(
Q(parent_comment_we_vote_id=None) | Q(parent_comment_we_vote_id=""))
queryset = queryset.exclude(
Q(parent_we_vote_id=None) | Q(parent_we_vote_id=""))
number_of_comments = queryset.count()
except Exception as e:
success = False
status += 'FAILED retrieve_number_of_comments ActivityComment: ' + str(e) + ' '
results = {
'success': success,
'status': status,
'parent_we_vote_id': parent_we_vote_id,
'parent_comment_we_vote_id': parent_comment_we_vote_id,
'number_of_comments': number_of_comments,
}
return results
def retrieve_activity_comment_list(self, parent_we_vote_id='', parent_comment_we_vote_id=''):
"""
:param parent_we_vote_id:
:param parent_comment_we_vote_id:
:return:
"""
status = ""
success = True
if not positive_value_exists(parent_we_vote_id) and not positive_value_exists(parent_comment_we_vote_id):
success = False
status += 'VALID_PARENT_OR_PARENT_COMMENT_WE_VOTE_ID_MISSING '
results = {
'success': success,
'status': status,
'parent_we_vote_id': parent_we_vote_id,
'parent_comment_we_vote_id': parent_comment_we_vote_id,
'activity_comment_list_found': False,
'activity_comment_list': [],
}
return results
activity_comment_list = []
try:
if positive_value_exists(parent_comment_we_vote_id):
queryset = ActivityComment.objects.all()
queryset = queryset.filter(
parent_comment_we_vote_id__iexact=parent_comment_we_vote_id,
deleted=False
)
else:
queryset = ActivityComment.objects.all()
queryset = queryset.filter(
parent_we_vote_id__iexact=parent_we_vote_id,
deleted=False
)
# Don't retrieve entries where there is a value for parent_comment_we_vote_id
queryset = queryset.filter(
Q(parent_comment_we_vote_id=None) | Q(parent_comment_we_vote_id=""))
queryset = queryset.exclude(
Q(parent_we_vote_id=None) | Q(parent_we_vote_id=""))
queryset = queryset.order_by('-id') # Put most recent at top of list
activity_comment_list = list(queryset)
if len(activity_comment_list):
activity_comment_list_found = True
status += 'ACTIVITY_COMMENT_LIST_RETRIEVED '
else:
activity_comment_list_found = False
status += 'NO_ACTIVITY_COMMENT_LIST_RETRIEVED '
except ActivityComment.DoesNotExist:
# No data found. Not a problem.
activity_comment_list_found = False
status += 'NO_ACTIVITY_COMMENT_LIST_RETRIEVED_DoesNotExist '
activity_comment_list = []
except Exception as e:
success = False
activity_comment_list_found = False
status += 'FAILED retrieve_activity_comment_list ActivityComment: ' + str(e) + ' '
results = {
'success': success,
'status': status,
'parent_we_vote_id': parent_we_vote_id,
'parent_comment_we_vote_id': parent_comment_we_vote_id,
'activity_comment_list_found': activity_comment_list_found,
'activity_comment_list': activity_comment_list,
}
return results
def retrieve_activity_notice_seed_list(
self,
notices_to_be_created=False):
status = ""
activity_notice_seed_list = []
try:
queryset = ActivityNoticeSeed.objects.all()
queryset = queryset.filter(deleted=False)
if positive_value_exists(notices_to_be_created):
queryset = queryset.filter(activity_notices_created=False)
queryset = queryset.order_by('-id') # Put most recent at top of list
activity_notice_seed_list = list(queryset)
if len(activity_notice_seed_list):
success = True
activity_notice_seed_list_found = True
status += 'ACTIVITY_NOTICE_SEED_LIST_RETRIEVED '
else:
success = True
activity_notice_seed_list_found = False
status += 'NO_ACTIVITY_NOTICE_SEED_LIST_RETRIEVED '
except Exception as e:
success = False
activity_notice_seed_list_found = False
status += 'FAILED retrieve_activity_notice_seed_list ActivityNoticeSeed ' + str(e) + ' '
results = {
'success': success,
'status': status,
'activity_notice_seed_list_found': activity_notice_seed_list_found,
'activity_notice_seed_list': activity_notice_seed_list,
}
return results
def retrieve_activity_notice_list(
self,
activity_notice_seed_id=0,
to_be_sent_to_email=False,
to_be_sent_to_sms=False,
retrieve_count_limit=0,
activity_notice_id_already_reviewed_list=[]):
status = ""
activity_notice_list = []
try:
queryset = ActivityNotice.objects.all()
queryset = queryset.filter(deleted=False)
if positive_value_exists(activity_notice_seed_id):
queryset = queryset.filter(activity_notice_seed_id=activity_notice_seed_id)
if positive_value_exists(to_be_sent_to_email):
queryset = queryset.filter(send_to_email=True)
queryset = queryset.filter(scheduled_to_email=False)
queryset = queryset.filter(sent_to_email=False)
elif positive_value_exists(to_be_sent_to_sms):
queryset = queryset.filter(send_to_sms=True)
queryset = queryset.filter(scheduled_to_sms=False)
queryset = queryset.filter(sent_to_sms=False)
if activity_notice_id_already_reviewed_list and len(activity_notice_id_already_reviewed_list) > 0:
queryset = queryset.exclude(id__in=activity_notice_id_already_reviewed_list)
queryset = queryset.order_by('-id') # Put most recent at top of list
if positive_value_exists(retrieve_count_limit):
activity_notice_list = queryset[:retrieve_count_limit]
else:
activity_notice_list = list(queryset)
if len(activity_notice_list):
success = True
activity_notice_list_found = True
status += 'ACTIVITY_NOTICE_LIST_RETRIEVED '
else:
success = True
activity_notice_list_found = False
status += 'NO_ACTIVITY_NOTICE_LIST_RETRIEVED '
except Exception as e:
success = False
activity_notice_list_found = False
status += 'FAILED retrieve_activity_notice_list: ' + str(e) + ' '
results = {
'success': success,
'status': status,
'activity_notice_list_found': activity_notice_list_found,
'activity_notice_list': activity_notice_list,
}
return results
def retrieve_recent_activity_notice_seed_from_listener(
self,
kind_of_seed='',
recipient_voter_we_vote_id=''):
"""
:param kind_of_seed:
:param recipient_voter_we_vote_id:
:return:
"""
exception_does_not_exist = False
exception_multiple_object_returned = False
activity_notice_seed_found = False
activity_notice_seed = None
activity_notice_seed_id = 0
status = ""
lifespan_of_seed_in_seconds = get_lifespan_of_seed(kind_of_seed) # In seconds
earliest_date_of_notice = now() - timedelta(seconds=lifespan_of_seed_in_seconds)
try:
if positive_value_exists(recipient_voter_we_vote_id):
activity_notice_seed = ActivityNoticeSeed.objects.get(
date_of_notice__gte=earliest_date_of_notice,
deleted=False,
kind_of_seed=kind_of_seed,
recipient_voter_we_vote_id__iexact=recipient_voter_we_vote_id,
)
activity_notice_seed_id = activity_notice_seed.id
activity_notice_seed_found = True
success = True
status += "RETRIEVE_RECENT_ACTIVITY_NOTICE_SEED_FOUND_BY_LISTENER_VOTER_WE_VOTE_ID "
else:
activity_notice_seed_found = False
success = False
status += "RETRIEVE_RECENT_ACTIVITY_NOTICE_SEED_LISTENER_VOTER_WE_VOTE_ID_MISSING "
except ActivityNoticeSeed.DoesNotExist:
exception_does_not_exist = True
success = True
status += "RETRIEVE_RECENT_ACTIVITY_NOTICE_SEED_NOT_FOUND "
except Exception as e:
success = False
status += 'FAILED retrieve_recent_activity_notice_seed_from_listener ActivityNoticeSeed: ' + str(e) + ' '
results = {
'success': success,
'status': status,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'activity_notice_seed_found': activity_notice_seed_found,
'activity_notice_seed_id': activity_notice_seed_id,
'activity_notice_seed': activity_notice_seed,
}
return results
def retrieve_recent_activity_notice_from_speaker_and_recipient(
self,
activity_notice_seed_id=0,
kind_of_notice='',
recipient_voter_we_vote_id='',
speaker_organization_we_vote_id='',
speaker_voter_we_vote_id=''):
exception_does_not_exist = False
exception_multiple_object_returned = False
activity_notice = None
activity_notice_found = False
activity_notice_id = 0
status = ""
try:
if positive_value_exists(speaker_organization_we_vote_id):
activity_notice = ActivityNotice.objects.get(
activity_notice_seed_id=activity_notice_seed_id,
deleted=False,
kind_of_notice=kind_of_notice,
recipient_voter_we_vote_id__iexact=recipient_voter_we_vote_id,
speaker_organization_we_vote_id__iexact=speaker_organization_we_vote_id,
)
activity_notice_id = activity_notice.id
activity_notice_found = True
success = True
status += "RETRIEVE_RECENT_ACTIVITY_NOTICE_FOUND_BY_ORG_WE_VOTE_ID "
elif positive_value_exists(speaker_voter_we_vote_id):
activity_notice = ActivityNotice.objects.get(
activity_notice_seed_id=activity_notice_seed_id,
deleted=False,
kind_of_notice=kind_of_notice,
recipient_voter_we_vote_id__iexact=recipient_voter_we_vote_id,
speaker_voter_we_vote_id__iexact=speaker_voter_we_vote_id,
)
activity_notice_id = activity_notice.id
activity_notice_found = True
success = True
status += "RETRIEVE_RECENT_ACTIVITY_NOTICE_FOUND_BY_VOTER_WE_VOTE_ID "
else:
activity_notice_found = False
success = False
status += "RETRIEVE_RECENT_ACTIVITY_NOTICE_VARIABLES_MISSING "
except ActivityNotice.DoesNotExist:
exception_does_not_exist = True
success = True
status += "RETRIEVE_RECENT_ACTIVITY_NOTICE_NOT_FOUND "
except Exception as e:
success = False
status += 'FAILED retrieve_recent_activity_notice_from_speaker_and_recipient: ' + str(e) + ' '
results = {
'success': success,
'status': status,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'activity_notice_found': activity_notice_found,
'activity_notice_id': activity_notice_id,
'activity_notice': activity_notice,
}
return results
def retrieve_recent_activity_notice_seed_from_speaker(
self,
kind_of_seed='',
speaker_organization_we_vote_id='',
speaker_voter_we_vote_id=''):
"""
:param kind_of_seed:
:param speaker_organization_we_vote_id:
:param speaker_voter_we_vote_id:
:return:
"""
exception_does_not_exist = False
exception_multiple_object_returned = False
activity_notice_seed_found = False
activity_notice_seed = None
activity_notice_seed_id = 0
status = ""
lifespan_of_seed_in_seconds = get_lifespan_of_seed(kind_of_seed) # In seconds
earliest_date_of_notice = now() - timedelta(seconds=lifespan_of_seed_in_seconds)
try:
if positive_value_exists(speaker_organization_we_vote_id):
activity_notice_seed = ActivityNoticeSeed.objects.get(
date_of_notice__gte=earliest_date_of_notice,
deleted=False,
kind_of_seed=kind_of_seed,
speaker_organization_we_vote_id__iexact=speaker_organization_we_vote_id,
)
activity_notice_seed_id = activity_notice_seed.id
activity_notice_seed_found = True
success = True
status += "RETRIEVE_RECENT_ACTIVITY_NOTICE_SEED_FOUND_BY_ORG_WE_VOTE_ID "
elif positive_value_exists(speaker_voter_we_vote_id):
activity_notice_seed = ActivityNoticeSeed.objects.get(
date_of_notice__gte=earliest_date_of_notice,
deleted=False,
kind_of_seed=kind_of_seed,
speaker_voter_we_vote_id__iexact=speaker_voter_we_vote_id,
)
activity_notice_seed_id = activity_notice_seed.id
activity_notice_seed_found = True
success = True
status += "RETRIEVE_RECENT_ACTIVITY_NOTICE_SEED_FOUND_BY_VOTER_WE_VOTE_ID "
else:
activity_notice_seed_found = False
success = False
status += "RETRIEVE_RECENT_ACTIVITY_NOTICE_SEED_VARIABLES_MISSING "
except ActivityNoticeSeed.DoesNotExist:
exception_does_not_exist = True
success = True
status += "RETRIEVE_RECENT_ACTIVITY_NOTICE_SEED_NOT_FOUND "
except Exception as e:
success = False
status += 'FAILED retrieve_recent_activity_notice_seed_from_speaker ActivityNoticeSeed ' + str(e) + ' '
results = {
'success': success,
'status': status,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'activity_notice_seed_found': activity_notice_seed_found,
'activity_notice_seed_id': activity_notice_seed_id,
'activity_notice_seed': activity_notice_seed,
}
return results
def retrieve_activity_notice_list_for_recipient(self, recipient_voter_we_vote_id=''):
"""
:param recipient_voter_we_vote_id:
:return:
"""
status = ""
if not positive_value_exists(recipient_voter_we_vote_id):
success = False
status += 'VALID_VOTER_WE_VOTE_ID_MISSING '
results = {
'success': success,
'status': status,
'recipient_voter_we_vote_id': recipient_voter_we_vote_id,
'activity_notice_list_found': False,
'activity_notice_list': [],
}
return results
activity_notice_list = []
try:
queryset = ActivityNotice.objects.all()
queryset = queryset.filter(
recipient_voter_we_vote_id__iexact=recipient_voter_we_vote_id,
deleted=False
)
queryset = queryset.exclude(
Q(recipient_voter_we_vote_id=None) | Q(recipient_voter_we_vote_id=""))
queryset = queryset.order_by('-id') # Put most recent at top of list
activity_notice_list = queryset[:30]
if len(activity_notice_list):
success = True
activity_notice_list_found = True
status += 'ACTIVITY_NOTICE_LIST_RETRIEVED '
else:
success = True
activity_notice_list_found = False
status += 'NO_ACTIVITY_NOTICE_LIST_RETRIEVED '
except ActivityNotice.DoesNotExist:
# No data found. Not a problem.
success = True
activity_notice_list_found = False
status += 'NO_ACTIVITY_NOTICE_LIST_RETRIEVED_DoesNotExist '
activity_notice_list = []
except Exception as e:
success = False
activity_notice_list_found = False
status += 'FAILED retrieve_voter_activity_notice_list ActivityNotice ' + str(e) + ' '
results = {
'success': success,
'status': status,
'activity_notice_list_found': activity_notice_list_found,
'activity_notice_list': activity_notice_list,
'recipient_voter_we_vote_id': recipient_voter_we_vote_id,
}
return results
def retrieve_activity_notice_seed_list_for_recipient(
self,
recipient_voter_we_vote_id='',
kind_of_seed_list=None,
limit_to_activity_tidbit_we_vote_id_list=[]):
"""
:param recipient_voter_we_vote_id:
:param kind_of_seed_list:
:param limit_to_activity_tidbit_we_vote_id_list:
:return:
"""
status = ""
if not positive_value_exists(recipient_voter_we_vote_id):
success = False
status += 'VALID_VOTER_WE_VOTE_ID_MISSING '
results = {
'success': success,
'status': status,
'recipient_voter_we_vote_id': recipient_voter_we_vote_id,
'activity_notice_seed_list_found': False,
'activity_notice_seed_list': [],
'voter_friend_we_vote_id_list': [],
}
return results
activity_notice_seed_list = []
voter_friend_we_vote_id_list = []
voter_friend_we_vote_id_list.append(recipient_voter_we_vote_id)
from friend.models import FriendManager
friend_manager = FriendManager()
friend_results = friend_manager.retrieve_friends_we_vote_id_list(recipient_voter_we_vote_id)
if friend_results['friends_we_vote_id_list_found']:
friends_we_vote_id_list = friend_results['friends_we_vote_id_list']
voter_friend_we_vote_id_list += friends_we_vote_id_list
try:
queryset = ActivityNoticeSeed.objects.all()
queryset = queryset.filter(deleted=False)
queryset = queryset.filter(speaker_voter_we_vote_id__in=voter_friend_we_vote_id_list)
if limit_to_activity_tidbit_we_vote_id_list and len(limit_to_activity_tidbit_we_vote_id_list) > 0:
queryset = queryset.filter(we_vote_id__in=limit_to_activity_tidbit_we_vote_id_list)
if kind_of_seed_list and len(kind_of_seed_list) > 0:
queryset = queryset.filter(kind_of_seed__in=kind_of_seed_list)
queryset = queryset.exclude(
Q(speaker_voter_we_vote_id=None) | Q(speaker_voter_we_vote_id=""))
queryset = queryset.order_by('-id') # Put most recent at top of list
activity_notice_seed_list = queryset[:200]
if len(activity_notice_seed_list):
success = True
activity_notice_seed_list_found = True
status += 'ACTIVITY_NOTICE_SEED_LIST_RETRIEVED '
else:
success = True
activity_notice_seed_list_found = False
status += 'NO_ACTIVITY_NOTICE_SEED_LIST_RETRIEVED '
except ActivityNoticeSeed.DoesNotExist:
# No data found. Not a problem.
success = True
activity_notice_seed_list_found = False
status += 'NO_ACTIVITY_NOTICE_SEED_LIST_RETRIEVED_DoesNotExist '
activity_notice_seed_list = []
except Exception as e:
success = False
activity_notice_seed_list_found = False
status += 'FAILED retrieve_voter_activity_notice_seed_list: ' + str(e) + ' '
results = {
'success': success,
'status': status,
'recipient_voter_we_vote_id': recipient_voter_we_vote_id,
'activity_notice_seed_list_found': activity_notice_seed_list_found,
'activity_notice_seed_list': activity_notice_seed_list,
'voter_friend_we_vote_id_list': voter_friend_we_vote_id_list,
}
return results
def retrieve_next_activity_notice_seed_to_process(
self,
notices_to_be_created=False,
notices_to_be_scheduled=False,
notices_to_be_updated=False,
to_be_added_to_voter_daily_summary=False,
activity_notice_seed_id_already_reviewed_list=[]):
status = ""
activity_notice_seed = None
try:
queryset = ActivityNoticeSeed.objects.all()
queryset = queryset.filter(deleted=False)
if positive_value_exists(notices_to_be_created):
queryset = queryset.filter(activity_notices_created=False)
queryset = \
queryset.filter(kind_of_seed__in=[NOTICE_ACTIVITY_POST_SEED, NOTICE_FRIEND_ENDORSEMENTS_SEED])
elif positive_value_exists(notices_to_be_scheduled):
queryset = queryset.filter(activity_notices_scheduled=False)
queryset = queryset.filter(
kind_of_seed__in=[NOTICE_FRIEND_ENDORSEMENTS_SEED, NOTICE_VOTER_DAILY_SUMMARY_SEED])
elif positive_value_exists(notices_to_be_updated):
queryset = queryset.filter(activity_notices_created=True)
queryset = queryset.filter(date_of_notice_earlier_than_update_window=False)
queryset = queryset.filter(
kind_of_seed__in=[NOTICE_ACTIVITY_POST_SEED, NOTICE_FRIEND_ENDORSEMENTS_SEED])
elif positive_value_exists(to_be_added_to_voter_daily_summary):
queryset = queryset.filter(added_to_voter_daily_summary=False)
queryset = queryset.filter(
kind_of_seed__in=[NOTICE_ACTIVITY_POST_SEED, NOTICE_FRIEND_ENDORSEMENTS_SEED])
if activity_notice_seed_id_already_reviewed_list and len(activity_notice_seed_id_already_reviewed_list) > 0:
queryset = queryset.exclude(id__in=activity_notice_seed_id_already_reviewed_list)
queryset = queryset.order_by('-id') # Put most recent at top of list
activity_notice_seed_list = queryset[:1]
if len(activity_notice_seed_list):
success = True
activity_notice_seed = activity_notice_seed_list[0]
activity_notice_seed_found = True
status += 'ACTIVITY_NOTICE_SEED_RETRIEVED '
else:
success = True
activity_notice_seed = None
activity_notice_seed_found = False
status += 'NO_ACTIVITY_NOTICE_SEED_RETRIEVED '
except Exception as e:
success = False
activity_notice_seed_found = False
status += 'FAILED retrieve_activity_notice_seed ActivityNoticeSeed ' + str(e) + ' '
results = {
'success': success,
'status': status,
'activity_notice_seed_found': activity_notice_seed_found,
'activity_notice_seed': activity_notice_seed,
}
return results
def retrieve_activity_post_list(
self,
speaker_voter_we_vote_id_list=[],
limit_to_visibility_is_friends_only=False,
limit_to_visibility_is_public=False,
since_date=None):
"""
:param speaker_voter_we_vote_id_list:
:param limit_to_visibility_is_friends_only:
:param limit_to_visibility_is_public:
:param since_date:
:return:
"""
status = ""
if not speaker_voter_we_vote_id_list or len(speaker_voter_we_vote_id_list) == 0:
success = False
status += 'VALID_VOTER_WE_VOTE_IDS_MISSING '
results = {
'success': success,
'status': status,
'activity_post_list_found': False,
'activity_post_list': [],
}
return results
activity_post_list = []
try:
queryset = ActivityPost.objects.all()
queryset = queryset.filter(
speaker_voter_we_vote_id__in=speaker_voter_we_vote_id_list,
deleted=False
)
if positive_value_exists(since_date):
queryset = queryset.filter(date_created__gte=since_date)
if positive_value_exists(limit_to_visibility_is_friends_only):
queryset = queryset.filter(visibility_is_public=False)
elif positive_value_exists(limit_to_visibility_is_public):
queryset = queryset.filter(visibility_is_public=True)
queryset = queryset.exclude(
Q(speaker_voter_we_vote_id=None) | Q(speaker_voter_we_vote_id=""))
queryset = queryset.order_by('-id') # Put most recent ActivityPost at top of list
activity_post_list = queryset[:200]
if len(activity_post_list):
success = True
activity_post_list_found = True
status += 'ACTIVITY_POST_LIST_RETRIEVED '
else:
success = True
activity_post_list_found = False
status += 'NO_ACTIVITY_POST_LIST_RETRIEVED '
except ActivityPost.DoesNotExist:
# No data found. Not a problem.
success = True
activity_post_list_found = False
status += 'NO_ACTIVITY_POST_LIST_RETRIEVED_DoesNotExist '
activity_post_list = []
except Exception as e:
success = False
activity_post_list_found = False
status += 'FAILED retrieve_activity_post_list: ' + str(e) + ' '
results = {
'success': success,
'status': status,
'activity_post_list_found': activity_post_list_found,
'activity_post_list': activity_post_list,
}
return results
def retrieve_activity_post_list_for_recipient(
self,
recipient_voter_we_vote_id='',
limit_to_activity_tidbit_we_vote_id_list=[],
voter_friend_we_vote_id_list=[]):
"""
:param recipient_voter_we_vote_id:
:param limit_to_activity_tidbit_we_vote_id_list:
:param voter_friend_we_vote_id_list:
:return:
"""
status = ""
if not positive_value_exists(recipient_voter_we_vote_id):
success = False
status += 'VALID_VOTER_WE_VOTE_ID_MISSING '
results = {
'success': success,
'status': status,
'recipient_voter_we_vote_id': recipient_voter_we_vote_id,
'activity_post_list_found': False,
'activity_post_list': [],
}
return results
activity_post_list = []
if not positive_value_exists(len(voter_friend_we_vote_id_list)):
voter_friend_we_vote_id_list = []
voter_friend_we_vote_id_list.append(recipient_voter_we_vote_id)
from friend.models import FriendManager
friend_manager = FriendManager()
friend_results = friend_manager.retrieve_friends_we_vote_id_list(recipient_voter_we_vote_id)
if friend_results['friends_we_vote_id_list_found']:
friends_we_vote_id_list = friend_results['friends_we_vote_id_list']
voter_friend_we_vote_id_list += friends_we_vote_id_list
try:
queryset = ActivityPost.objects.all()
queryset = queryset.filter(deleted=False)
if limit_to_activity_tidbit_we_vote_id_list and len(limit_to_activity_tidbit_we_vote_id_list) > 0:
queryset = queryset.filter(we_vote_id__in=limit_to_activity_tidbit_we_vote_id_list)
# Allow the public ActivityPosts to be found
queryset = queryset.filter(
Q(speaker_voter_we_vote_id__in=voter_friend_we_vote_id_list) | Q(visibility_is_public=True))
else:
queryset = queryset.filter(speaker_voter_we_vote_id__in=voter_friend_we_vote_id_list)
queryset = queryset.exclude(
Q(speaker_voter_we_vote_id=None) | Q(speaker_voter_we_vote_id=""))
queryset = queryset.order_by('-id') # Put most recent ActivityPost at top of list
activity_post_list = queryset[:200]
if len(activity_post_list):
success = True
activity_post_list_found = True
status += 'ACTIVITY_POST_LIST_FOR_RECIPIENT_RETRIEVED '
else:
success = True
activity_post_list_found = False
status += 'NO_ACTIVITY_POST_LIST_FOR_RECIPIENT_RETRIEVED '
except ActivityPost.DoesNotExist:
# No data found. Not a problem.
success = True
activity_post_list_found = False
status += 'NO_ACTIVITY_POST_LIST_FOR_RECIPIENT_RETRIEVED_DoesNotExist '
activity_post_list = []
except Exception as e:
success = False
activity_post_list_found = False
status += 'FAILED retrieve_activity_post_list_for_recipient: ' + str(e) + ' '
results = {
'success': success,
'status': status,
'recipient_voter_we_vote_id': recipient_voter_we_vote_id,
'activity_post_list_found': activity_post_list_found,
'activity_post_list': activity_post_list,
}
return results
def update_activity_notice_list_in_bulk(
self,
recipient_voter_we_vote_id='',
activity_notice_id_list=[],
activity_notice_seen=False,
activity_notice_clicked=False):
status = ""
if not positive_value_exists(recipient_voter_we_vote_id):
success = False
status += 'VALID_VOTER_WE_VOTE_ID_MISSING '
results = {
'success': success,
'status': status,
'recipient_voter_we_vote_id': recipient_voter_we_vote_id,
'activity_notice_list_updated': False,
}
return results
try:
if activity_notice_clicked and activity_notice_seen:
ActivityNotice.objects.all().filter(
id__in=activity_notice_id_list,
recipient_voter_we_vote_id__iexact=recipient_voter_we_vote_id,
deleted=False
).update(
activity_notice_seen=True,
activity_notice_clicked=True)
elif activity_notice_clicked:
ActivityNotice.objects.all().filter(
id__in=activity_notice_id_list,
recipient_voter_we_vote_id__iexact=recipient_voter_we_vote_id,
deleted=False
).update(
activity_notice_clicked=True)
elif activity_notice_seen:
ActivityNotice.objects.all().filter(
id__in=activity_notice_id_list,
recipient_voter_we_vote_id__iexact=recipient_voter_we_vote_id,
deleted=False
).update(
activity_notice_seen=True)
success = True
activity_notice_list_updated = True
status += 'ACTIVITY_NOTICE_LIST_UPDATED '
except ActivityNotice.DoesNotExist:
# No data found. Not a problem.
success = True
activity_notice_list_updated = False
status += 'NO_ACTIVITY_NOTICE_LIST_ENTRIES_FOUND '
except Exception as e:
success = False
activity_notice_list_updated = False
status += 'FAILED update_activity_notice_list_in_bulk ActivityNotice ' + str(e) + ' '
results = {
'success': success,
'status': status,
'recipient_voter_we_vote_id': recipient_voter_we_vote_id,
'activity_notice_list_updated': activity_notice_list_updated,
}
return results
def update_activity_notice_seed(self, activity_notice_seed_id, update_values):
"""
:param activity_notice_seed_id:
:param update_values:
:return:
"""
success = False
status = ""
activity_notice_seed_updated = False
existing_entry = ''
try:
existing_entry = ActivityNoticeSeed.objects.get(id=activity_notice_seed_id)
values_changed = False
if existing_entry:
# found the existing entry, update the values
if 'date_of_notice' in update_values:
existing_entry.date_of_notice = update_values['ballotpedia_activity_notice_seed_id']
values_changed = True
if 'deleted' in update_values:
existing_entry.deleted = update_values['deleted']
values_changed = True
if 'kind_of_seed' in update_values:
existing_entry.kind_of_seed = update_values['kind_of_seed']
values_changed = True
if 'position_names_for_friends_serialized' in update_values:
existing_entry.position_names_for_friends_serialized = \
update_values['position_names_for_friends_serialized']
values_changed = True
if 'position_names_for_public_serialized' in update_values:
existing_entry.position_names_for_public_serialized = \
update_values['position_names_for_public_serialized']
values_changed = True
if 'position_we_vote_ids_for_friends_serialized' in update_values:
existing_entry.position_we_vote_ids_for_friends_serialized = \
update_values['position_we_vote_ids_for_friends_serialized']
values_changed = True
if 'position_we_vote_ids_for_public_serialized' in update_values:
existing_entry.position_we_vote_ids_for_public_serialized = \
update_values['position_we_vote_ids_for_public_serialized']
values_changed = True
if 'speaker_name' in update_values:
existing_entry.speaker_name = update_values['speaker_name']
values_changed = True
if 'speaker_organization_we_vote_id' in update_values:
existing_entry.speaker_organization_we_vote_id = update_values['speaker_organization_we_vote_id']
values_changed = True
if 'speaker_voter_we_vote_id' in update_values:
existing_entry.speaker_voter_we_vote_id = update_values['speaker_voter_we_vote_id']
values_changed = True
# now go ahead and save this entry (update)
if values_changed:
existing_entry.save()
activity_notice_seed_updated = True
success = True
status += "ACTIVITY_NOTICE_SEED_UPDATED "
else:
activity_notice_seed_updated = False
success = True
status += "ACTIVITY_NOTICE_SEED_NOT_UPDATED-NO_CHANGES "
except Exception as e:
success = False
activity_notice_seed_updated = False
status += "ACTIVITY_NOTICE_SEED_RETRIEVE_ERROR " + str(e) + ' '
results = {
'success': success,
'status': status,
'activity_notice_seed_updated': activity_notice_seed_updated,
'updated_activity_notice_seed': existing_entry,
}
return results
def update_or_create_activity_comment(
self,
activity_comment_we_vote_id='',
updated_values={},
commenter_voter_we_vote_id='',
):
"""
Either update or create an ActivityComment.
"""
activity_comment = None
activity_comment_created = False
activity_comment_found = False
missing_variable = False
status = ""
statement_text = updated_values['statement_text'] if 'statement_text' in updated_values else ''
if not commenter_voter_we_vote_id:
missing_variable = True
status += 'MISSING_VOTER_WE_VOTE_ID '
if not positive_value_exists(activity_comment_we_vote_id) and not positive_value_exists(statement_text):
missing_variable = True
status += 'MISSING_BOTH_ID_AND_STATEMENT_TEXT '
if missing_variable:
success = False
results = {
'success': success,
'status': status,
'activity_comment': activity_comment,
'activity_comment_found': activity_comment_found,
'activity_comment_created': activity_comment_created,
}
return results
if positive_value_exists(activity_comment_we_vote_id):
try:
activity_comment = ActivityComment.objects.get(
we_vote_id=activity_comment_we_vote_id,
commenter_voter_we_vote_id=updated_values['commenter_voter_we_vote_id'])
activity_comment_found = True
# Instead of manually mapping them above, we do it this way for flexibility
for key, value in updated_values.items():
setattr(activity_comment, key, value)
activity_comment.save()
success = True
status += 'ACTIVITY_COMMENT_UPDATED '
except Exception as e:
success = False
status += "ACTIVITY_COMMENT_UPDATE_FAILURE: " + str(e) + " "
else:
try:
activity_comment = ActivityComment.objects.create(
date_created=now(),
commenter_voter_we_vote_id=updated_values['commenter_voter_we_vote_id'])
activity_comment_created = True
# Instead of manually mapping them above, we do it this way for flexibility
for key, value in updated_values.items():
setattr(activity_comment, key, value)
activity_comment.save()
activity_comment_found = True
success = True
status += 'ACTIVITY_COMMENT_CREATED '
except Exception as e:
success = False
status += "ACTIVITY_COMMENT_CREATE_FAILURE: " + str(e) + " "
results = {
'success': success,
'status': status,
'activity_comment': activity_comment,
'activity_comment_found': activity_comment_found,
'activity_comment_created': activity_comment_created,
}
return results
def update_or_create_activity_post(
self,
activity_post_we_vote_id='',
updated_values={},
speaker_voter_we_vote_id='',
):
"""
Either update or create an ActivityPost.
"""
activity_post = None
activity_post_created = False
activity_post_found = False
missing_variable = False
status = ""
statement_text = updated_values['statement_text'] if 'statement_text' in updated_values else ''
if not speaker_voter_we_vote_id:
missing_variable = True
status += 'MISSING_VOTER_WE_VOTE_ID '
if not positive_value_exists(activity_post_we_vote_id) and not positive_value_exists(statement_text):
missing_variable = True
status += 'MISSING_BOTH_ID_AND_STATEMENT_TEXT '
if missing_variable:
success = False
results = {
'success': success,
'status': status,
'activity_post': activity_post,
'activity_post_found': activity_post_found,
'activity_post_created': activity_post_created,
}
return results
if positive_value_exists(activity_post_we_vote_id):
try:
activity_post = ActivityPost.objects.get(
we_vote_id=activity_post_we_vote_id,
speaker_voter_we_vote_id=updated_values['speaker_voter_we_vote_id'])
activity_post_found = True
# Instead of manually mapping them above, we do it this way for flexibility
for key, value in updated_values.items():
setattr(activity_post, key, value)
activity_post.save()
success = True
status += 'ACTIVITY_POST_UPDATED '
except Exception as e:
success = False
status += "ACTIVITY_POST_UPDATE_FAILURE: " + str(e) + " "
else:
try:
activity_post = ActivityPost.objects.create(
date_created=now(),
speaker_voter_we_vote_id=updated_values['speaker_voter_we_vote_id'])
activity_post_created = True
# Instead of manually mapping them above, we do it this way for flexibility
for key, value in updated_values.items():
setattr(activity_post, key, value)
activity_post.save()
activity_post_found = True
success = True
status += 'ACTIVITY_POST_CREATED '
except Exception as e:
success = False
status += "ACTIVITY_POST_CREATE_FAILURE: " + str(e) + " "
results = {
'success': success,
'status': status,
'activity_post': activity_post,
'activity_post_found': activity_post_found,
'activity_post_created': activity_post_created,
}
return results
def update_speaker_name_in_bulk(
self,
speaker_voter_we_vote_id='',
speaker_name=''):
status = ""
success = True
if not positive_value_exists(speaker_voter_we_vote_id):
success = False
status += 'VALID_VOTER_WE_VOTE_ID_MISSING '
results = {
'success': success,
'status': status,
}
return results
if not positive_value_exists(speaker_name):
success = False
status += 'SPEAKER_NAME_MUST_EXIST '
results = {
'success': success,
'status': status,
}
return results
try:
updated_count = ActivityComment.objects.all().filter(
commenter_voter_we_vote_id__iexact=speaker_voter_we_vote_id,
deleted=False
).update(
commenter_name=speaker_name)
status += 'ACTIVITY_COMMENTS_UPDATED: (' + str(updated_count) + ') '
except ActivityComment.DoesNotExist:
# No data found. Not a problem.
status += 'NO_ACTIVITY_COMMENTS_FOUND '
except Exception as e:
success = False
status += 'FAILED update_speaker_name_in_bulk ActivityComment ' + str(e) + ' '
try:
updated_count = ActivityNotice.objects.all().filter(
speaker_voter_we_vote_id__iexact=speaker_voter_we_vote_id,
deleted=False
).update(
speaker_name=speaker_name)
status += 'ACTIVITY_NOTICES_UPDATED: (' + str(updated_count) + ') '
except ActivityNotice.DoesNotExist:
# No data found. Not a problem.
status += 'NO_ACTIVITY_NOTICES_FOUND '
except Exception as e:
success = False
status += 'FAILED update_speaker_name_in_bulk ActivityNotice ' + str(e) + ' '
try:
updated_seed_count1 = ActivityNoticeSeed.objects.all().filter(
speaker_voter_we_vote_id__iexact=speaker_voter_we_vote_id,
deleted=False
).update(
speaker_name=speaker_name)
updated_seed_count2 = ActivityNoticeSeed.objects.all().filter(
recipient_voter_we_vote_id__iexact=speaker_voter_we_vote_id,
deleted=False
).update(
recipient_name=speaker_name)
status += 'ACTIVITY_NOTICE_SEEDS_UPDATED: ' \
'(' + str(updated_seed_count1) + '/' + str(updated_seed_count2) + ') '
except ActivityNoticeSeed.DoesNotExist:
# No data found. Not a problem.
status += 'NO_ACTIVITY_NOTICE_SEEDS_FOUND '
except Exception as e:
success = False
status += 'FAILED update_speaker_name_in_bulk ActivityNoticeSeed ' + str(e) + ' '
try:
updated_count = ActivityPost.objects.all().filter(
speaker_voter_we_vote_id__iexact=speaker_voter_we_vote_id,
deleted=False
).update(
speaker_name=speaker_name)
status += 'ACTIVITY_POSTS_UPDATED: (' + str(updated_count) + ') '
except ActivityPost.DoesNotExist:
# No data found. Not a problem.
status += 'NO_ACTIVITY_POSTS_FOUND '
except Exception as e:
success = False
status += 'FAILED update_speaker_name_in_bulk ActivityPost ' + str(e) + ' '
results = {
'success': success,
'status': status,
}
return results
class ActivityNotice(models.Model):
    """
    This is a notice for the notification drop-down menu, for one person.
    One row per recipient; generated from an ActivityNoticeSeed (see that class).
    """
    # Link back to the ActivityNoticeSeed this notice came from
    activity_notice_seed_id = models.PositiveIntegerField(default=None, null=True)
    activity_tidbit_we_vote_id = models.CharField(max_length=255, default=None, null=True)  # subject of notice
    date_of_notice = models.DateTimeField(null=True)
    date_last_changed = models.DateTimeField(null=True, auto_now=True)
    # Recipient interaction with the notice (see update_activity_notice_list_in_bulk)
    activity_notice_clicked = models.BooleanField(default=False)
    activity_notice_seen = models.BooleanField(default=False)
    # Soft delete -- retrieval queries filter on deleted=False rather than removing rows
    deleted = models.BooleanField(default=False)
    kind_of_notice = models.CharField(max_length=50, default=None, null=True)
    kind_of_seed = models.CharField(max_length=50, default=None, null=True)
    new_positions_entered_count = models.PositiveIntegerField(default=None, null=True)
    number_of_comments = models.PositiveIntegerField(default=None, null=True)
    number_of_likes = models.PositiveIntegerField(default=None, null=True)
    position_name_list_serialized = models.TextField(default=None, null=True)
    position_we_vote_id_list_serialized = models.TextField(default=None, null=True)
    # Who generated the activity (speaker) vs. who receives the notice (recipient)
    speaker_name = models.CharField(max_length=255, default=None, null=True)
    speaker_organization_we_vote_id = models.CharField(max_length=255, default=None, null=True)
    speaker_voter_we_vote_id = models.CharField(max_length=255, default=None, null=True)
    recipient_voter_we_vote_id = models.CharField(max_length=255, default=None, null=True)
    is_in_app = models.BooleanField(default=False)
    # Track Email send progress
    send_to_email = models.BooleanField(default=False)
    scheduled_to_email = models.BooleanField(default=False)
    sent_to_email = models.BooleanField(default=False)
    # Track SMS send progress
    send_to_sms = models.BooleanField(default=False)
    scheduled_to_sms = models.BooleanField(default=False)
    sent_to_sms = models.BooleanField(default=False)
    speaker_profile_image_url_medium = models.TextField(blank=True, null=True)
    speaker_profile_image_url_tiny = models.TextField(blank=True, null=True)
    # Short excerpt of the post/comment text shown in the drop-down
    statement_text_preview = models.CharField(max_length=255, default=None, null=True)
class ActivityNoticeSeed(models.Model):
    """
    This is the "seed" for a notice for the notification drop-down menu, which is used before we "distribute" it
    out to an ActivityNotice, which gets shown to an individual voter.
    """
    # Processing-state flags: whether per-voter notices have been created/scheduled from this seed
    activity_notices_created = models.BooleanField(default=False)
    activity_tidbit_we_vote_ids_for_friends_serialized = models.TextField(default=None, null=True)
    activity_tidbit_we_vote_ids_for_public_serialized = models.TextField(default=None, null=True)
    date_of_notice_earlier_than_update_window = models.BooleanField(default=False)
    activity_notices_scheduled = models.BooleanField(default=False)
    added_to_voter_daily_summary = models.BooleanField(default=False)
    date_of_notice = models.DateTimeField(null=True)
    date_last_changed = models.DateTimeField(null=True, auto_now=True)
    deleted = models.BooleanField(default=False)
    kind_of_seed = models.CharField(max_length=50, default=None, null=True)
    # Positions that were changed: NOTICE_FRIEND_ENDORSEMENTS
    position_names_for_friends_serialized = models.TextField(default=None, null=True)
    position_names_for_public_serialized = models.TextField(default=None, null=True)
    position_we_vote_ids_for_friends_serialized = models.TextField(default=None, null=True)
    position_we_vote_ids_for_public_serialized = models.TextField(default=None, null=True)
    # Voter receiving the daily summary: NOTICE_VOTER_DAILY_SUMMARY
    recipient_name = models.CharField(max_length=255, default=None, null=True)
    recipient_voter_we_vote_id = models.CharField(max_length=255, default=None, null=True)
    speaker_voter_we_vote_ids_serialized = models.TextField(default=None, null=True)
    speaker_organization_we_vote_ids_serialized = models.TextField(default=None, null=True)
    # Track Email send progress for NOTICE_VOTER_DAILY_SUMMARY_SEED
    send_to_email = models.BooleanField(default=False)
    scheduled_to_email = models.BooleanField(default=False)
    sent_to_email = models.BooleanField(default=False)
    # Track SMS send progress for NOTICE_VOTER_DAILY_SUMMARY_SEED
    send_to_sms = models.BooleanField(default=False)
    scheduled_to_sms = models.BooleanField(default=False)
    sent_to_sms = models.BooleanField(default=False)
    # Voter who took the action: NOTICE_ACTIVITY_POST_SEED, NOTICE_FRIEND_ENDORSEMENTS_SEED
    speaker_name = models.CharField(max_length=255, default=None, null=True)
    speaker_organization_we_vote_id = models.CharField(max_length=255, default=None, null=True)
    speaker_voter_we_vote_id = models.CharField(max_length=255, default=None, null=True)
    speaker_profile_image_url_medium = models.TextField(blank=True, null=True)
    speaker_profile_image_url_tiny = models.TextField(blank=True, null=True)
    speaker_twitter_handle = models.CharField(max_length=255, null=True, unique=False, default=None)
    speaker_twitter_followers_count = models.IntegerField(default=0)
    statement_text_preview = models.CharField(max_length=255, default=None, null=True)
    # we_vote_id of this SEED
    we_vote_id = models.CharField(max_length=255, default=None, null=True, unique=True)

    # We override the save function so we can auto-generate we_vote_id
    def save(self, *args, **kwargs):
        """Persist the seed, generating a unique ``we_vote_id`` when none is set yet."""
        # Even if this data came from another source we still need a unique we_vote_id
        if self.we_vote_id:
            # Normalize any caller-supplied id before the emptiness check below
            self.we_vote_id = self.we_vote_id.strip().lower()
        if self.we_vote_id == "" or self.we_vote_id is None:  # If there isn't a value...
            # ...generate a new id
            site_unique_id_prefix = fetch_site_unique_id_prefix()
            next_local_integer = fetch_next_we_vote_id_activity_notice_seed_integer()
            # "wv" = We Vote
            # site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
            # "actseed" = tells us this is a unique id for an ActivityNoticeSeed
            # next_integer = a unique, sequential integer for this server - not necessarily tied to database id
            self.we_vote_id = "wv{site_unique_id_prefix}actseed{next_integer}".format(
                site_unique_id_prefix=site_unique_id_prefix,
                next_integer=next_local_integer,
            )
        super(ActivityNoticeSeed, self).save(*args, **kwargs)
class ActivityPost(models.Model):
    """
    A voter-created post for the activity list
    """
    date_created = models.DateTimeField(null=True)
    date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True)
    deleted = models.BooleanField(default=False)
    # Who created the post
    speaker_name = models.CharField(max_length=255, default=None, null=True)
    speaker_organization_we_vote_id = models.CharField(max_length=255, default=None, null=True)
    speaker_twitter_followers_count = models.PositiveIntegerField(default=None, null=True)
    speaker_twitter_handle = models.CharField(max_length=255, default=None, null=True)
    speaker_voter_we_vote_id = models.CharField(max_length=255, default=None, null=True)
    speaker_profile_image_url_medium = models.TextField(blank=True, null=True)
    speaker_profile_image_url_tiny = models.TextField(blank=True, null=True)
    # The post body and its visibility
    statement_text = models.TextField(null=True, blank=True)
    visibility_is_public = models.BooleanField(default=False)
    we_vote_id = models.CharField(max_length=255, default=None, null=True, unique=True)

    # We override the save function so we can auto-generate we_vote_id
    def save(self, *args, **kwargs):
        """Persist the post, generating a unique ``we_vote_id`` when none is set yet."""
        # Even if this data came from another source we still need a unique we_vote_id
        if self.we_vote_id:
            # Normalize any caller-supplied id before the emptiness check below
            self.we_vote_id = self.we_vote_id.strip().lower()
        if self.we_vote_id == "" or self.we_vote_id is None:  # If there isn't a value...
            # ...generate a new id
            site_unique_id_prefix = fetch_site_unique_id_prefix()
            next_local_integer = fetch_next_we_vote_id_activity_post_integer()
            # "wv" = We Vote
            # site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
            # "post" = tells us this is a unique id for an ActivityPost
            # next_integer = a unique, sequential integer for this server - not necessarily tied to database id
            self.we_vote_id = "wv{site_unique_id_prefix}post{next_integer}".format(
                site_unique_id_prefix=site_unique_id_prefix,
                next_integer=next_local_integer,
            )
        super(ActivityPost, self).save(*args, **kwargs)
def get_lifespan_of_seed(kind_of_seed):
    """Return how long (in seconds) a seed of the given kind remains active.

    Unknown kinds get a lifespan of 0.
    """
    lifespan_in_seconds_by_kind = {
        NOTICE_ACTIVITY_POST_SEED: 14400,        # 4 hours * 60 minutes * 60 seconds/minute
        NOTICE_FRIEND_ENDORSEMENTS_SEED: 21600,  # 6 hours * 60 minutes * 60 seconds/minute
        NOTICE_VOTER_DAILY_SUMMARY_SEED: 43200,  # 12 hours * 60 minutes * 60 seconds/minute
    }
    return lifespan_in_seconds_by_kind.get(kind_of_seed, 0)
| {
"repo_name": "wevote/WeVoteServer",
"path": "activity/models.py",
"copies": "1",
"size": "72637",
"license": "mit",
"hash": 3093900268681364500,
"line_mean": 45.9838292367,
"line_max": 120,
"alpha_frac": 0.568759723,
"autogenerated": false,
"ratio": 4.042575690115761,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5111335413115761,
"avg_score": null,
"num_lines": null
} |
# Fixture: a Create/Note reply (comment) payload with Mastodon/ostatus JSON-LD context extensions
# and an RsaSignature2017 signature block.
ACTIVITYPUB_COMMENT = {
    '@context': ['https://www.w3.org/ns/activitystreams',
                 {'ostatus': 'http://ostatus.org#',
                  'atomUri': 'ostatus:atomUri',
                  'inReplyToAtomUri': 'ostatus:inReplyToAtomUri',
                  'conversation': 'ostatus:conversation',
                  'sensitive': 'as:sensitive',
                  'Hashtag': 'as:Hashtag',
                  'toot': 'http://joinmastodon.org/ns#',
                  'Emoji': 'toot:Emoji',
                  'focalPoint': {'@container': '@list', '@id': 'toot:focalPoint'},
                  'blurhash': 'toot:blurhash'}],
    'id': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237/activity',
    'type': 'Create',
    'actor': 'https://diaspodon.fr/users/jaywink',
    'published': '2019-06-29T21:08:45Z',
    'to': ['https://www.w3.org/ns/activitystreams#Public'],
    'cc': ['https://diaspodon.fr/users/jaywink/followers',
           'https://dev.jasonrobinson.me/p/d4574854-a5d7-42be-bfac-f70c16fcaa97/'],
    'object': {'id': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237',
               'type': 'Note',
               'summary': None,
               # 'inReplyTo' is set, which is what makes this payload a comment rather than a root post
               'inReplyTo': 'https://dev.jasonrobinson.me/content/653bad70-41b3-42c9-89cb-c4ee587e68e4/',
               'published': '2019-06-29T21:08:45Z',
               'url': 'https://diaspodon.fr/@jaywink/102356911717767237',
               'attributedTo': 'https://diaspodon.fr/users/jaywink',
               'to': ['https://www.w3.org/ns/activitystreams#Public'],
               'cc': ['https://diaspodon.fr/users/jaywink/followers',
                      'https://dev.jasonrobinson.me/p/d4574854-a5d7-42be-bfac-f70c16fcaa97/'],
               'sensitive': False,
               'atomUri': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237',
               'inReplyToAtomUri': 'https://dev.jasonrobinson.me/content/653bad70-41b3-42c9-89cb-c4ee587e68e4/',
               'conversation': 'tag:diaspodon.fr,2019-06-28:objectId=2347687:objectType=Conversation',
               'content': '<p><span class="h-card"><a href="https://dev.jasonrobinson.me/u/jaywink/" class="u-url mention">@<span>jaywink</span></a></span> boom</p>',
               'contentMap': {'en': '<p><span class="h-card"><a href="https://dev.jasonrobinson.me/u/jaywink/" class="u-url mention">@<span>jaywink</span></a></span> boom</p>'},
               'attachment': [],
               'tag': [{'type': 'Mention',
                        'href': 'https://dev.jasonrobinson.me/p/d4574854-a5d7-42be-bfac-f70c16fcaa97/',
                        'name': '@jaywink@dev.jasonrobinson.me'}],
               'replies': {'id': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237/replies',
                           'type': 'Collection',
                           'first': {'type': 'CollectionPage',
                                     'partOf': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237/replies',
                                     'items': []}}},
    'signature': {'type': 'RsaSignature2017',
                  'creator': 'https://diaspodon.fr/users/jaywink#main-key',
                  'created': '2019-06-29T21:08:45Z',
                  'signatureValue': 'SjDACS7Z/Cb1SEC3AtxEokID5SHAYl7kpys/hhmaRbpXuFKCxfj2P9BmH8QhLnuam3sENZlrnBOcB5NlcBhIfwo/Xh242RZBmPQf+edTVYVCe1j19dihcftNCHtnqAcKwp/51dNM/OlKu2730FrwvOUXVIPtB7iVqkseO9TRzDYIDj+zBTksnR/NAYtq6SUpmefXfON0uW3N3Uq6PGfExJaS+aeqRf8cPGkZFSIUQZwOLXbIpb7BFjJ1+y1OMOAJueqvikUprAit3v6BiNWurAvSQpC7WWMFUKyA79/xtkO9kIPA/Q4C9ryqdzxZJ0jDhXiaIIQj2JZfIADdjLZHJA=='}
}
# Fixture: a minimal Follow activity.
ACTIVITYPUB_FOLLOW = {
    "@context": [
        "https://www.w3.org/ns/activitystreams",
        "https://w3id.org/security/v1",
    ],
    "id": "https://example.com/follow",
    "type": "Follow",
    "actor": "https://example.com/actor",
    "object": "https://example.org/actor",
}
# Fixture: a full Mastodon-style Person actor document (profile) with publicKey, endpoints and images.
ACTIVITYPUB_PROFILE = {
    "@context": [
        "https://www.w3.org/ns/activitystreams",
        "https://w3id.org/security/v1",
        {
            "manuallyApprovesFollowers": "as:manuallyApprovesFollowers",
            "sensitive": "as:sensitive",
            "movedTo": {
                "@id": "as:movedTo",
                "@type": "@id"
            },
            "alsoKnownAs": {
                "@id": "as:alsoKnownAs",
                "@type": "@id"
            },
            "Hashtag": "as:Hashtag",
            "ostatus": "http://ostatus.org#",
            "atomUri": "ostatus:atomUri",
            "inReplyToAtomUri": "ostatus:inReplyToAtomUri",
            "conversation": "ostatus:conversation",
            "toot": "http://joinmastodon.org/ns#",
            "Emoji": "toot:Emoji",
            "focalPoint": {
                "@container": "@list",
                "@id": "toot:focalPoint"
            },
            "featured": {
                "@id": "toot:featured",
                "@type": "@id"
            },
            "schema": "http://schema.org#",
            "PropertyValue": "schema:PropertyValue",
            "value": "schema:value"
        }
    ],
    "id": "https://diaspodon.fr/users/jaywink",
    "type": "Person",
    "following": "https://diaspodon.fr/users/jaywink/following",
    "followers": "https://diaspodon.fr/users/jaywink/followers",
    "inbox": "https://diaspodon.fr/users/jaywink/inbox",
    "outbox": "https://diaspodon.fr/users/jaywink/outbox",
    "featured": "https://diaspodon.fr/users/jaywink/collections/featured",
    "preferredUsername": "jaywink",
    "name": "Jason Robinson",
    "summary": "<p>Temp account while implementing AP for Socialhome.</p><p><a href=\"https://jasonrobinson.me\" rel=\"nofollow noopener\" target=\"_blank\"><span class=\"invisible\">https://</span><span class=\"\">jasonrobinson.me</span><span class=\"invisible\"></span></a> / <a href=\"https://socialhome.network\" rel=\"nofollow noopener\" target=\"_blank\"><span class=\"invisible\">https://</span><span class=\"\">socialhome.network</span><span class=\"invisible\"></span></a> / <a href=\"https://feneas.org\" rel=\"nofollow noopener\" target=\"_blank\"><span class=\"invisible\">https://</span><span class=\"\">feneas.org</span><span class=\"invisible\"></span></a></p>",
    "url": "https://diaspodon.fr/@jaywink",
    "manuallyApprovesFollowers": False,
    "publicKey": {
        "id": "https://diaspodon.fr/users/jaywink#main-key",
        "owner": "https://diaspodon.fr/users/jaywink",
        "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwVbaT5wvaZobfIB044ai\nhJg/XooEn2jSTnTY1K4mPmhdqYUmszpdXKp64OwA+f3SBuIUIkLAYUSB9Fu19zh+\nzOsoGI5gvA32DHY1vaqdKnT9gt3jKS5AdQ3bl0t9f4pPkO2I5YtQOWV1FvBcwPXG\nB0dIqj0fTqNK37FmyybrRD6uhjySddklN9gNsULTqYVDa0QSXVswTIW2jQudnNlp\nnEf3SfjlK9J8eKPF3hFK3PNXBTTZ4NydBSL3cVBinU0cFg8lUJOK8RI4qaetrVoQ\neKd7gCTSQ7RZh8kmkYmdlweb+ZtORT6Y5ZsotR8jwhAOFAqCt36B5+LX2UIw68Pk\nOwIDAQAB\n-----END PUBLIC KEY-----\n"
    },
    "tag": [],
    "attachment": [],
    "endpoints": {
        "sharedInbox": "https://diaspodon.fr/inbox"
    },
    "icon": {
        "type": "Image",
        "mediaType": "image/jpeg",
        "url": "https://diaspodon.fr/system/accounts/avatars/000/033/155/original/pnc__picked_media_be51984c-43e9-4266-9b9a-b74a61ae4167.jpg?1538505110"
    },
    "image": {
        "type": "Image",
        "mediaType": "image/png",
        "url": "https://diaspodon.fr/system/accounts/headers/000/033/155/original/45ae49a08ecc5f27.png?1537060098"
    }
}
# Fixture: a Person document with a null "id" — used to exercise validation failure paths.
ACTIVITYPUB_PROFILE_INVALID = {
    "@context": [
        "https://www.w3.org/ns/activitystreams",
        "https://w3id.org/security/v1",
    ],
    "id": None,
    "type": "Person",
    "name": "Jason Robinson",
    "url": "https://diaspodon.fr/@jaywink",
    "publicKey": {
        "id": "https://diaspodon.fr/users/jaywink#main-key",
        "owner": "https://diaspodon.fr/users/jaywink",
        "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwVbaT5wvaZobfIB044ai\nhJg/XooEn2jSTnTY1K4mPmhdqYUmszpdXKp64OwA+f3SBuIUIkLAYUSB9Fu19zh+\nzOsoGI5gvA32DHY1vaqdKnT9gt3jKS5AdQ3bl0t9f4pPkO2I5YtQOWV1FvBcwPXG\nB0dIqj0fTqNK37FmyybrRD6uhjySddklN9gNsULTqYVDa0QSXVswTIW2jQudnNlp\nnEf3SfjlK9J8eKPF3hFK3PNXBTTZ4NydBSL3cVBinU0cFg8lUJOK8RI4qaetrVoQ\neKd7gCTSQ7RZh8kmkYmdlweb+ZtORT6Y5ZsotR8jwhAOFAqCt36B5+LX2UIw68Pk\nOwIDAQAB\n-----END PUBLIC KEY-----\n"
    },
}
# Fixture: a Friendica Organization actor carrying a "diaspora:guid" extension property.
ACTIVITYPUB_PROFILE_WITH_DIASPORA_GUID = {
    "@context": [
        "https://www.w3.org/ns/activitystreams",
        "https://w3id.org/security/v1",
        {
            "vcard": "http://www.w3.org/2006/vcard/ns#",
            "dfrn": "http://purl.org/macgirvin/dfrn/1.0/",
            "diaspora": "https://diasporafoundation.org/ns/",
            "litepub": "http://litepub.social/ns#",
            "manuallyApprovesFollowers": "as:manuallyApprovesFollowers",
            "sensitive": "as:sensitive",
            "Hashtag": "as:Hashtag",
            "directMessage": "litepub:directMessage"
        }
    ],
    "id": "https://friendica.feneas.org/profile/feneas",
    "diaspora:guid": "76158462365bd347844d248732383358",
    "type": "Organization",
    "following": "https://friendica.feneas.org/following/feneas",
    "followers": "https://friendica.feneas.org/followers/feneas",
    "inbox": "https://friendica.feneas.org/inbox/feneas",
    "outbox": "https://friendica.feneas.org/outbox/feneas",
    "preferredUsername": "feneas",
    "name": "Federated Networks Association",
    "vcard:hasAddress": {
        "@type": "vcard:Home",
        "vcard:country-name": "",
        "vcard:region": "",
        "vcard:locality": ""
    },
    "summary": "Federated Networks Association Ry is a non-profit volunteer organization that aims to spread knowledge about federated web projects and help people and projects involved in this area.",
    "url": "https://friendica.feneas.org/profile/feneas",
    "manuallyApprovesFollowers": False,
    "publicKey": {
        "id": "https://friendica.feneas.org/profile/feneas#main-key",
        "owner": "https://friendica.feneas.org/profile/feneas",
        "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAtrN/RR6tSG0oH+G1oZmR\nHLx90i7ZgLAviJkfudnrgQDdSk5PKobKKyKw/GW07CuGtispNbI1nls3DdwnnPPl\naQYAvMW0MdLyPtviETecNcwRMMo4zOJxUMVooxjaK1bLstSxSbXpXXXWnn1niFkV\n+tHvtXXPJ12vXGoZjz0hKqzfKCND27rXCwFuecgvmMO8qeJB8aiJH5XfTPhAR0u1\nnA1Y2+GIdHP0Tnmt27nG0+9v9UqzIwlXOWRS2c5bG5XXZ35EnQEnfU9C+fhBzYEF\nBe5FwMR/sbK+a75ii0KxpP8x97MUnTkeuY7NgqY7GXJeKa9EZWK9xL7o5WDL5joe\ndNbDtyDL1CvUAB2JN+FabqbPeeHR1E5SFppBU7u0jyWTMejUqagBXdbf/Y6l7SRd\nJd51mSpmTkO9voybny7iXF4F2THZX0ZN77rDWYB8+NHAjdomfpz/wMwaaSALN6ZM\nSLxuobD0X3Rsv/2gDVL63UEXixQk4ZKBXfdZY2BmEKTKBezoWhR5yAbvTUPoklqL\nk98ajd9SIhE/YZD6nXD1S8CPiaV7qhw+2zdPIZDgq/S2P57eXcHam5ZasSzQGoo/\npVNvWzlRUo0GLIWEuA9FHklVpbuFlthaf5SP5qjKCtbYqLlzBPcxWaI0Y8Ei9ZL2\nTaosv23vnPSkN5wZp15lCrECAwEAAQ==\n-----END PUBLIC KEY-----\n"
    },
    "endpoints": {
        "sharedInbox": "https://friendica.feneas.org/inbox"
    },
    "icon": {
        "type": "Image",
        "url": "https://friendica.feneas.org/photo/11107142045be6113fe88d7733317204-4.jpg?ts=1541804364"
    },
    "generator": {
        "type": "Service",
        "name": "Friendica 'Dalmatian Bellflower' 2019.12-1327",
        "url": "https://friendica.feneas.org"
    }
}
# Fixture: a Friendica Delete activity whose object is a Tombstone (content retraction).
ACTIVITYPUB_RETRACTION = {
    '@context': [
        'https://www.w3.org/ns/activitystreams',
        'https://w3id.org/security/v1',
        {
            'vcard': 'http://www.w3.org/2006/vcard/ns#',
            'dfrn': 'http://purl.org/macgirvin/dfrn/1.0/',
            'diaspora': 'https://diasporafoundation.org/ns/',
            'litepub': 'http://litepub.social/ns#',
            'manuallyApprovesFollowers': 'as:manuallyApprovesFollowers',
            'sensitive': 'as:sensitive',
            'Hashtag': 'as:Hashtag',
            'directMessage': 'litepub:directMessage',
        },
    ],
    'id': 'https://friendica.feneas.org/objects/76158462-165d-3386-aa23-ba2090614385#Delete',
    'type': 'Delete',
    'actor': 'https://friendica.feneas.org/profile/jaywink',
    'published': '2019-07-20T21:24:58Z',
    'instrument': {
        'type': 'Service',
        'name': "Friendica 'Dalmatian Bellflower' 2019.06-1313",
        'url': 'https://friendica.feneas.org',
    },
    'to': ['https://www.w3.org/ns/activitystreams#Public'],
    'cc': ['https://friendica.feneas.org/followers/jaywink'],
    'object': {
        'id': 'https://friendica.feneas.org/objects/76158462-165d-3386-aa23-ba2090614385',
        'type': 'Tombstone',
    },
    'signature': {
        'type': 'RsaSignature2017',
        'nonce': 'de299d5c8074548d8022d31059b4735870f29ea85d78c5214a423038273c5e5c',
        'creator': 'https://friendica.feneas.org/profile/jaywink#main-key',
        'created': '2019-07-20T21:39:13Z',
        'signatureValue': 'lotsoftext',
    },
}
# Fixture: a Mastodon Undo of an Announce (un-sharing a previously boosted status).
ACTIVITYPUB_RETRACTION_SHARE = {'@context': 'https://www.w3.org/ns/activitystreams',
                                'id': 'https://mastodon.social/users/jaywink#announces/102571932479036987/undo',
                                'type': 'Undo',
                                'actor': 'https://mastodon.social/users/jaywink',
                                'to': ['https://www.w3.org/ns/activitystreams#Public'],
                                'object': {'id': 'https://mastodon.social/users/jaywink/statuses/102571932479036987/activity',
                                           'type': 'Announce',
                                           'actor': 'https://mastodon.social/users/jaywink',
                                           'published': '2019-08-06T20:31:21Z',
                                           'to': ['https://www.w3.org/ns/activitystreams#Public'],
                                           'cc': ['https://mastodon.art/users/asterii',
                                                  'https://mastodon.social/users/jaywink/followers'],
                                           'object': 'https://mastodon.art/users/asterii/statuses/102571181579804095',
                                           'atomUri': 'https://mastodon.social/users/jaywink/statuses/102571932479036987/activity'},
                                'signature': {'type': 'RsaSignature2017',
                                              'creator': 'https://mastodon.social/users/jaywink#main-key',
                                              'created': '2019-08-06T20:32:23Z',
                                              'signatureValue': 'erI90OrrLqK1DiTqb4OO72XLcE7m74Fs4cH6s0plKKELHa7BZFQmtQYXKEgA9LwIUdSRrIurAUiaDWAw2sQZDg7opYo9x3z+GJDMZ3KxhBND7iHO8ZeGhV1ZBBKUMuBb3BfhOkd3ADp+RQ/fHcw6kOcViV2VsQduinAgQRpiutmGCLd/7eshqSF/aL4tFoAOyCskkm/5JDMNp2nnHNoXXJ+SZf7a8C6YPNDxWd7GzyQNeWkTBBdCJBPvS4HI0wQrTWemBvy6uP8k5QQ7FnqrrRrk/7zrcibFSInuYxiRTRV++rQ3irIbXNtoLhWQd36Iu5U22BclmkS1AAVBDUIj8w=='}}
# Fixture: a Mastodon Announce (share/boost) whose signatureValue is split across
# adjacent string literals (implicit concatenation).
ACTIVITYPUB_SHARE = {
    '@context': 'https://www.w3.org/ns/activitystreams',
    'id': 'https://mastodon.social/users/jaywink/statuses/102560701449465612/activity',
    'type': 'Announce',
    'actor': 'https://mastodon.social/users/jaywink',
    'published': '2019-08-04T20:55:09Z',
    'to': ['https://www.w3.org/ns/activitystreams#Public'],
    'cc': [
        'https://mastodon.social/users/Gargron',
        'https://mastodon.social/users/jaywink/followers',
    ],
    'object': 'https://mastodon.social/users/Gargron/statuses/102559779793316012',
    'atomUri': 'https://mastodon.social/users/jaywink/statuses/102560701449465612/activity',
    'signature': {
        'type': 'RsaSignature2017',
        'creator': 'https://mastodon.social/users/jaywink#main-key',
        'created': '2019-08-04T20:55:09Z',
        'signatureValue': 'fBW+hqP4ZslMf+1ZebqwuYAhQHvE5atsD/DLzda0eLY8xdf5XdROtoMHfVow5ZSq34w5CIPKOUUPo6aYx5bbLSd'
                          'JqwhoKOuwbtAmq3UvUp3vsiX671Cc4AL2b7sRL2sH0XfMtl5vpVaZM4LnpzGE3py91tQPCKY+azg6XUxJKOn6Kt'
                          'bo47LSpXZmzNacsfiiEmF48FlPojRZniz1wKNV+MIvvThIQlaahKAvPYHSF9INwMtlJpnVjc9T+9IkeSuHbNY4x'
                          'R9huLESZc3iZQk1OPIUsbqmMYVRm1G/WEnPpQwl4rH64YNptpxq8oxvtkECcK1ulT9+XxoCFaLg7pHr9Q==',
    },
}
# Fixture: an Undo wrapping a Follow activity (unfollow).
ACTIVITYPUB_UNDO_FOLLOW = {
    "@context": [
        "https://www.w3.org/ns/activitystreams",
        "https://w3id.org/security/v1",
    ],
    "id": "https://example.com/undo",
    "type": "Undo",
    "actor": "https://example.com/actor",
    "object": {
        "id": "https://example.com/follow",
        "type": "Follow",
        "actor": "https://example.com/actor",
        "object": "https://example.org/actor",
    },
}
# Fixture: a Create/Note root post (inReplyTo is None); note 'to' is a bare string here,
# not a list as in ACTIVITYPUB_COMMENT.
ACTIVITYPUB_POST = {
    '@context': ['https://www.w3.org/ns/activitystreams',
                 {'ostatus': 'http://ostatus.org#',
                  'atomUri': 'ostatus:atomUri',
                  'inReplyToAtomUri': 'ostatus:inReplyToAtomUri',
                  'conversation': 'ostatus:conversation',
                  'sensitive': 'as:sensitive',
                  'Hashtag': 'as:Hashtag',
                  'toot': 'http://joinmastodon.org/ns#',
                  'Emoji': 'toot:Emoji',
                  'focalPoint': {'@container': '@list', '@id': 'toot:focalPoint'},
                  'blurhash': 'toot:blurhash'}],
    'id': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237/activity',
    'type': 'Create',
    'actor': 'https://diaspodon.fr/users/jaywink',
    'published': '2019-06-29T21:08:45Z',
    'to': 'https://www.w3.org/ns/activitystreams#Public',
    'cc': ['https://diaspodon.fr/users/jaywink/followers',
           'https://dev.jasonrobinson.me/p/d4574854-a5d7-42be-bfac-f70c16fcaa97/'],
    'object': {'id': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237',
               'type': 'Note',
               'summary': None,
               'inReplyTo': None,
               'published': '2019-06-29T21:08:45Z',
               'url': 'https://diaspodon.fr/@jaywink/102356911717767237',
               'attributedTo': 'https://diaspodon.fr/users/jaywink',
               'to': 'https://www.w3.org/ns/activitystreams#Public',
               'cc': ['https://diaspodon.fr/users/jaywink/followers',
                      'https://dev.jasonrobinson.me/p/d4574854-a5d7-42be-bfac-f70c16fcaa97/'],
               'sensitive': False,
               'atomUri': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237',
               'inReplyToAtomUri': None,
               'conversation': 'tag:diaspodon.fr,2019-06-28:objectId=2347687:objectType=Conversation',
               'content': '<p><span class="h-card"><a href="https://dev.jasonrobinson.me/u/jaywink/" class="u-url mention">@<span>jaywink</span></a></span> boom</p>',
               'contentMap': {'en': '<p><span class="h-card"><a href="https://dev.jasonrobinson.me/u/jaywink/" class="u-url mention">@<span>jaywink</span></a></span> boom</p>'},
               'attachment': [],
               'tag': [{'type': 'Mention',
                        'href': 'https://dev.jasonrobinson.me/p/d4574854-a5d7-42be-bfac-f70c16fcaa97/',
                        'name': '@jaywink@dev.jasonrobinson.me'}],
               'replies': {'id': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237/replies',
                           'type': 'Collection',
                           'first': {'type': 'CollectionPage',
                                     'partOf': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237/replies',
                                     'items': []}}},
    'signature': {'type': 'RsaSignature2017',
                  'creator': 'https://diaspodon.fr/users/jaywink#main-key',
                  'created': '2019-06-29T21:08:45Z',
                  'signatureValue': 'SjDACS7Z/Cb1SEC3AtxEokID5SHAYl7kpys/hhmaRbpXuFKCxfj2P9BmH8QhLnuam3sENZlrnBOcB5NlcBhIfwo/Xh242RZBmPQf+edTVYVCe1j19dihcftNCHtnqAcKwp/51dNM/OlKu2730FrwvOUXVIPtB7iVqkseO9TRzDYIDj+zBTksnR/NAYtq6SUpmefXfON0uW3N3Uq6PGfExJaS+aeqRf8cPGkZFSIUQZwOLXbIpb7BFjJ1+y1OMOAJueqvikUprAit3v6BiNWurAvSQpC7WWMFUKyA79/xtkO9kIPA/Q4C9ryqdzxZJ0jDhXiaIIQj2JZfIADdjLZHJA=='}
}
# Fixture: like ACTIVITYPUB_POST but the content carries a hashtag link; no 'contentMap' key.
ACTIVITYPUB_POST_WITH_TAGS = {
    '@context': ['https://www.w3.org/ns/activitystreams',
                 {'ostatus': 'http://ostatus.org#',
                  'atomUri': 'ostatus:atomUri',
                  'inReplyToAtomUri': 'ostatus:inReplyToAtomUri',
                  'conversation': 'ostatus:conversation',
                  'sensitive': 'as:sensitive',
                  'Hashtag': 'as:Hashtag',
                  'toot': 'http://joinmastodon.org/ns#',
                  'Emoji': 'toot:Emoji',
                  'focalPoint': {'@container': '@list', '@id': 'toot:focalPoint'},
                  'blurhash': 'toot:blurhash'}],
    'id': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237/activity',
    'type': 'Create',
    'actor': 'https://diaspodon.fr/users/jaywink',
    'published': '2019-06-29T21:08:45Z',
    'to': 'https://www.w3.org/ns/activitystreams#Public',
    'cc': ['https://diaspodon.fr/users/jaywink/followers',
           'https://dev.jasonrobinson.me/p/d4574854-a5d7-42be-bfac-f70c16fcaa97/'],
    'object': {'id': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237',
               'type': 'Note',
               'summary': None,
               'inReplyTo': None,
               'published': '2019-06-29T21:08:45Z',
               'url': 'https://diaspodon.fr/@jaywink/102356911717767237',
               'attributedTo': 'https://diaspodon.fr/users/jaywink',
               'to': 'https://www.w3.org/ns/activitystreams#Public',
               'cc': ['https://diaspodon.fr/users/jaywink/followers',
                      'https://dev.jasonrobinson.me/p/d4574854-a5d7-42be-bfac-f70c16fcaa97/'],
               'sensitive': False,
               'atomUri': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237',
               'inReplyToAtomUri': None,
               'conversation': 'tag:diaspodon.fr,2019-06-28:objectId=2347687:objectType=Conversation',
               'content': '<p>boom <a href="https://mastodon.social/tags/test" class="mention hashtag" rel="tag">#<span>test</span></a></p>',
               'attachment': [],
               'tag': [{'type': 'Mention',
                        'href': 'https://dev.jasonrobinson.me/p/d4574854-a5d7-42be-bfac-f70c16fcaa97/',
                        'name': '@jaywink@dev.jasonrobinson.me'}],
               'replies': {'id': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237/replies',
                           'type': 'Collection',
                           'first': {'type': 'CollectionPage',
                                     'partOf': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237/replies',
                                     'items': []}}},
    'signature': {'type': 'RsaSignature2017',
                  'creator': 'https://diaspodon.fr/users/jaywink#main-key',
                  'created': '2019-06-29T21:08:45Z',
                  'signatureValue': 'SjDACS7Z/Cb1SEC3AtxEokID5SHAYl7kpys/hhmaRbpXuFKCxfj2P9BmH8QhLnuam3sENZlrnBOcB5NlcBhIfwo/Xh242RZBmPQf+edTVYVCe1j19dihcftNCHtnqAcKwp/51dNM/OlKu2730FrwvOUXVIPtB7iVqkseO9TRzDYIDj+zBTksnR/NAYtq6SUpmefXfON0uW3N3Uq6PGfExJaS+aeqRf8cPGkZFSIUQZwOLXbIpb7BFjJ1+y1OMOAJueqvikUprAit3v6BiNWurAvSQpC7WWMFUKyA79/xtkO9kIPA/Q4C9ryqdzxZJ0jDhXiaIIQj2JZfIADdjLZHJA=='}
}
# Fixture: a Create/Note with a Mention tag and a paged replies collection.
ACTIVITYPUB_POST_WITH_MENTIONS = {'@context': ['https://www.w3.org/ns/activitystreams',
                                               {'ostatus': 'http://ostatus.org#',
                                                'atomUri': 'ostatus:atomUri',
                                                'inReplyToAtomUri': 'ostatus:inReplyToAtomUri',
                                                'conversation': 'ostatus:conversation',
                                                'sensitive': 'as:sensitive'}],
                                  'id': 'https://mastodon.social/users/jaywink/statuses/102750454691863505/activity',
                                  'type': 'Create',
                                  'actor': 'https://mastodon.social/users/jaywink',
                                  'published': '2019-09-07T09:11:54Z',
                                  'to': ['https://www.w3.org/ns/activitystreams#Public'],
                                  'cc': ['https://mastodon.social/users/jaywink/followers',
                                         'https://dev3.jasonrobinson.me/u/jaywink/'],
                                  'object': {'id': 'https://mastodon.social/users/jaywink/statuses/102750454691863505',
                                             'type': 'Note',
                                             'summary': None,
                                             'inReplyTo': None,
                                             'published': '2019-09-07T09:11:54Z',
                                             'url': 'https://mastodon.social/@jaywink/102750454691863505',
                                             'attributedTo': 'https://mastodon.social/users/jaywink',
                                             'to': ['https://www.w3.org/ns/activitystreams#Public'],
                                             'cc': ['https://mastodon.social/users/jaywink/followers',
                                                    'https://dev3.jasonrobinson.me/u/jaywink/'],
                                             'sensitive': False,
                                             'atomUri': 'https://mastodon.social/users/jaywink/statuses/102750454691863505',
                                             'inReplyToAtomUri': None,
                                             'conversation': 'tag:mastodon.social,2019-09-07:objectId=123339599:objectType=Conversation',
                                             'content': '<p><span class="h-card"><a href="https://dev3.jasonrobinson.me/u/jaywink/" class="u-url mention">@<span>jaywink</span></a></span> need a mention payload - here!</p>',
                                             'contentMap': {'en': '<p><span class="h-card"><a href="https://dev3.jasonrobinson.me/u/jaywink/" class="u-url mention">@<span>jaywink</span></a></span> need a mention payload - here!</p>'},
                                             'attachment': [],
                                             'tag': [{'type': 'Mention',
                                                      'href': 'https://dev3.jasonrobinson.me/u/jaywink/',
                                                      'name': '@jaywink@dev3.jasonrobinson.me'}],
                                             'replies': {'id': 'https://mastodon.social/users/jaywink/statuses/102750454691863505/replies',
                                                         'type': 'Collection',
                                                         'first': {'type': 'CollectionPage',
                                                                   'next': 'https://mastodon.social/users/jaywink/statuses/102750454691863505/replies?only_other_accounts=true&page=true',
                                                                   'partOf': 'https://mastodon.social/users/jaywink/statuses/102750454691863505/replies',
                                                                   'items': []}}},
                                  'signature': {'type': 'RsaSignature2017',
                                                'creator': 'https://mastodon.social/users/jaywink#main-key',
                                                'created': '2019-09-07T09:11:54Z',
                                                'signatureValue': 'FOO'}}
# Fixture: like ACTIVITYPUB_POST but the object includes a 'source' block with text/markdown content.
ACTIVITYPUB_POST_WITH_SOURCE_MARKDOWN = {
    '@context': ['https://www.w3.org/ns/activitystreams',
                 {'ostatus': 'http://ostatus.org#',
                  'atomUri': 'ostatus:atomUri',
                  'inReplyToAtomUri': 'ostatus:inReplyToAtomUri',
                  'conversation': 'ostatus:conversation',
                  'sensitive': 'as:sensitive',
                  'Hashtag': 'as:Hashtag',
                  'toot': 'http://joinmastodon.org/ns#',
                  'Emoji': 'toot:Emoji',
                  'focalPoint': {'@container': '@list', '@id': 'toot:focalPoint'},
                  'blurhash': 'toot:blurhash'}],
    'id': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237/activity',
    'type': 'Create',
    'actor': 'https://diaspodon.fr/users/jaywink',
    'published': '2019-06-29T21:08:45Z',
    'to': 'https://www.w3.org/ns/activitystreams#Public',
    'cc': ['https://diaspodon.fr/users/jaywink/followers',
           'https://dev.jasonrobinson.me/p/d4574854-a5d7-42be-bfac-f70c16fcaa97/'],
    'object': {'id': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237',
               'type': 'Note',
               'summary': None,
               'inReplyTo': None,
               'published': '2019-06-29T21:08:45Z',
               'url': 'https://diaspodon.fr/@jaywink/102356911717767237',
               'attributedTo': 'https://diaspodon.fr/users/jaywink',
               'to': 'https://www.w3.org/ns/activitystreams#Public',
               'cc': ['https://diaspodon.fr/users/jaywink/followers',
                      'https://dev.jasonrobinson.me/p/d4574854-a5d7-42be-bfac-f70c16fcaa97/'],
               'sensitive': False,
               'atomUri': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237',
               'inReplyToAtomUri': None,
               'conversation': 'tag:diaspodon.fr,2019-06-28:objectId=2347687:objectType=Conversation',
               'content': '<p><span class="h-card"><a href="https://dev.jasonrobinson.me/u/jaywink/" class="u-url mention">@<span>jaywink</span></a></span> boom</p>',
               'source': {
                   'content': "@jaywink boom",
                   'mediaType': "text/markdown",
               },
               'contentMap': {'en': '<p><span class="h-card"><a href="https://dev.jasonrobinson.me/u/jaywink/" class="u-url mention">@<span>jaywink</span></a></span> boom</p>'},
               'attachment': [],
               'tag': [{'type': 'Mention',
                        'href': 'https://dev.jasonrobinson.me/p/d4574854-a5d7-42be-bfac-f70c16fcaa97/',
                        'name': '@jaywink@dev.jasonrobinson.me'}],
               'replies': {'id': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237/replies',
                           'type': 'Collection',
                           'first': {'type': 'CollectionPage',
                                     'partOf': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237/replies',
                                     'items': []}}},
    'signature': {'type': 'RsaSignature2017',
                  'creator': 'https://diaspodon.fr/users/jaywink#main-key',
                  'created': '2019-06-29T21:08:45Z',
                  'signatureValue': 'SjDACS7Z/Cb1SEC3AtxEokID5SHAYl7kpys/hhmaRbpXuFKCxfj2P9BmH8QhLnuam3sENZlrnBOcB5NlcBhIfwo/Xh242RZBmPQf+edTVYVCe1j19dihcftNCHtnqAcKwp/51dNM/OlKu2730FrwvOUXVIPtB7iVqkseO9TRzDYIDj+zBTksnR/NAYtq6SUpmefXfON0uW3N3Uq6PGfExJaS+aeqRf8cPGkZFSIUQZwOLXbIpb7BFjJ1+y1OMOAJueqvikUprAit3v6BiNWurAvSQpC7WWMFUKyA79/xtkO9kIPA/Q4C9ryqdzxZJ0jDhXiaIIQj2JZfIADdjLZHJA=='}
}
# Fixture: like ACTIVITYPUB_POST but the object includes a 'source' block with text/bbcode content.
ACTIVITYPUB_POST_WITH_SOURCE_BBCODE = {
    '@context': ['https://www.w3.org/ns/activitystreams',
                 {'ostatus': 'http://ostatus.org#',
                  'atomUri': 'ostatus:atomUri',
                  'inReplyToAtomUri': 'ostatus:inReplyToAtomUri',
                  'conversation': 'ostatus:conversation',
                  'sensitive': 'as:sensitive',
                  'Hashtag': 'as:Hashtag',
                  'toot': 'http://joinmastodon.org/ns#',
                  'Emoji': 'toot:Emoji',
                  'focalPoint': {'@container': '@list', '@id': 'toot:focalPoint'},
                  'blurhash': 'toot:blurhash'}],
    'id': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237/activity',
    'type': 'Create',
    'actor': 'https://diaspodon.fr/users/jaywink',
    'published': '2019-06-29T21:08:45Z',
    'to': 'https://www.w3.org/ns/activitystreams#Public',
    'cc': ['https://diaspodon.fr/users/jaywink/followers',
           'https://dev.jasonrobinson.me/p/d4574854-a5d7-42be-bfac-f70c16fcaa97/'],
    'object': {'id': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237',
               'type': 'Note',
               'summary': None,
               'inReplyTo': None,
               'published': '2019-06-29T21:08:45Z',
               'url': 'https://diaspodon.fr/@jaywink/102356911717767237',
               'attributedTo': 'https://diaspodon.fr/users/jaywink',
               'to': 'https://www.w3.org/ns/activitystreams#Public',
               'cc': ['https://diaspodon.fr/users/jaywink/followers',
                      'https://dev.jasonrobinson.me/p/d4574854-a5d7-42be-bfac-f70c16fcaa97/'],
               'sensitive': False,
               'atomUri': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237',
               'inReplyToAtomUri': None,
               'conversation': 'tag:diaspodon.fr,2019-06-28:objectId=2347687:objectType=Conversation',
               'content': '<p><span class="h-card"><a href="https://dev.jasonrobinson.me/u/jaywink/" class="u-url mention">@<span>jaywink</span></a></span> boom</p>',
               'source': {
                   'content': "[url=https://example.com]jaywink[/url] boom",
                   'mediaType': "text/bbcode",
               },
               'contentMap': {'en': '<p><span class="h-card"><a href="https://dev.jasonrobinson.me/u/jaywink/" class="u-url mention">@<span>jaywink</span></a></span> boom</p>'},
               'attachment': [],
               'tag': [{'type': 'Mention',
                        'href': 'https://dev.jasonrobinson.me/p/d4574854-a5d7-42be-bfac-f70c16fcaa97/',
                        'name': '@jaywink@dev.jasonrobinson.me'}],
               'replies': {'id': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237/replies',
                           'type': 'Collection',
                           'first': {'type': 'CollectionPage',
                                     'partOf': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237/replies',
                                     'items': []}}},
    'signature': {'type': 'RsaSignature2017',
                  'creator': 'https://diaspodon.fr/users/jaywink#main-key',
                  'created': '2019-06-29T21:08:45Z',
                  'signatureValue': 'SjDACS7Z/Cb1SEC3AtxEokID5SHAYl7kpys/hhmaRbpXuFKCxfj2P9BmH8QhLnuam3sENZlrnBOcB5NlcBhIfwo/Xh242RZBmPQf+edTVYVCe1j19dihcftNCHtnqAcKwp/51dNM/OlKu2730FrwvOUXVIPtB7iVqkseO9TRzDYIDj+zBTksnR/NAYtq6SUpmefXfON0uW3N3Uq6PGfExJaS+aeqRf8cPGkZFSIUQZwOLXbIpb7BFjJ1+y1OMOAJueqvikUprAit3v6BiNWurAvSQpC7WWMFUKyA79/xtkO9kIPA/Q4C9ryqdzxZJ0jDhXiaIIQj2JZfIADdjLZHJA=='}
}
ACTIVITYPUB_POST_OBJECT = {
'id': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237',
'type': 'Note',
'summary': None,
'inReplyTo': None,
'published': '2019-06-29T21:08:45Z',
'url': 'https://diaspodon.fr/@jaywink/102356911717767237',
'attributedTo': 'https://diaspodon.fr/users/jaywink',
'to': ['https://www.w3.org/ns/activitystreams#Public'],
'cc': ['https://diaspodon.fr/users/jaywink/followers',
'https://dev.jasonrobinson.me/p/d4574854-a5d7-42be-bfac-f70c16fcaa97/'],
'sensitive': False,
'atomUri': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237',
'inReplyToAtomUri': None,
'conversation': 'tag:diaspodon.fr,2019-06-28:objectId=2347687:objectType=Conversation',
'content': '<p><span class="h-card"><a href="https://dev.jasonrobinson.me/u/jaywink/" class="u-url mention">@<span>jaywink</span></a></span> boom</p>',
'contentMap': {'en': '<p><span class="h-card"><a href="https://dev.jasonrobinson.me/u/jaywink/" class="u-url mention">@<span>jaywink</span></a></span> boom</p>'},
'attachment': [],
'tag': [{'type': 'Mention',
'href': 'https://dev.jasonrobinson.me/p/d4574854-a5d7-42be-bfac-f70c16fcaa97/',
'name': '@jaywink@dev.jasonrobinson.me'}],
'replies': {'id': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237/replies',
'type': 'Collection',
'first': {'type': 'CollectionPage',
'partOf': 'https://diaspodon.fr/users/jaywink/statuses/102356911717767237/replies',
'items': []}},
}
ACTIVITYPUB_POST_IMAGES = {'@context': ['https://www.w3.org/ns/activitystreams',
{'ostatus': 'http://ostatus.org#',
'atomUri': 'ostatus:atomUri',
'inReplyToAtomUri': 'ostatus:inReplyToAtomUri',
'conversation': 'ostatus:conversation',
'sensitive': 'as:sensitive',
'Hashtag': 'as:Hashtag',
'toot': 'http://joinmastodon.org/ns#',
'Emoji': 'toot:Emoji',
'focalPoint': {'@container': '@list', '@id': 'toot:focalPoint'},
'blurhash': 'toot:blurhash'}],
'id': 'https://mastodon.social/users/jaywink/statuses/102611770245850345/activity',
'type': 'Create',
'actor': 'https://mastodon.social/users/jaywink',
'published': '2019-08-13T21:22:37Z',
'to': ['https://www.w3.org/ns/activitystreams#Public'],
'cc': ['https://mastodon.social/users/jaywink/followers'],
'object': {'id': 'https://mastodon.social/users/jaywink/statuses/102611770245850345',
'type': 'Note',
'summary': None,
'inReplyTo': None,
'published': '2019-08-13T21:22:37Z',
'url': 'https://mastodon.social/@jaywink/102611770245850345',
'attributedTo': 'https://mastodon.social/users/jaywink',
'to': ['https://www.w3.org/ns/activitystreams#Public'],
'cc': ['https://mastodon.social/users/jaywink/followers'],
'sensitive': False,
'atomUri': 'https://mastodon.social/users/jaywink/statuses/102611770245850345',
'inReplyToAtomUri': None,
'conversation': 'tag:mastodon.social,2019-08-13:objectId=119290371:objectType=Conversation',
'content': '<p>image test</p>',
'contentMap': {'en': '<p>image test</p>'},
'attachment': [{'type': 'Document',
'mediaType': 'image/jpeg',
'url': 'https://files.mastodon.social/media_attachments/files/017/642/079/original/f51b0aee0ee1f2e1.jpg',
'name': None,
'blurhash': 'UaH1x+IpD*RktToft6s:0f%2tQj@xsWWRkNG'},
{'type': 'Document',
'mediaType': 'video/mp4',
'url': 'https://files.mastodon.social/media_attachments/files/017/642/084/original/e18dda257e5e7078.mp4',
'name': None,
'blurhash': 'UH9jv0ay00Rj%MM{IU%M%MWBRjofxuayM{t7'}],
'tag': [],
'replies': {'id': 'https://mastodon.social/users/jaywink/statuses/102611770245850345/replies',
'type': 'Collection',
'first': {'type': 'CollectionPage',
'partOf': 'https://mastodon.social/users/jaywink/statuses/102611770245850345/replies',
'items': []}}},
'signature': {'type': 'RsaSignature2017',
'creator': 'https://mastodon.social/users/jaywink#main-key',
'created': '2019-08-13T21:22:37Z',
'signatureValue': 'Ia61wdHHIy9gCY5YwqlPtd80eJ2liT9Yi3yHdRdP+fQ5/9np3wHJKNPa7gdzP/BiRzh6aOa2dHWJjB8mOnHYrYBn6Fl3RlCniqousVTDue/ek0zvcFWmlhfja02meDiva+t61O/6Ul1l4tQObMorSf7GbEPePlQiozr/SR/5HIj3SDP0Y8JmlTvhSFgiH6obdroaIYEMQAoYZVcYofGeQUEhotDRp0OGQ4UaPBli4WyzVOUqHMW6pw90QQzZF9XpimwAemk9oAgPmGEPkugFeHfrWt1l84KLdwqwWD8FRIep7gCtu6MpCA8TX4JC5yJvyQ9GbZLZfJSQ6t5wSrcafw=='}}
ACTIVITYPUB_POST_OBJECT_IMAGES = {
"@context": ["https://www.w3.org/ns/activitystreams",
{"ostatus": "http://ostatus.org#", "atomUri": "ostatus:atomUri",
"inReplyToAtomUri": "ostatus:inReplyToAtomUri",
"conversation": "ostatus:conversation", "sensitive": "as:sensitive",
"Hashtag": "as:Hashtag", "toot": "http://joinmastodon.org/ns#",
"Emoji": "toot:Emoji",
"focalPoint": {"@container": "@list", "@id": "toot:focalPoint"},
"blurhash": "toot:blurhash"}],
"id": "https://mastodon.social/users/foobar/statuses/34324r",
"type": "Note", "summary": None, "inReplyTo": None,
"published": "2019-08-18T02:03:17Z",
"url": "https://mastodon.social/@foobar/34324r",
"attributedTo": "https://mastodon.social/users/foobar",
"to": ["https://www.w3.org/ns/activitystreams#Public"],
"cc": ["https://mastodon.social/users/foobar/followers"], "sensitive": False,
"atomUri": "https://mastodon.social/users/foobar/statuses/34324r",
"inReplyToAtomUri": None,
"conversation": None,
"content": "foobar",
"contentMap": {
"en": "foobar"
},
"attachment": [{"type": "Document", "mediaType": "image/jpeg",
"url": "https://files.mastodon.social/media_attachments/files/017/792/237/original/foobar.jpg",
"name": None, "blurhash": "fff"}],
"tag": [],
"replies": {}
}
| {
"repo_name": "jaywink/social-federation",
"path": "federation/tests/fixtures/payloads/activitypub.py",
"copies": "2",
"size": "34390",
"license": "bsd-3-clause",
"hash": 6878831494804045000,
"line_mean": 51.3439878234,
"line_max": 836,
"alpha_frac": 0.6901134051,
"autogenerated": false,
"ratio": 2.3611397185032614,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9003225242175603,
"avg_score": 0.009605576285531742,
"num_lines": 657
} |
#Activity recognizer
import sys
from math import sqrt
import itertools
from pyspark import SparkContext
from pyspark.mllib.linalg import Matrices, Vectors
from pyspark.mllib.tree import DecisionTree, DecisionTreeModel
from pyspark.mllib.tree import RandomForest, RandomForestModel
from pyspark.mllib.util import MLUtils
from time import time, sleep
from pyspark.mllib.stat import Statistics
#import datetime
import serial
from pyspark import rdd
from pprint import pprint
from pyspark import SparkContext
#############################
# # # # # Functions # # # # #
#############################
def computeResultantAcc(xx,yy,zz):
    """Return the resultant (euclidean magnitude) acceleration feature.

    xx, yy, zz are equal-length sequences of per-axis accelerometer samples.

    NOTE(review): `Result` is initialised as a list but then *overwritten*
    with a scalar on every loop iteration, so only the magnitude of the LAST
    sample in the window is returned.  The intended feature is presumably an
    aggregate over the window (e.g. the mean resultant acceleration) -- but
    the saved prediction model loaded below was presumably trained on this
    exact (buggy) feature, so fixing this here alone would skew predictions.
    Fix the feature in both training and inference together, then retrain.
    Also: itertools.izip exists only in Python 2; use zip() for Python 3.
    """
    Result = []
    for a,b,c in itertools.izip(xx,yy,zz):
        # overwrites (not appends) -- see NOTE above
        Result = sqrt(((a)*(a))+((b)*(b))+((c)*(c)))
    return Result;
def getComputations(x,y,z):
    """Build the statistical feature vector for one sampling window.

    x, y, z are Spark RDDs of float accelerometer samples (one per axis).
    Returns a flat list of features in the fixed order the saved model was
    trained on: per-axis mean, variance, stdev, average difference,
    resultant acceleration, and the x/y mean difference.
    """
    features = []
    # identity maps -- effectively just aliases of the input RDDs
    acc_x = x.map(lambda x:x)
    acc_y = y.map(lambda y:y)
    acc_z = z.map(lambda z:z)
    # per-axis means
    x_mean = acc_x.mean()
    y_mean = acc_y.mean()
    z_mean = acc_z.mean()
    features.append(x_mean)
    features.append(y_mean)
    features.append(z_mean)
    # per-axis variances
    x_variance = acc_x.variance()
    y_variance = acc_y.variance()
    z_variance = acc_z.variance()
    features.append(x_variance)
    features.append(y_variance)
    features.append(z_variance)
    # per-axis standard deviations
    x_stdev = acc_x.stdev()
    y_stdev = acc_y.stdev()
    z_stdev = acc_z.stdev()
    features.append(x_stdev)
    features.append(y_stdev)
    features.append(z_stdev)
    # NOTE(review): the mean of (x - x_mean) is ~0 by construction; the name
    # "avgabsDiff" suggests the *absolute* deviation abs(x - x_mean) was
    # intended.  Left unchanged because the saved model was presumably
    # trained on these exact features -- confirm against the training code
    # before fixing, and retrain if changed.
    avgabsDiff_x = acc_x.map(lambda x:x-x_mean).mean()
    avgabsDiff_y = acc_y.map(lambda y:y-y_mean).mean()
    avgabsDiff_z = acc_z.map(lambda z:z-z_mean).mean()
    features.append(avgabsDiff_x)
    features.append(avgabsDiff_y)
    features.append(avgabsDiff_z)
    # resultant acceleration (see NOTE in computeResultantAcc: currently the
    # last sample's magnitude only)
    Resultant = computeResultantAcc(acc_x.collect(),acc_y.collect(),acc_z.collect())
    features.append(Resultant)
    # difference between x and y axis means
    Diff_axs = float(float(x_mean) - float(y_mean))
    features.append(Diff_axs)
    return features;
##################################
# # # # # Configurations # # # # #
##################################
#Vars
# SECONDS/ONE_SECOND is the sampling-window length; with both equal the
# window is exactly 1 second of samples
SECONDS = 100000000
ONE_SECOND = 100000000
# class labels, indexed by the model's integer prediction
Activities = ["Walking","Jogging","Standing","Sitting"]
#apache spark
sc = SparkContext(appName="Activity recognizer")
#read model
#PredictionModel = DecisionTreeModel.load(sc, "/usr/local/spark-2.1.1/cpmx8/saved_models/two_decisionTree")
PredictionModel = RandomForestModel.load(sc, "/usr/local/spark-2.1.1/cpmx8/saved_models/two_randomForest")
############################
# # # # # Begining # # # # #
############################
#Eternal loop
while True:
    #pass
    #Set acumulator sensor variables
    acc_x = []
    acc_y = []
    acc_z = []
    g_acc = []
    #sleep(5)
    tdiff = 0
    t1 = time()
    # collect samples until one window (1 second) has elapsed
    while float(tdiff) < float(SECONDS/ONE_SECOND):
        # NOTE(review): the serial port is re-opened and closed on *every*
        # sample, which is slow and can drop data -- consider opening it
        # once outside the loops (confirm against the device's behavior).
        #Serial port
        ser = serial.Serial('/dev/ttyACM0', 2000000, timeout=2, xonxoff=False, rtscts=False, dsrdtr=False) #Tried with and without the last 3 parameters, and also at 1Mbps, same happens.
        ser.flushInput()
        ser.flushOutput()
        #pass
        #read sensor
        data_raw = ser.readline()
        #parse the data: comma-separated "x,y,z\n"
        parsed = data_raw.split(',')
        #send to array or list to acumulators
        acc_x.append(float(parsed[0]))
        acc_y.append(float(parsed[1]))
        acc_z.append(float(parsed[2].split('\n')[0]))
        #verify the time to reloop
        print(float(parsed[0]),float(parsed[1]),float(parsed[2]))
        t2 = time()
        tdiff = t2 - t1
        ser.close()
    #compute statistical data with acumulators (drop falsy/zero samples)
    features = getComputations(sc.parallelize(filter(None,acc_x)),\
        sc.parallelize(filter(None,acc_y)),\
        sc.parallelize(filter(None,acc_z))\
        )
    #query model for prediction
    prediction = PredictionModel.predict(features)#.map(lambda r: r.features))
    #get result
    #Translate result to correct activity word
    Result = Activities[int(prediction)]
    print("----------------------------------------------------")
    print(" ------------------" + str(Result) + "------------------")
    print("----------------------------------------------------")
    sleep(0.5)
    #send result to server
| {
"repo_name": "amadeusmx/cpmx8-machine-learning-with-accelerometer-of-xdk-bosch",
"path": "Activity recognizer.py",
"copies": "1",
"size": "3881",
"license": "apache-2.0",
"hash": 566122521760799550,
"line_mean": 26.7214285714,
"line_max": 180,
"alpha_frac": 0.6413295542,
"autogenerated": false,
"ratio": 2.9716692189892804,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8844583585608958,
"avg_score": 0.05368303751606443,
"num_lines": 140
} |
import os
import pandas as pd
import openmatrix as omx
def read_manifest(manifest_file_name):
    """Load the skim manifest CSV and normalise its column names.

    The manifest maps skim keys (token/time-period) to the source OMX file
    and matrix key they should be copied from.  All four columns are read
    as strings; '#' lines are treated as comments.

    Returns a pandas DataFrame with columns:
    skim_key1, skim_key2, source_file_name, source_key
    """
    renames = {
        'Token': 'skim_key1',
        'TimePeriod': 'skim_key2',
        'File': 'source_file_name',
        'Matrix': 'source_key',
    }
    # force every manifest column to be read as a plain string
    as_str = dict.fromkeys(renames, str)
    df = pd.read_csv(manifest_file_name, header=0, comment='#', converters=as_str)
    return df.rename(columns=renames)
def omx_getMatrix(omx_file_name, omx_key):
with omx.open_file(omx_file_name, 'r') as omx_file:
if omx_key not in omx_file.list_matrices():
print "Source matrix with key '%s' not found in file '%s" % (omx_key, omx_file,)
print omx_file.list_matrices()
raise RuntimeError("Source matrix with key '%s' not found in file '%s"
% (omx_key, omx_file,))
data = omx_file[omx_key]
return data
# All paths are relative to the current working directory.
manifest_dir = '.'
source_data_dir = '.'
dest_data_dir = '.'
manifest_file_name = os.path.join(manifest_dir, 'skim_manifest.csv')
dest_file_name = os.path.join(dest_data_dir, 'skims.omx')
# Copy every matrix named in the manifest into a single combined skims.omx,
# keyed as "<skim_key1>__<skim_key2>" (or just "<skim_key1>" when no period).
with omx.open_file(dest_file_name, 'a') as dest_omx:
    manifest = read_manifest(manifest_file_name)
    for row in manifest.itertuples(index=True):
        source_file_name = os.path.join(source_data_dir, row.source_file_name)
        if row.skim_key2:
            dest_key = row.skim_key1 + '__' + row.skim_key2
        else:
            dest_key = row.skim_key1
        print "Reading '%s' from '%s' in %s" % (dest_key, row.source_key, source_file_name)
        with omx.open_file(source_file_name, 'r') as source_omx:
            if row.source_key not in source_omx.list_matrices():
                print "Source matrix with key '%s' not found in file '%s" \
                    % (row.source_key, source_file_name,)
                print source_omx.list_matrices()
                # NOTE(review): this message interpolates dest_omx (the open
                # destination store) where source_file_name was presumably
                # intended, and is missing its closing quote -- confirm and fix.
                raise RuntimeError("Source matrix with key '%s' not found in file '%s"
                    % (row.source_key, dest_omx,))
            data = source_omx[row.source_key]
            # overwrite any previous copy of this key in the destination
            if dest_key in dest_omx.list_matrices():
                print "deleting existing dest key '%s'" % (dest_key,)
                dest_omx.removeNode(dest_omx.root.data, dest_key)
            dest_omx[dest_key] = data
| {
"repo_name": "UDST/activitysim",
"path": "scripts/build_omx.py",
"copies": "2",
"size": "2492",
"license": "bsd-3-clause",
"hash": 1116000736664617100,
"line_mean": 29.3902439024,
"line_max": 92,
"alpha_frac": 0.5862760835,
"autogenerated": false,
"ratio": 3.270341207349081,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4856617290849081,
"avg_score": null,
"num_lines": null
} |
# ActivitySim
# See full license in LICENSE.txt.
from __future__ import (absolute_import, division, print_function, )
from future.standard_library import install_aliases
install_aliases() # noqa: E402
import logging
from activitysim.core.util import assign_in_place
logger = logging.getLogger(__name__)
def failed_trip_cohorts(trips, failed):
    """Flag every trip that shares a tour leg with a failed trip.

    Parameters
    ----------
    trips : pandas.DataFrame
        must have boolean column 'outbound' and column 'tour_id'
    failed : pandas.Series
        boolean, aligned with trips, marking the failed trips

    Returns
    -------
    pandas.Series of bool
        True for any trip (failed or not) on a tour leg containing a failure
    """
    def leg_cohort(on_leg):
        # trips on this leg direction whose tour has a failure on the same leg
        failed_tours = trips.tour_id[failed & on_leg]
        return on_leg & trips.tour_id.isin(failed_tours)

    # outbound and inbound legs are handled independently
    return leg_cohort(trips.outbound) | leg_cohort(~trips.outbound)
def flag_failed_trip_leg_mates(trips_df, col_name):
    """Set boolean column `col_name` to True, in place, for the not-failed
    leg mates of failed trips (trips sharing a tour leg with a failure).
    """
    cohorts = failed_trip_cohorts(trips_df, trips_df.failed)
    leg_mates = cohorts & ~trips_df.failed
    trips_df.loc[leg_mates, col_name] = True
def cleanup_failed_trips(trips):
    """
    drop failed trips and cleanup fields in leg_mates:

    trip_num        assign new ordinal trip num after failed trips are dropped
    trip_count      assign new count of trips in leg, sans failed trips
    first           update first flag as we may have dropped first trip (last trip can't fail)
    next_trip_id    assign id of next trip in leg after failed trips are dropped
    """
    if trips.failed.any():
        logger.warning("cleanup_failed_trips dropping %s failed trips" % trips.failed.sum())
        # mark the surviving leg mates of failed trips for renumbering
        trips['patch'] = False
        flag_failed_trip_leg_mates(trips, 'patch')
        # drop the original failures
        trips = trips[~trips.failed]
        # increasing trip_id order
        patch_trips = trips[trips.patch].sort_index()
        # recompute fields dependent on trip_num sequence:
        # trip_num is 1-based position within the leg, trip_count is the
        # new leg length (position from start + position from end + 1)
        grouped = patch_trips.groupby(['tour_id', 'outbound'])
        patch_trips['trip_num'] = grouped.cumcount() + 1
        patch_trips['trip_count'] = patch_trips['trip_num'] + grouped.cumcount(ascending=False)
        # write the recomputed columns back onto the surviving trips
        assign_in_place(trips, patch_trips[['trip_num', 'trip_count']])
        del trips['patch']
    del trips['failed']
    return trips
| {
"repo_name": "UDST/activitysim",
"path": "activitysim/abm/models/util/trip.py",
"copies": "2",
"size": "2902",
"license": "bsd-3-clause",
"hash": -8974084020301307000,
"line_mean": 34.3902439024,
"line_max": 98,
"alpha_frac": 0.6647139904,
"autogenerated": false,
"ratio": 3.335632183908046,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5000346174308046,
"avg_score": null,
"num_lines": null
} |
# ActivitySim
# See full license in LICENSE.txt.
from builtins import range
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from .. import timetable as tt
@pytest.fixture
def persons():
    """Six empty person rows, indexed 0..5."""
    return pd.DataFrame(index=list(range(6)))
@pytest.fixture
def tdd_alts():
    """Tour departure/duration alternatives: every (start, end) pair with
    5 <= start <= end <= 10, plus the derived duration column."""
    pairs = [[start, end]
             for start in range(5, 11)
             for end in range(start, 11)]
    alts = pd.DataFrame(data=pairs, columns=['start', 'end'])
    alts['duration'] = alts.end - alts.start
    return alts
def test_basic(persons, tdd_alts):
    """End-to-end exercise of TimeTable against a 6-person / 21-alternative
    setup: availability checks, assignment, collision detection,
    previous-tour queries, adjacent-window scans and remaining-period counts.
    """
    person_windows = tt.create_timetable_windows(persons, tdd_alts)
    timetable = tt.TimeTable(person_windows, tdd_alts, 'person_windows')

    # print "\ntdd_footprints_df\n", timetable.tdd_footprints_df
    #     0  1  2  3  4  5  6  7
    # 0   0  6  0  0  0  0  0  0
    # 1   0  2  4  0  0  0  0  0
    # 2   0  2  7  4  0  0  0  0
    # 3   0  2  7  7  4  0  0  0
    # 4   0  2  7  7  7  4  0  0
    # 5   0  2  7  7  7  7  4  0
    # 6   0  0  6  0  0  0  0  0
    # 7   0  0  2  4  0  0  0  0
    # 8   0  0  2  7  4  0  0  0
    # 9   0  0  2  7  7  4  0  0
    # 10  0  0  2  7  7  7  4  0
    # 11  0  0  0  6  0  0  0  0
    # 12  0  0  0  2  4  0  0  0
    # 13  0  0  0  2  7  4  0  0
    # 14  0  0  0  2  7  7  4  0
    # 15  0  0  0  0  6  0  0  0
    # 16  0  0  0  0  2  4  0  0
    # 17  0  0  0  0  2  7  4  0
    # 18  0  0  0  0  0  6  0  0
    # 19  0  0  0  0  0  2  4  0
    # 20  0  0  0  0  0  0  6  0

    # with empty windows, every alternative is available to every person
    num_alts = len(tdd_alts.index)
    num_persons = len(persons.index)
    person_ids = pd.Series(list(range(num_persons))*num_alts)
    tdds = pd.Series(np.repeat(list(range(num_alts)), num_persons))

    assert timetable.tour_available(person_ids, tdds).all()

    # assign one tour per person to seed the windows below
    person_ids = pd.Series([0, 1, 2, 3, 4, 5])
    tdds = pd.Series([0, 1, 2, 15, 16, 17])
    timetable.assign(person_ids, tdds)

    # print "\nupdated_person_windows\n", timetable.get_person_windows_df()
    #    4  5  6  7  8  9  10  11
    # 0  0  6  0  0  0  0   0   0
    # 1  0  2  4  0  0  0   0   0
    # 2  0  2  7  4  0  0   0   0
    # 3  0  0  0  0  6  0   0   0
    # 4  0  0  0  0  2  4   0   0
    # 5  0  0  0  0  2  7   4   0

    person_ids = pd.Series([0, 1, 1, 0, 1, 3, 4])
    tdds = pd.Series([
        0,   # tdd START_END does not collide with START_END
        0,   # tdd START_END does not collide with START
        6,   # tdd START_END does not collide with END
        1,   # tdd START does not collide with START_END
        7,   # tdd START does not collide with END
        3,   # tdd END does not collide with START_END
        3,   # tdd END does not collide with START
    ])

    assert timetable.tour_available(person_ids, tdds).all()

    # print "\nupdated_person_windows\n", timetable.get_person_windows_df()
    #    4  5  6  7  8  9  10  11
    # 0  0  6  0  0  0  0   0   0
    # 1  0  2  4  0  0  0   0   0
    # 2  0  2  7  4  0  0   0   0
    # 3  0  0  0  0  6  0   0   0
    # 4  0  0  0  0  2  4   0   0
    # 5  0  0  0  0  2  7   4   0

    person_ids = pd.Series([1, 5, 2, 2])
    tdds = pd.Series([
        1,   # tdd START + END collides with START + END
        17,  # START + MIDDLE + END collides with same
        6,   # tdd START_END collides with MIDDLE
        1,   # tdd START + END collides with START + MIDDLE
    ])

    assert not timetable.tour_available(person_ids, tdds).any()

    # ensure that tour_available handles heterogeneous results
    person_ids = pd.Series([0, 1, 1, 5])
    tdds = pd.Series([
        0,   # tdd START_END does not collide with START_END
        0,   # tdd START_END does not collide with START
        1,   # tdd START + END collides with START + END
        17,  # START + MIDDLE + END collides with same
    ])

    pdt.assert_series_equal(timetable.tour_available(person_ids, tdds),
                            pd.Series([True, True, False, False], index=person_ids.index))

    # assigning overlapping trip END,START should convert END to START_END
    person_ids = pd.Series([2])
    tdds = pd.Series([13])
    assert timetable.tour_available(person_ids, tdds).all()
    assert timetable.windows[2, 3] == tt.I_END
    timetable.assign(person_ids, tdds)
    assert timetable.windows[2, 3] == tt.I_START_END

    # print "\nupdated_person_windows\n", timetable.get_person_windows_df()
    #    4  5  6  7  8  9  10  11
    # 0  0  6  0  0  0  0   0   0
    # 1  0  2  4  0  0  0   0   0
    # 2  0  2  7  6  7  4   0   0
    # 3  0  0  0  0  6  0   0   0
    # 4  0  0  0  0  2  4   0   0
    # 5  0  0  0  0  2  7   4   0

    # - previous_tour_ends
    person_ids = pd.Series([0, 1, 2, 3, 4, 5, 2])
    periods = pd.Series([5, 6, 9, 8, 9, 10, 7])
    assert timetable.previous_tour_ends(person_ids, periods).all()

    person_ids = pd.Series([0, 1, 2])
    periods = pd.Series([9, 5, 8])
    assert not timetable.previous_tour_ends(person_ids, periods).any()

    # - previous_tour_begins
    person_ids = pd.Series([0, 1, 2, 3, 4, 5, 2])
    periods = pd.Series([5, 5, 5, 8, 8, 8, 7])
    assert timetable.previous_tour_begins(person_ids, periods).all()

    person_ids = pd.Series([0, 1, 2])
    periods = pd.Series([9, 6, 8])
    assert not timetable.previous_tour_begins(person_ids, periods).any()

    # - adjacent_window_after: length of free run following each period
    person_ids = pd.Series([0, 1, 2, 3, 4, 5])
    periods = pd.Series([5, 5, 5, 5, 5, 5])
    adjacent_run_length = timetable.adjacent_window_after(person_ids, periods)
    pdt.assert_series_equal(adjacent_run_length, pd.Series([5, 5, 0, 5, 5, 3]))

    # - adjacent_window_before: length of free run preceding each period
    person_ids = pd.Series([0, 1, 2, 3, 4, 5])
    periods = pd.Series([10, 10, 10, 10, 10, 10])
    adjacent_run_length = timetable.adjacent_window_before(person_ids, periods)
    pdt.assert_series_equal(adjacent_run_length, pd.Series([5, 5, 1, 5, 5, 0]))

    # - remaining_periods_available
    person_ids = pd.Series([0, 1, 2, 3])
    starts = pd.Series([9, 6, 9, 5])
    ends = pd.Series([10, 10, 10, 9])
    periods_available = timetable.remaining_periods_available(person_ids, starts, ends)
    pdt.assert_series_equal(periods_available, pd.Series([6, 3, 4, 3]))
| {
"repo_name": "UDST/activitysim",
"path": "activitysim/core/test/test_timetable.py",
"copies": "2",
"size": "6349",
"license": "bsd-3-clause",
"hash": 7026654858939826000,
"line_mean": 33.1344086022,
"line_max": 90,
"alpha_frac": 0.5336273429,
"autogenerated": false,
"ratio": 2.5914285714285716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4125055914328572,
"avg_score": null,
"num_lines": null
} |
# ActivitySim
# See full license in LICENSE.txt.
from __future__ import (absolute_import, division, print_function, )
from future.standard_library import install_aliases
install_aliases() # noqa: E402
from builtins import next
from builtins import map
from builtins import object
from future.utils import iteritems
import os
import logging
import datetime as dt
import pandas as pd
from . import orca
from . import inject
from . import config
from . import random
from . import tracing
from . import mem
from . import util
from .tracing import print_elapsed_time
logger = logging.getLogger(__name__)

# name of the checkpoint dict keys
# (which are also columns in the checkpoints dataframe stored in the pipeline store)
TIMESTAMP = 'timestamp'
CHECKPOINT_NAME = 'checkpoint_name'
NON_TABLE_COLUMNS = [CHECKPOINT_NAME, TIMESTAMP]

# name used for storing the checkpoints dataframe to the pipeline store
CHECKPOINT_TABLE_NAME = 'checkpoints'

# name of the first step/checkpoint created when the pipeline is started
INITIAL_CHECKPOINT_NAME = 'init'

# special value for resume_after meaning last checkpoint
LAST_CHECKPOINT = '_'

# single character prefix for run_list model name to indicate that no checkpoint should be saved
NO_CHECKPOINT_PREFIX = '_'
class Pipeline(object):
def __init__(self):
self.init_state()
def init_state(self):
# most recent checkpoint
self.last_checkpoint = {}
# array of checkpoint dicts
self.checkpoints = []
self.replaced_tables = {}
self._rng = random.Random()
self.open_files = {}
self.pipeline_store = None
self.is_open = False
def rng(self):
return self._rng
_PIPELINE = Pipeline()
def be_open():
    """Raise RuntimeError unless the singleton pipeline has been opened."""
    if _PIPELINE.is_open:
        return
    raise RuntimeError("Pipeline is not open!")
def pipeline_table_key(table_name, checkpoint_name):
    """Return the HDF5 store key for a checkpointed table version.

    Tables are stored once per checkpoint under '<table>/<checkpoint>';
    a falsy checkpoint_name (e.g. for the checkpoints table itself) yields
    the bare table name.
    """
    return "%s/%s" % (table_name, checkpoint_name) if checkpoint_name else table_name
def close_on_exit(file, name):
    """Register an open file-like object, keyed by unique name, to be closed
    later by close_open_files()."""
    assert name not in _PIPELINE.open_files
    _PIPELINE.open_files[name] = file
def close_open_files():
    """Close and forget every file registered via close_on_exit()."""
    for name, file in list(_PIPELINE.open_files.items()):
        print("Closing %s" % name)
        file.close()
    _PIPELINE.open_files.clear()
def open_pipeline_store(overwrite=False):
    """
    Open the pipeline checkpoint store

    Parameters
    ----------
    overwrite : bool
        delete file before opening (unless resuming)
    """

    if _PIPELINE.pipeline_store is not None:
        raise RuntimeError("Pipeline store is already open!")

    pipeline_file_path = config.pipeline_file_path(inject.get_injectable('pipeline_file_name'))

    if overwrite:
        try:
            if os.path.isfile(pipeline_file_path):
                logger.debug("removing pipeline store: %s" % pipeline_file_path)
                os.unlink(pipeline_file_path)
        except Exception as e:
            # best effort: log the failure but continue and let HDFStore
            # append to (or fail on) the existing file
            print(e)
            logger.warning("Error removing %s: %s" % (pipeline_file_path, e))

    # mode 'a': create if missing, otherwise open for read/write
    _PIPELINE.pipeline_store = pd.HDFStore(pipeline_file_path, mode='a')

    logger.debug("opened pipeline_store")
def get_pipeline_store():
    """
    Return the open pipeline hdf5 checkpoint store or return None if it not been opened
    """
    return _PIPELINE.pipeline_store
def get_rn_generator():
    """
    Return the singleton random number object

    Returns
    -------
    activitysim.random.Random
    """
    return _PIPELINE.rng()
def read_df(table_name, checkpoint_name=None):
    """
    Read a pandas dataframe from the pipeline store.

    We store multiple versions of all simulation tables, one for every
    checkpoint in which they change, so a table is addressed by both its
    table_name and the checkpoint_name it was saved under.  The only
    exception is the checkpoints dataframe, which is keyed by table_name
    alone (checkpoint_name=None).

    HDFStore raises if the key is not present.

    Parameters
    ----------
    table_name : str
    checkpoint_name : str

    Returns
    -------
    df : pandas.DataFrame
        the dataframe read from the store
    """
    key = pipeline_table_key(table_name, checkpoint_name)
    return get_pipeline_store()[key]
def write_df(df, table_name, checkpoint_name=None):
    """
    Write a pandas dataframe to the pipeline store.

    Tables are stored once per checkpoint in which they change, keyed by
    table_name plus checkpoint_name; the checkpoints dataframe itself is
    keyed by table_name alone (checkpoint_name=None).

    Parameters
    ----------
    df : pandas.DataFrame
        dataframe to store
    table_name : str
        also conventionally the orca table name
    checkpoint_name : str
        the checkpoint at which the table was created/modified
    """
    # PyTables pickles non-str (unicode) column names, so coerce them first
    df.columns = df.columns.astype(str)

    key = pipeline_table_key(table_name, checkpoint_name)
    store = get_pipeline_store()
    store[key] = df
    store.flush()
def rewrap(table_name, df=None):
    """
    Add or replace an orca registered table as a unitary DataFrame-backed DataFrameWrapper table

    if df is None, then get the dataframe from orca (table_name should be registered, or
    an error will be thrown) which may involve evaluating added columns, etc.

    If the orca table already exists, deregister it along with any associated columns before
    re-registering it.

    The net result is that the dataframe is a registered orca DataFrameWrapper table with no
    computed or added columns.

    Parameters
    ----------
    table_name
    df

    Returns
    -------
        the underlying df of the rewrapped table
    """

    logger.debug("rewrap table %s inplace=%s" % (table_name, (df is None)))

    if orca.is_table(table_name):

        if df is None:
            # logger.debug("rewrap - orca.get_table(%s)" % (table_name,))
            # evaluates any computed columns as a side effect
            t = orca.get_table(table_name)
            df = t.to_frame()
        else:
            # logger.debug("rewrap - orca.get_raw_table(%s)" % (table_name,))
            # don't trigger function call of TableFuncWrapper
            t = orca.get_raw_table(table_name)

        t.clear_cached()

        for column_name in orca.list_columns_for_table(table_name):
            # logger.debug("pop %s.%s: %s" % (table_name, column_name, t.column_type(column_name)))
            # fixme -- reaching into orca internals to drop registered columns
            orca._COLUMNS.pop((table_name, column_name), None)

        # remove from orca's table list (again via orca internals)
        orca._TABLES.pop(table_name, None)

    assert df is not None

    orca.add_table(table_name, df)

    return df
def add_checkpoint(checkpoint_name):
    """
    Create a new checkpoint with specified name, write all data required to restore the simulation
    to its current state.

    Detect any changed tables , re-wrap them and write the current version to the pipeline store.
    Write the current state of the random number generator.

    Parameters
    ----------
    checkpoint_name : str
    """
    timestamp = dt.datetime.now()

    logger.debug("add_checkpoint %s timestamp %s" % (checkpoint_name, timestamp))

    for table_name in orca_dataframe_tables():

        # if we have not already checkpointed it or it has changed
        # FIXME - this won't detect if the orca table was modified
        if len(orca.list_columns_for_table(table_name)):
            # rewrap the changed orca table as a unitary DataFrame-backed DataFrameWrapper table
            df = rewrap(table_name)
        elif table_name not in _PIPELINE.last_checkpoint or table_name in _PIPELINE.replaced_tables:
            df = orca.get_table(table_name).to_frame()
        else:
            # unchanged since its last checkpoint -- nothing to write
            continue

        logger.debug("add_checkpoint '%s' table '%s' %s" %
                     (checkpoint_name, table_name, util.df_size(df)))
        write_df(df, table_name, checkpoint_name)

        # remember which checkpoint it was last written
        _PIPELINE.last_checkpoint[table_name] = checkpoint_name

    _PIPELINE.replaced_tables.clear()

    _PIPELINE.last_checkpoint[CHECKPOINT_NAME] = checkpoint_name
    _PIPELINE.last_checkpoint[TIMESTAMP] = timestamp

    # append to the array of checkpoint history
    _PIPELINE.checkpoints.append(_PIPELINE.last_checkpoint.copy())

    # create a pandas dataframe of the checkpoint history, one row per checkpoint
    checkpoints = pd.DataFrame(_PIPELINE.checkpoints)

    # convert empty values to str so PyTables doesn't pickle object types
    for c in checkpoints.columns:
        checkpoints[c] = checkpoints[c].fillna('')

    # write it to the store, overwriting any previous version (no way to simply extend)
    write_df(checkpoints, CHECKPOINT_TABLE_NAME)
def orca_dataframe_tables():
    """
    Return a list of the names of all currently registered dataframe tables
    """
    table_names = []
    for name in orca.list_tables():
        if orca.table_type(name) == 'dataframe':
            table_names.append(name)
    return table_names
def checkpointed_tables():
    """
    Return a list of the names of all checkpointed tables
    """
    names = []
    for name, checkpoint_name in _PIPELINE.last_checkpoint.items():
        # skip bookkeeping keys and tables never written to a checkpoint
        if checkpoint_name and name not in NON_TABLE_COLUMNS:
            names.append(name)
    return names
def load_checkpoint(checkpoint_name):
    """
    Load dataframes and restore random number channel state from pipeline hdf5 file.
    This restores the pipeline state that existed at the specified checkpoint in a prior simulation.
    This allows us to resume the simulation after the specified checkpoint

    Parameters
    ----------
    checkpoint_name : str
        model_name of checkpoint to load (resume_after argument to open_pipeline)

    Raises
    ------
    RuntimeError
        if checkpoint_name is not found in the stored checkpoint table
    """
    logger.info("load_checkpoint %s" % (checkpoint_name))
    checkpoints = read_df(CHECKPOINT_TABLE_NAME)
    # LAST_CHECKPOINT is a sentinel meaning "most recent checkpoint in the store"
    if checkpoint_name == LAST_CHECKPOINT:
        checkpoint_name = checkpoints[CHECKPOINT_NAME].iloc[-1]
        logger.info("loading checkpoint '%s'" % checkpoint_name)
    try:
        # truncate rows after target checkpoint
        i = checkpoints[checkpoints[CHECKPOINT_NAME] == checkpoint_name].index[0]
        checkpoints = checkpoints.loc[:i]
    except IndexError:
        # index[0] raises IndexError when no row matches checkpoint_name
        msg = "Couldn't find checkpoint '%s' in checkpoints" % (checkpoint_name,)
        print(checkpoints[CHECKPOINT_NAME])
        logger.error(msg)
        raise RuntimeError(msg)
    # convert pandas dataframe back to array of checkpoint dicts
    checkpoints = checkpoints.to_dict(orient='records')
    # drop tables with empty names
    # (empty string values were written as placeholders for dropped tables)
    for checkpoint in checkpoints:
        for key in list(checkpoint.keys()):
            if key not in NON_TABLE_COLUMNS and not checkpoint[key]:
                del checkpoint[key]
    # patch _CHECKPOINTS array of dicts
    _PIPELINE.checkpoints = checkpoints
    # patch _CHECKPOINTS dict with latest checkpoint info
    _PIPELINE.last_checkpoint.clear()
    _PIPELINE.last_checkpoint.update(_PIPELINE.checkpoints[-1])
    logger.info("load_checkpoint %s timestamp %s"
                % (checkpoint_name, _PIPELINE.last_checkpoint['timestamp']))
    tables = checkpointed_tables()
    loaded_tables = {}
    for table_name in tables:
        # read dataframe from pipeline store
        # (from the checkpoint at which this table was last written, which may
        # be earlier than checkpoint_name)
        df = read_df(table_name, checkpoint_name=_PIPELINE.last_checkpoint[table_name])
        logger.info("load_checkpoint table %s %s" % (table_name, df.shape))
        # register it as an orca table
        rewrap(table_name, df)
        loaded_tables[table_name] = df
    # register for tracing in order that tracing.register_traceable_table wants us to register them
    traceable_tables = inject.get_injectable('traceable_tables', [])
    for table_name in traceable_tables:
        if table_name in loaded_tables:
            tracing.register_traceable_table(table_name, loaded_tables[table_name])
    # add tables of known rng channels
    rng_channels = inject.get_injectable('rng_channels', [])
    if rng_channels:
        logger.debug("loading random channels %s" % rng_channels)
        for table_name in rng_channels:
            if table_name in loaded_tables:
                logger.debug("adding channel %s" % (table_name,))
                _PIPELINE.rng().add_channel(table_name, loaded_tables[table_name])
def split_arg(s, sep, default=''):
    """
    Split str s in two at the first sep, returning default as the second
    result if there is no sep.

    Parameters
    ----------
    s : str
        string to split, e.g. "trace_hh_id=5" or "simulate"
    sep : str
        separator, e.g. "="
    default : object, optional
        value returned as val when s contains no sep; may be a non-string
        (run_model calls this with default=True)

    Returns
    -------
    (arg, val) : (str, object)
        arg is the stripped text before the first sep; val is the stripped
        text after it (with literal 'true'/'false', any case, converted to
        booleans), or default when there is no sep.
    """
    # maxsplit=1 so everything after the FIRST sep is kept intact
    # (bug fix: the original used maxsplit=2 and silently discarded any
    # third part of "a=b=c")
    r = s.split(sep, 1)
    r = list(map(str.strip, r))
    arg = r[0]
    val = r[1] if len(r) > 1 else default
    # only strings get the true/false conversion; a non-string default
    # (e.g. default=True) must pass through untouched
    # (bug fix: the original called val.lower() unconditionally, which raised
    # AttributeError when there was no sep and default was not a string)
    if isinstance(val, str):
        val = {'true': True, 'false': False}.get(val.lower(), val)
    return arg, val
def run_model(model_name):
    """
    Run the specified model and add checkpoint for model_name

    Since we use model_name as checkpoint name, the same model may not be run more than once.

    Parameters
    ----------
    model_name : str
        model_name is assumed to be the name of a registered orca step;
        may carry step arguments after a '.', e.g. "step.key=value;flag",
        and may be prefixed with NO_CHECKPOINT_PREFIX to suppress checkpointing

    Raises
    ------
    RuntimeError
        if the pipeline is not open or model_name was already run
    """
    if not _PIPELINE.is_open:
        raise RuntimeError("Pipeline not initialized! Did you call open_pipeline?")
    # can't run same model more than once
    if model_name in [checkpoint[CHECKPOINT_NAME] for checkpoint in _PIPELINE.checkpoints]:
        raise RuntimeError("Cannot run model '%s' more than once" % model_name)
    # bracket the step so the rng produces reproducible, step-scoped streams
    _PIPELINE.rng().begin_step(model_name)
    # check for args
    if '.' in model_name:
        # everything after the first '.' is a ';'-separated list of key=value args
        step_name, arg_string = model_name.split('.', 1)
        args = dict((k, v)
                    for k, v in (split_arg(item, "=", default=True)
                                 for item in arg_string.split(";")))
    else:
        step_name = model_name
        args = {}
    # check for no_checkpoint prefix
    if step_name[0] == NO_CHECKPOINT_PREFIX:
        step_name = step_name[1:]
        checkpoint = False
    else:
        checkpoint = True
    inject.set_step_args(args)
    t0 = print_elapsed_time()
    orca.run([step_name])
    t0 = print_elapsed_time("run_model step '%s'" % model_name, t0, debug=True)
    # clear step args so they can't leak into the next step
    inject.set_step_args(None)
    _PIPELINE.rng().end_step(model_name)
    if checkpoint:
        add_checkpoint(model_name)
        t0 = print_elapsed_time("run_model add_checkpoint '%s'" % model_name, t0, debug=True)
    else:
        logger.info("##### skipping %s checkpoint for %s" % (step_name, model_name))
def open_pipeline(resume_after=None):
    """
    Start pipeline, either for a new run or, if resume_after, loading checkpoint from pipeline.

    If resume_after, then we expect the pipeline hdf5 file to exist and contain
    checkpoints from a previous run, including a checkpoint with name specified in resume_after

    Parameters
    ----------
    resume_after : str or None
        name of checkpoint to load from pipeline store

    Raises
    ------
    RuntimeError
        if the pipeline is already open
    """
    logger.info("open_pipeline")
    if _PIPELINE.is_open:
        raise RuntimeError("Pipeline is already open!")
    _PIPELINE.init_state()
    _PIPELINE.is_open = True
    # seed the random number generator before any steps run
    get_rn_generator().set_base_seed(inject.get_injectable('rng_base_seed', 0))
    if resume_after:
        # open existing pipeline
        logger.debug("open_pipeline - open existing pipeline")
        open_pipeline_store(overwrite=False)
        load_checkpoint(resume_after)
    else:
        # open new, empty pipeline
        logger.debug("open_pipeline - new, empty pipeline")
        open_pipeline_store(overwrite=True)
        # - not sure why I thought we needed this?
        # could have exogenous tables or prng instantiation under some circumstance??
        _PIPELINE.last_checkpoint[CHECKPOINT_NAME] = INITIAL_CHECKPOINT_NAME
        # add_checkpoint(INITIAL_CHECKPOINT_NAME)
    logger.debug("open_pipeline complete")
def last_checkpoint():
    """
    Return the name of the most recently checkpointed model.

    Returns
    -------
    last_checkpoint: str
        name of last checkpoint

    Raises
    ------
    RuntimeError
        (via be_open) if the pipeline is not open
    """
    be_open()
    return _PIPELINE.last_checkpoint[CHECKPOINT_NAME]
def close_pipeline():
    """
    Close any known open files
    """
    # be_open raises if the pipeline was never opened / already closed
    be_open()
    close_open_files()
    _PIPELINE.pipeline_store.close()
    # reset state so open_pipeline can be called again
    _PIPELINE.init_state()
    logger.info("close_pipeline")
def run(models, resume_after=None):
    """
    run the specified list of models, optionally loading checkpoint and resuming after specified
    checkpoint.

    Since we use model_name as checkpoint name, the same model may not be run more than once.

    If resume_after checkpoint is specified and a model with that name appears in the models list,
    then we only run the models after that point in the list. This allows the user always to pass
    the same list of models, but specify a resume_after point if desired.

    Parameters
    ----------
    models : [str]
        list of model_names
    resume_after : str or None
        model_name of checkpoint to load checkpoint and AFTER WHICH to resume model run
    """
    t0 = print_elapsed_time()
    open_pipeline(resume_after)
    t0 = print_elapsed_time('open_pipeline', t0)
    # resolve the LAST_CHECKPOINT sentinel to the checkpoint actually loaded
    if resume_after == LAST_CHECKPOINT:
        resume_after = _PIPELINE.last_checkpoint[CHECKPOINT_NAME]
    if resume_after:
        logger.info("resume_after %s" % resume_after)
        if resume_after in models:
            # resume AFTER the named model: drop it and everything before it
            models = models[models.index(resume_after) + 1:]
    # preload any bulky injectables (e.g. skims) not in pipeline
    if orca.is_injectable('preload_injectables'):
        orca.get_injectable('preload_injectables')
        t0 = print_elapsed_time('preload_injectables', t0)
    t0 = print_elapsed_time()
    for model in models:
        run_model(model)
    t0 = print_elapsed_time("run_model (%s models)" % len(models), t0)
    # don't close the pipeline, as the user may want to read intermediate results from the store
def get_table(table_name, checkpoint_name=None):
    """
    Return pandas dataframe corresponding to table_name

    if checkpoint_name is None, return the current (most recent) version of the table.
    The table can be a checkpointed table or any registered orca table (e.g. function table)

    if checkpoint_name is specified, return table as it was at that checkpoint
    (the most recently checkpointed version of the table at or before checkpoint_name)

    Parameters
    ----------
    table_name : str
    checkpoint_name : str or None

    Returns
    -------
    df : pandas.DataFrame

    Raises
    ------
    RuntimeError
        if the table or checkpoint cannot be found, or checkpoint_name is
        given for a non-checkpointed table
    """
    be_open()
    # orca table not in checkpoints (e.g. a merged table)
    if table_name not in _PIPELINE.last_checkpoint and orca.is_table(table_name):
        if checkpoint_name is not None:
            # bug fix: the original adjacent string literals concatenated
            # without a space, producing "not supportedfor ..."
            raise RuntimeError("get_table: checkpoint_name ('%s') not supported "
                               "for non-checkpointed table '%s'" % (checkpoint_name, table_name))
        return orca.get_table(table_name).to_frame()
    # if they want current version of table, no need to read from pipeline store
    if checkpoint_name is None:
        if table_name not in _PIPELINE.last_checkpoint:
            raise RuntimeError("table '%s' never checkpointed." % table_name)
        if not _PIPELINE.last_checkpoint[table_name]:
            # an empty checkpoint name marks a dropped table
            raise RuntimeError("table '%s' was dropped." % table_name)
        # return orca.get_table(table_name).local
        return orca.get_table(table_name).to_frame()
    # find the requested checkpoint
    checkpoint = \
        next((x for x in _PIPELINE.checkpoints if x['checkpoint_name'] == checkpoint_name), None)
    if checkpoint is None:
        raise RuntimeError("checkpoint '%s' not in checkpoints." % checkpoint_name)
    # find the checkpoint that table was written to store
    last_checkpoint_name = checkpoint.get(table_name, None)
    if not last_checkpoint_name:
        raise RuntimeError("table '%s' not in checkpoint '%s'." % (table_name, checkpoint_name))
    # if this version of table is same as current, read the (cheaper) in-memory copy
    if _PIPELINE.last_checkpoint.get(table_name, None) == last_checkpoint_name:
        return orca.get_table(table_name).to_frame()
    return read_df(table_name, last_checkpoint_name)
def get_checkpoints():
    """
    Get pandas dataframe of info about all checkpoints stored in pipeline

    pipeline doesn't have to be open

    Returns
    -------
    checkpoints_df : pandas.DataFrame
    """
    store = get_pipeline_store()
    if store:
        # pipeline is open: read straight from the live store
        checkpoints_df = store[CHECKPOINT_TABLE_NAME]
    else:
        # pipeline closed: open the hdf5 file directly
        pipeline_file_path = config.pipeline_file_path(orca.get_injectable('pipeline_file_name'))
        checkpoints_df = pd.read_hdf(pipeline_file_path, CHECKPOINT_TABLE_NAME)
    # non-table columns first (column order in df is random because created from a dict)
    table_names = [c for c in checkpoints_df.columns.values if c not in NON_TABLE_COLUMNS]
    return checkpoints_df[NON_TABLE_COLUMNS + table_names]
def replace_table(table_name, df):
    """
    Add or replace a orca table, removing any existing added orca columns

    The use case for this function is a method that calls to_frame on an orca table, modifies
    it and then saves the modified.

    orca.to_frame returns a copy, so no changes are saved, and adding multiple column with
    add_column adds them in an indeterminate order.

    Simply replacing an existing the table "behind the pipeline's back" by calling orca.add_table
    risks pipeline to failing to detect that it has changed, and thus not checkpoint the changes.

    Parameters
    ----------
    table_name : str
        orca/pipeline table name
    df : pandas DataFrame
    """
    be_open()
    rewrap(table_name, df)
    # flag the table so add_checkpoint knows it must be rewritten to the store
    _PIPELINE.replaced_tables[table_name] = True
def extend_table(table_name, df, axis=0):
    """
    add new table or extend (add rows) to an existing table

    Parameters
    ----------
    table_name : str
        orca/inject table name
    df : pandas DataFrame
    axis : int, optional
        0 to append rows (indexes must not overlap),
        1 to append columns (indexes must match exactly)

    Returns
    -------
    df : pandas.DataFrame
        the extended (or newly added) table
    """
    be_open()
    assert axis in [0, 1]
    if orca.is_table(table_name):
        table_df = orca.get_table(table_name).to_frame()
        if axis == 0:
            # don't expect indexes to overlap
            assert len(table_df.index.intersection(df.index)) == 0
            # object-dtype columns of the existing table missing from df will
            # get NaN in the concat; remember them so they can be backfilled
            missing_df_str_columns = [c for c in table_df.columns
                                      if c not in df.columns and table_df[c].dtype == 'O']
        else:
            # expect indexes be same
            assert table_df.index.equals(df.index)
            # only add columns the existing table doesn't already have
            new_df_columns = [c for c in df.columns if c not in table_df.columns]
            df = df[new_df_columns]
        # preserve existing column order
        df = pd.concat([table_df, df], sort=False, axis=axis)
        # backfill missing df columns that were str (object) type in table_df
        # (so PyTables doesn't pickle mixed object/NaN columns on checkpoint)
        if axis == 0:
            for c in missing_df_str_columns:
                df[c] = df[c].fillna('')
    replace_table(table_name, df)
    return df
def drop_table(table_name):
    """
    Remove table_name from orca and from pipeline bookkeeping.

    The table is unregistered from orca (including its added columns) and its
    last_checkpoint entry is blanked so it will be treated as dropped.

    Parameters
    ----------
    table_name : str
        orca/pipeline table name
    """
    be_open()
    if orca.is_table(table_name):
        logger.debug("drop_table dropping orca table '%s'" % table_name)
        # don't trigger function call of TableFuncWrapper
        t = orca.get_raw_table(table_name)
        t.clear_cached()
        for column_name in orca.list_columns_for_table(table_name):
            # logger.debug("pop %s.%s: %s" % (table_name, column_name, t.column_type(column_name)))
            # NOTE: reaches into orca private registries; there is no public
            # API in orca for unregistering a table or column
            orca._COLUMNS.pop((table_name, column_name), None)
        # remove from orca's table list
        orca._TABLES.pop(table_name, None)
    if table_name in _PIPELINE.replaced_tables:
        logger.debug("drop_table forgetting replaced_tables '%s'" % table_name)
        del _PIPELINE.replaced_tables[table_name]
    if table_name in _PIPELINE.last_checkpoint:
        logger.debug("drop_table removing table %s from last_checkpoint" % table_name)
        # empty string marks the table as dropped (see get_table / checkpointed_tables)
        _PIPELINE.last_checkpoint[table_name] = ''
| {
"repo_name": "UDST/activitysim",
"path": "activitysim/core/pipeline.py",
"copies": "2",
"size": "23709",
"license": "bsd-3-clause",
"hash": -5751523121450640000,
"line_mean": 29.0875634518,
"line_max": 100,
"alpha_frac": 0.6529166139,
"autogenerated": false,
"ratio": 3.9720221142569945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5624938728156994,
"avg_score": null,
"num_lines": null
} |
# ActivitySim
# See full license in LICENSE.txt.
import numpy as np
import pandas as pd
import openmatrix as omx
# NOTE: this is a Python 2 script (print statements) - it will not run under Python 3
# Dumps the contents of an ActivitySim multi-zone HDF5 network data file to csv
# and prints summary info for each table and each OMX skim file.

# hard-coded local input location; output csv files go to ./output/
input_folder = "/Users/jeff.doyle/work/activitysim-data/sandag_zone/output/"
output_folder = "./output/"
data_file = 'NetworkData.h5'
skim_files = ['taz_skims.omx', 'tap_skims_locl.omx', 'tap_skims_prem.omx']

if __name__ == "__main__":

    if data_file:
        with pd.HDFStore(input_folder+data_file, mode='r') as hdf:
            # export the TAZ and TAP tables to csv
            df = hdf['/TAZ']
            df.to_csv(output_folder+'taz.csv', index=True)
            df = hdf['/TAP']
            df.to_csv(output_folder+'tap.csv', index=True)
            # print summary info for every table in the store
            for key in hdf.keys():
                print "\n========== %s\n" % key
                df = hdf[key]
                print "len", len(df.index)
                print df.columns.values
                # ranges of the zone-id columns, where present
                for c in ['TAZ', 'TAP', 'MAZ', 'OMAZ', 'DMAZ']:
                    if c in df.columns:
                        print "%s min: %s max: %s" % (c, df[c].min(), df[c].max())
                if 'TAZ'in df.columns:
                    print df.TAZ.value_counts().head(20)
                # print df
    # process all skims
    for skim_file in skim_files:
        with omx.open_file(input_folder+skim_file) as skims:
            # skims = omx.open_file(folder+skim_file)
            print "\n##### %s %s" % (skim_file, skims.shape())
            print "mappings:", skims.listMappings()
            skimsToProcess = skims.listMatrices()
            for skimName in skimsToProcess:
                print skimName
            # skims.close()
| {
"repo_name": "UDST/activitysim",
"path": "example_multiple_zone/dump_data.py",
"copies": "2",
"size": "1574",
"license": "bsd-3-clause",
"hash": -7496693926704565000,
"line_mean": 27.6181818182,
"line_max": 82,
"alpha_frac": 0.5184243964,
"autogenerated": false,
"ratio": 3.2122448979591836,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9727243001776265,
"avg_score": 0.0006852585165838178,
"num_lines": 55
} |
# ActivitySim
# See full license in LICENSE.txt.
import os
import numpy as np
import pandas as pd
import openmatrix as omx
import sys
# currently hdf5 written with python3 works with both p2.7 and p3,
# but reading hdf5 built with p2.7 (tables==3.4.4) p3 throws a ValueError reading land_use_taz:
# ValueError: Buffer dtype mismatch, expected 'Python object' but got 'double'
# hdf5 written by python2 (tables==3.4.4) can't be read reliably by python3,
# so refuse to run under python2 to guarantee portable output files
if sys.version_info[0] < 3:
    raise Exception("Must be using Python 3")
# input files, SF county is zones 1 to 190, output files
source_store = "/Users/jeff.doyle/work/activitysim-data/mtc_tm1/data/mtc_asim.h5"
source_skims = '/Users/jeff.doyle/work/activitysim-data/mtc_tm1/data/skims.omx'
dest_data_dir = "/Users/jeff.doyle/work/activitysim-data"
def create_subset(dest_store, dest_skims, maxZone, households_sample_size=0):
    """
    Extract a zone-limited subset of the full example data set.

    Copies land_use_taz, households and persons (filtered to zones <= maxZone)
    from source_store into dest_store, and the top-left maxZone x maxZone
    corner of every skim matrix from source_skims into dest_skims.

    Parameters
    ----------
    dest_store : str
        path of output hdf5 store, relative to dest_data_dir
    dest_skims : str
        path of output omx skim file, relative to dest_data_dir
    maxZone : int
        keep only zones with id <= maxZone
    households_sample_size : int, optional
        if nonzero, randomly sample this many of the filtered households
    """
    dest_store_path = os.path.join(dest_data_dir, dest_store)
    dest_skims_path = os.path.join(dest_data_dir, dest_skims)

    print('land_use_taz')
    df = pd.read_hdf(source_store, 'land_use_taz')
    df = df[df.index <= maxZone]
    df.to_hdf(dest_store_path, 'land_use_taz')
    del df

    print('households')
    hh_df = pd.read_hdf(source_store, 'households')
    hh_df = hh_df[hh_df.TAZ <= maxZone]
    if households_sample_size:
        hh_df = hh_df.take(np.random.choice(len(hh_df), size=households_sample_size, replace=False))
    hh_df.to_hdf(dest_store_path, 'households')

    print('persons')
    per_df = pd.read_hdf(source_store, 'persons')
    # keep only persons belonging to the retained (possibly sampled) households
    per_df = per_df[per_df.household_id.isin(hh_df.index)]
    per_df.to_hdf(dest_store_path, 'persons')

    # process all skims
    # bug fix: the original never closed either omx file, leaking the file
    # handles (and risking an incompletely flushed output file)
    skims = omx.open_file(source_skims)
    try:
        skims_out = omx.open_file(dest_skims_path, 'w')
        try:
            for skimName in skims.list_matrices():
                print(skimName)
                skims_out[skimName] = skims[skimName][0:maxZone, 0:maxZone]
                skims_out[skimName].attrs.TITLE = ''  # remove funny character for OMX viewer
        finally:
            skims_out.close()
    finally:
        skims.close()
# SF-county example: all zones up to 190, all households
create_subset(dest_store='mtc_tm1_sf/data/mtc_asim.h5',
              dest_skims='mtc_tm1_sf/data/skims.omx',
              maxZone=190
              )
# small test example: 25 zones, random sample of 5000 households
create_subset(dest_store='mtc_tm1_test/data/mtc_asim.h5',
              dest_skims='mtc_tm1_test/data/skims.omx',
              maxZone=25,
              households_sample_size=5000
              )
| {
"repo_name": "UDST/activitysim",
"path": "scripts/create_sf_example.py",
"copies": "2",
"size": "2310",
"license": "bsd-3-clause",
"hash": -8834665492518320000,
"line_mean": 32.4782608696,
"line_max": 100,
"alpha_frac": 0.667965368,
"autogenerated": false,
"ratio": 2.773109243697479,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9437876011284896,
"avg_score": 0.0006397200825165255,
"num_lines": 69
} |
"""Activity views."""
import django_filters
from django.conf.urls import url
from django.core.urlresolvers import reverse_lazy
from django.views.generic import CreateView, DetailView, UpdateView
from django_filters.views import FilterView
from .mixins import AJAXRedirectMixin
from ..forms import activity as activity_forms
from ..models.activity import Activity
class ActivityCreateView(CreateView):
    """Create an Activity."""

    model = Activity
    template_name = 'activity/create.html'
    form_class = activity_forms.ActivityForm

    def get_success_url(self):
        """Go to the Activity details view."""
        # self.object is the newly created Activity (set by CreateView)
        return reverse_lazy('activity_details', args=[self.object.id])
class ActivityDetailView(DetailView):
    """Activity Details"""

    model = Activity
    template_name = 'activity/activity.html'
class ActivityInlineDetailView(DetailView):
    """Display Activity details in a table row."""

    # same model as ActivityDetailView, but rendered with a row-fragment template
    model = Activity
    template_name = 'activity/inline_details.html'
class ActivityEditView(AJAXRedirectMixin, UpdateView):
    """Edit Activity details."""

    model = Activity
    template_name = 'activity/edit.html'
    form_class = activity_forms.ActivityForm

    def get_success_url(self):
        """Go to the Activity details view."""
        return reverse_lazy('activity_details', args=[self.object.id])
class ActivityInlineEditView(AJAXRedirectMixin, UpdateView):
    """Display a form in a table row."""

    model = Activity
    template_name = 'activity/inline_edit.html'
    # no form_class here: Django builds a ModelForm from this field list
    fields = [
        'id',
        'short_description',
        'long_description',
        'to_dos',
        'places',
    ]

    def get_success_url(self):
        """Go to the Activity details view."""
        # back to the inline (row-fragment) detail view, not the full page
        return reverse_lazy('activity_inline_details', args=[self.object.id])
class ActivityFilter(django_filters.FilterSet):
    """Filter for Activities."""

    # NOTE(review): all_choices is not referenced anywhere in this module -
    # presumably consumed by a template or subclass; verify before removing
    all_choices = [('', '---------')]
    # case-insensitive substring match on each text field
    id = django_filters.CharFilter(  # pylint:disable=invalid-name
        lookup_expr='icontains',
        help_text='',
    )
    short_description = django_filters.CharFilter(
        lookup_expr='icontains',
        help_text='',
    )
    long_description = django_filters.CharFilter(
        lookup_expr='icontains',
        help_text='',
    )

    class Meta:
        model = Activity
        fields = ['id', 'short_description', 'long_description']
        order_by = ['id']
class ActivityListView(AJAXRedirectMixin, FilterView):
    """List Activities and provide a filter."""

    model = Activity
    template_name = 'activity/filtered_list.html'
    filterset_class = ActivityFilter
# URL routes for the Activity views; names are referenced by the
# get_success_url methods above and by templates via {% url %}
urlpatterns = [
    url('^create$', ActivityCreateView.as_view(), name='activity_create'),
    url(
        r'^(?P<pk>\d+)/$',
        ActivityDetailView.as_view(),
        name='activity_details',
    ),
    url(
        r'^(?P<pk>\d+)/edit$',
        ActivityEditView.as_view(),
        name='activity_edit'
    ),
    url(r'^list/$', ActivityListView.as_view(), name='activity_list'),
    # inline/ routes render row fragments for AJAX in-table display/edit
    url(
        r'^inline/(?P<pk>\d+)/$',
        ActivityInlineDetailView.as_view(),
        name='activity_inline_details',
    ),
    url(
        r'^inline/(?P<pk>\d+)/edit$',
        ActivityInlineEditView.as_view(),
        name='activity_inline_edit',
    ),
]
| {
"repo_name": "jricardo27/holiday_planner",
"path": "holiday_planner/holiday_place/views/activity.py",
"copies": "1",
"size": "3292",
"license": "bsd-3-clause",
"hash": 141970079466206700,
"line_mean": 24.1297709924,
"line_max": 77,
"alpha_frac": 0.6363912515,
"autogenerated": false,
"ratio": 4.064197530864197,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 131
} |
"""actofgoods URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from . import views
# namespace for {% url 'administration:...' %} reversing
app_name = 'administration'
urlpatterns = [
    url(r'^administration/$', views.administration, name='administration'),
    url(r'^administration/requests/$', views.requests, name='requests'),
    url(r'^administration/categories/$', views.categories, name='categories'),
    url(r'^administration/needs/$', views.needs, name='needs'),
    url(r'^administration/information/$', views.informations, name='information'),
    url(r'^administration/users/$', views.users, name='users'),
    url(r'^administration/groups/$', views.groups, name='groups'),
    url(r'^administration/faq_administration/$', views.faq_administration, name='faq_administration'),
    # delete / admin actions keyed by primary key
    url(r'^administration/users/(?P<pk>\d+)/delete/$', views.user_delete, name='user_delete'),
    url(r'^administration/groups/(?P<pk>\d+)/delete/$', views.group_delete, name='group_delete'),
    url(r'^administration/needs/(?P<pk>\d+)/delete/$', views.need_delete, name='need_delete'),
    url(r'^administration/information/(?P<pk>\d+)/delete/$', views.information_delete, name='information_delete'),
    url(r'^administration/users/(?P<pk>\d+)/make_admin/$', views.make_admin, name='make_admin'),
    url(r'^administration/categories/(?P<pk>\d+)/delete/$', views.categories_delete, name='categories_delete'),
    url(r'^administration/information/(?P<pk>\d+)/$', views.information_admin, name='information_admin'),
    # NOTE(review): the next two routes share the name 'comment_delete' (one with
    # a pk, one without) - reverse() resolution depends on the args supplied;
    # confirm the pk-less route is intentional
    url(r'^administration/information/comment/delete/$', views.comment_delete, name='comment_delete'),
    url(r'^administration/information/comment/(?P<pk>\d+)/delete/$', views.comment_delete, name='comment_delete'),
    url(r'^administration/information/(?P<pki>\d+)/(?P<pkc>\d+)/$', views.information_reported_comment_admin, name='information_reported_comment_admin'),
]
| {
"repo_name": "actofgoods/actofgoods",
"path": "administration/urls.py",
"copies": "1",
"size": "2485",
"license": "mit",
"hash": -8970017133031198000,
"line_mean": 59.6097560976,
"line_max": 153,
"alpha_frac": 0.7018108652,
"autogenerated": false,
"ratio": 3.5550786838340485,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47568895490340485,
"avg_score": null,
"num_lines": null
} |
"""Actor-Critic using TD-error as the Advantage, Reinforcement Learning.
Actor Critic History
----------------------
A3C > DDPG > AC
Advantage
----------
AC converge faster than Policy Gradient.
Disadvantage (IMPORTANT)
------------------------
The policy tends to oscillate (making convergence difficult); DDPG can
mitigate this problem by borrowing the advantages of DQN.
Reference
----------
View more on MorvanZhou's tutorial page: https://morvanzhou.github.io/tutorials/
Environment
------------
CartPole-v0: https://gym.openai.com/envs/CartPole-v0
A pole is attached by an un-actuated joint to a cart, which moves along a
frictionless track. The system is controlled by applying a force of +1 or -1
to the cart. The pendulum starts upright, and the goal is to prevent it from
falling over.
A reward of +1 is provided for every timestep that the pole remains upright.
The episode ends when the pole is more than 15 degrees from vertical, or the
cart moves more than 2.4 units from the center.
"""
import time
import numpy as np
import tensorflow as tf
import gym
import tensorlayer as tl
from tensorlayer.layers import DenseLayer, InputLayer
tf.logging.set_verbosity(tf.logging.DEBUG)
tl.logging.set_verbosity(tl.logging.DEBUG)
# fixed seeds for numpy, TF and the env make runs reproducible
np.random.seed(2)
tf.set_random_seed(2)  # reproducible
# hyper-parameters
OUTPUT_GRAPH = False
MAX_EPISODE = 3000
DISPLAY_REWARD_THRESHOLD = 100  # renders environment if running reward is greater than this threshold
MAX_EP_STEPS = 1000  # maximum time step in one episode
RENDER = False  # rendering wastes time
LAMBDA = 0.9  # reward discount in TD error
LR_A = 0.001  # learning rate for actor
LR_C = 0.01  # learning rate for critic
env = gym.make('CartPole-v0')
env.seed(2)  # reproducible
# env = env.unwrapped
# observation / action space sizes drive the network input/output widths
N_F = env.observation_space.shape[0]
N_A = env.action_space.n
# env.action_space.sample() random sample
print("observation dimension: %d" % N_F)  # 4
print("observation high: %s" % env.observation_space.high)  # [ 2.4 , inf , 0.41887902 , inf]
print("observation low : %s" % env.observation_space.low)  # [-2.4 , -inf , -0.41887902 , -inf]
print("num of actions: %d" % N_A)  # 2 : left or right
class Actor(object):
    """Policy network: maps a state to action probabilities, trained with
    the TD-error from the Critic as the advantage weight."""

    def __init__(self, sess, n_features, n_actions, lr=0.001):
        self.sess = sess
        # single-state batch placeholders (batch size fixed at 1)
        self.s = tf.placeholder(tf.float32, [1, n_features], "state")
        self.a = tf.placeholder(tf.int32, [None], "act")
        self.td_error = tf.placeholder(tf.float32, [None], "td_error")  # TD_error
        with tf.variable_scope('Actor'):  # Policy network
            n = InputLayer(self.s, name='in')
            n = DenseLayer(n, n_units=30, act=tf.nn.relu6, W_init=tf.random_uniform_initializer(0, 0.01), name='hidden')
            # n = DenseLayer(n, n_units=10, act=tf.nn.relu6, W_init=tf.random_uniform_initializer(0, 0.01), name='hidden2')
            n = DenseLayer(n, n_units=n_actions, name='Pi')
            self.acts_logits = n.outputs
            self.acts_prob = tf.nn.softmax(self.acts_logits)
        # Hao Dong
        with tf.variable_scope('loss'):
            # cross-entropy weighted by td_error: policy-gradient loss with
            # the TD-error acting as the advantage
            self.exp_v = tl.rein.cross_entropy_reward_loss(
                logits=self.acts_logits, actions=self.a, rewards=self.td_error, name='actor_weighted_loss'
            )
        with tf.variable_scope('train'):
            self.train_op = tf.train.AdamOptimizer(lr).minimize(self.exp_v)
        # Morvan Zhou (the same)
        # with tf.variable_scope('exp_v'):
        #     # log_prob = tf.log(self.acts_prob[0, self.a[0]])
        #     # self.exp_v = tf.reduce_mean(log_prob * self.td_error[0])  # advantage (TD_error) guided loss
        #     self.exp_v = tl.rein.log_weight(probs=self.acts_prob[0, self.a[0]], weights=self.td_error)
        #
        # with tf.variable_scope('train'):
        #     self.train_op = tf.train.AdamOptimizer(lr).minimize(-self.exp_v)  # minimize(-exp_v) = maximize(exp_v)

    def learn(self, s, a, td):
        """One gradient step on state s, action a, weighted by TD-error td."""
        _, exp_v = self.sess.run([self.train_op, self.exp_v], {self.s: [s], self.a: [a], self.td_error: td[0]})
        return exp_v

    def choose_action(self, s):
        """Sample an action from the current policy distribution (exploration)."""
        probs = self.sess.run(self.acts_prob, {self.s: [s]})  # get probabilities of all actions
        return tl.rein.choice_action_by_probs(probs.ravel())

    def choose_action_greedy(self, s):
        """Pick the most probable action (used for evaluation, no exploration)."""
        probs = self.sess.run(self.acts_prob, {self.s: [s]})  # get probabilities of all actions
        return np.argmax(probs.ravel())
class Critic(object):
    """State-value network: estimates V(s) and supplies the TD-error that
    the Actor uses as its advantage signal."""

    def __init__(self, sess, n_features, lr=0.01):
        self.sess = sess
        self.s = tf.placeholder(tf.float32, [1, n_features], "state")
        # v_ is V(s') computed by a previous forward pass, fed back in
        self.v_ = tf.placeholder(tf.float32, [1, 1], "v_next")
        self.r = tf.placeholder(tf.float32, None, 'r')
        with tf.variable_scope('Critic'):  # we use Value-function here, not Action-Value-function
            n = InputLayer(self.s, name='in')
            n = DenseLayer(n, n_units=30, act=tf.nn.relu6, W_init=tf.random_uniform_initializer(0, 0.01), name='hidden')
            # n = DenseLayer(n, n_units=5, act=tf.nn.relu, W_init=tf.random_uniform_initializer(0, 0.01), name='hidden2')
            n = DenseLayer(n, n_units=1, act=None, name='V')
            self.v = n.outputs
        with tf.variable_scope('squared_TD_error'):
            # TD_error = r + lambd * V(newS) - V(S)
            self.td_error = self.r + LAMBDA * self.v_ - self.v
            self.loss = tf.square(self.td_error)
        with tf.variable_scope('train'):
            self.train_op = tf.train.AdamOptimizer(lr).minimize(self.loss)

    def learn(self, s, r, s_):
        """Update V toward r + LAMBDA * V(s_); return the TD-error for the Actor."""
        # forward pass on the next state first to get the bootstrap target
        v_ = self.sess.run(self.v, {self.s: [s_]})
        td_error, _ = self.sess.run([self.td_error, self.train_op], {self.s: [s], self.v_: v_, self.r: r})
        return td_error
sess = tf.Session()
actor = Actor(sess, n_features=N_F, n_actions=N_A, lr=LR_A)
# we need a good teacher, so the teacher should learn faster than the actor
critic = Critic(sess, n_features=N_F, lr=LR_C)
tl.layers.initialize_global_variables(sess)
if OUTPUT_GRAPH:
    tf.summary.FileWriter("logs/", sess.graph)
for i_episode in range(MAX_EPISODE):
    episode_time = time.time()
    s = env.reset()
    t = 0  # number of step in this episode
    all_r = []  # rewards of all steps
    while True:
        if RENDER: env.render()
        a = actor.choose_action(s)
        s_new, r, done, info = env.step(a)
        # large penalty on failure so the critic learns terminal states are bad
        if done: r = -20
        # these may helpful in some tasks
        # if abs(s_new[0]) >= env.observation_space.high[0]:
        #     # cart moves more than 2.4 units from the center
        #     r = -20
        # reward for the distance between cart to the center
        # r -= abs(s_new[0]) * .1
        all_r.append(r)
        # critic first: its TD-error is the advantage the actor trains on
        td_error = critic.learn(s, r, s_new)  # learn Value-function : gradient = grad[r + lambda * V(s_new) - V(s)]
        actor.learn(s, a, td_error)  # learn Policy : true_gradient = grad[logPi(s, a) * td_error]
        s = s_new
        t += 1
        if done or t >= MAX_EP_STEPS:
            ep_rs_sum = sum(all_r)
            # exponential moving average of episode reward across episodes
            if 'running_reward' not in globals():
                running_reward = ep_rs_sum
            else:
                running_reward = running_reward * 0.95 + ep_rs_sum * 0.05
            # start rending if running_reward greater than a threshold
            # if running_reward > DISPLAY_REWARD_THRESHOLD: RENDER = True
            print("Episode: %d reward: %f running_reward %f took: %.5f" % \
                  (i_episode, ep_rs_sum, running_reward, time.time() - episode_time))
            # Early Stopping for quick check
            # once an episode survives the full step budget, stop training and
            # demo the learned greedy policy in an endless render loop
            if t >= MAX_EP_STEPS:
                print("Early Stopping")
                s = env.reset()
                rall = 0
                while True:
                    env.render()
                    # a = actor.choose_action(s)
                    a = actor.choose_action_greedy(s)  # Hao Dong: it is important for this task
                    s_new, r, done, info = env.step(a)
                    # NOTE(review): s has exactly N_F elements here, so s[N_F:]
                    # is empty and this concatenate appears to be a no-op - confirm
                    s_new = np.concatenate((s_new[0:N_F], s[N_F:]), axis=0)
                    rall += r
                    s = s_new
                    if done:
                        print("reward", rall)
                        s = env.reset()
                        rall = 0
            break
| {
"repo_name": "zsdonghao/tensorlayer",
"path": "examples/reinforcement_learning/tutorial_cartpole_ac.py",
"copies": "1",
"size": "8217",
"license": "apache-2.0",
"hash": 3562028876724424000,
"line_mean": 37.0416666667,
"line_max": 123,
"alpha_frac": 0.5941341122,
"autogenerated": false,
"ratio": 3.1410550458715596,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9228014341078588,
"avg_score": 0.0014349633985942278,
"num_lines": 216
} |
""" Actor for rodario framework """
# stdlib
import atexit
from uuid import uuid4
from time import sleep
from threading import Thread, Event
import pickle
import inspect
# local
from rodario import get_redis_connection
from rodario.registry import Registry
from rodario.exceptions import UUIDInUseException
REGISTRY = Registry()
# pylint: disable=E1101
class Actor(object):
    """ Base Actor class """

    #: Threading Event to tell the message handling loop to die
    # (needed in __del__ so must be defined here)
    _stop = None
    #: Redis PubSub client
    _pubsub = None

    def __init__(self, uuid=None):
        """
        Initialize the Actor object.

        :param str uuid: Optionally-provided UUID
        :raises UUIDInUseException: if the UUID is already registered
        """
        # ensure cleanup runs at interpreter shutdown even if the caller
        # never drops the reference
        atexit.register(self.__del__)
        self._stop = Event()
        #: Separate Thread for handling messages
        self._proc = None
        #: Redis connection
        self._redis = get_redis_connection()
        # pylint: disable=E1123
        self._pubsub = self._redis.pubsub(ignore_subscribe_messages=True)
        if uuid:
            self.uuid = uuid
        else:
            self.uuid = str(uuid4())
        if not REGISTRY.exists(self.uuid):
            REGISTRY.register(self.uuid)
        else:
            # clear uuid so __del__ doesn't unregister someone else's entry
            self.uuid = None
            raise UUIDInUseException('UUID is already taken')

    def __del__(self):
        """ Clean up: unregister from the registry and stop the handler. """
        # uuid may be missing if __init__ failed before assigning it
        if hasattr(self, 'uuid'):
            REGISTRY.unregister(self.uuid)
        self.stop()

    @property
    def is_alive(self):
        """
        Return True if this Actor is still alive.

        :rtype: :class:`bool`
        """
        return not self._stop.is_set()

    def _handler(self, message):
        """
        Send proxied method call results back through pubsub.

        :param tuple message: The message to dissect
        """
        # message payload is a pickled tuple:
        # (call uuid, proxy channel id, method name, args, kwargs)
        data = pickle.loads(message['data'])
        if not data[2]:
            # empty method call; bail out
            return
        # call the function and respond to the proxy object with return value
        uuid = data[0]
        proxy = data[1]
        func = getattr(self, data[2])
        result = (uuid, func(*data[3], **data[4]))
        self._redis.publish('proxy:%s' % proxy, pickle.dumps(result))

    def _get_methods(self):
        """
        List all of this Actor's methods (for creating remote proxies).

        :rtype: :class:`set`
        """
        methods = inspect.getmembers(self, predicate=callable)
        method_list = set()
        for name, _ in methods:
            # framework plumbing and private names are not proxied
            if (name in ('proxy', 'start', 'stop', 'part', 'join',)
                    or name[0] == '_'):
                continue
            method_list.add(name)
        return method_list

    def join(self, channel, func=None):
        """
        Join this Actor to a pubsub cluster channel.

        :param str channel: The channel to join
        :param callable func: The message handler function
        """
        # defaults to the standard proxied-call handler when no func is given
        self._pubsub.subscribe(**{'cluster:%s' % channel: func
                                  if func is not None
                                  else self._handler})

    def part(self, channel):
        """
        Remove this Actor from a pubsub cluster channel.

        :param str channel: The channel to part
        """
        self._pubsub.unsubscribe('cluster:%s' % channel)

    def proxy(self):
        """
        Wrap this Actor in an ActorProxy object.

        :rtype: :class:`rodario.actors.ActorProxy`
        """
        # avoid cyclic import
        proxy_module = __import__('rodario.actors', fromlist=('ActorProxy',))
        return proxy_module.ActorProxy(self)

    def start(self):
        """ Fire up the message handler thread. """

        def pubsub_thread():
            """ Call get_message in loop to fire _handler. """
            # poll until stop() sets the event; get_message dispatches to the
            # subscribed handlers
            while not self._stop.is_set():
                self._pubsub.get_message()
                sleep(0.01)

        # subscribe to personal channel and fire up the message handler
        self._pubsub.subscribe(**{'actor:%s' % self.uuid: self._handler})
        self._proc = Thread(target=pubsub_thread)
        # daemon thread so a forgotten actor can't block interpreter exit
        self._proc.daemon = True
        self._proc.start()

    def stop(self):
        """ Kill the message handler thread. """
        self._stop.set()
| {
"repo_name": "haliphax/rodario",
"path": "rodario/actors/actor.py",
"copies": "1",
"size": "4348",
"license": "mit",
"hash": 8123875098153982000,
"line_mean": 25.1927710843,
"line_max": 78,
"alpha_frac": 0.5547378105,
"autogenerated": false,
"ratio": 4.352352352352352,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5407090162852353,
"avg_score": null,
"num_lines": null
} |
"""ActorInterval module: contains the ActorInterval class"""
__all__ = ['ActorInterval', 'LerpAnimInterval']
from panda3d.core import *
from panda3d.direct import *
from direct.directnotify.DirectNotifyGlobal import *
from . import Interval
import math
class ActorInterval(Interval.Interval):
    """Plays an animation on an Actor.

    The subrange of the animation to be played may be specified via
    frames (startFrame up to and including endFrame) or seconds
    (startTime up to and including endTime).  If neither is specified,
    the default is the entire range of the animation.

    The duration may be implicit or explicit.  If it is omitted, it is
    taken to be endTime - startTime.  There's not much point in
    specifying otherwise unless you also specify loop=1, which will
    loop the animation over its frame range during the duration of the
    interval.

    Note: if loop == 0 and duration > anim duration then the animation
    will play once and then hold its final pose for the remainder of
    the interval.

    loop = 1 implies a loop within the entire range of animation,
    while constrainedLoop = 1 implies a loop within startFrame and
    endFrame only.
    """

    # create ActorInterval DirectNotify category
    notify = directNotify.newCategory('ActorInterval')

    # Name counter for generating unique interval names
    animNum = 1

    def __init__(self, actor, animName, loop=0, constrainedLoop=0,
                 duration=None, startTime=None, endTime=None,
                 startFrame=None, endFrame=None,
                 playRate=1.0, name=None, forceUpdate=0,
                 partName=None, lodName=None):
        # Generate unique id (renamed from `id` to avoid shadowing the
        # builtin)
        uniqueName = 'Actor-%s-%d' % (animName, ActorInterval.animNum)
        ActorInterval.animNum += 1
        # Record class specific variables
        self.actor = actor
        self.animName = animName
        self.controls = self.actor.getAnimControls(
            self.animName, partName = partName, lodName = lodName)
        self.loopAnim = loop
        self.constrainedLoop = constrainedLoop
        self.forceUpdate = forceUpdate
        self.playRate = playRate

        # If no name specified, use the generated id as name
        if name is None:
            name = uniqueName

        if len(self.controls) == 0:
            self.notify.warning("Unknown animation for actor: %s" % (self.animName))
            self.frameRate = 1.0
            self.startFrame = 0
            self.endFrame = 0
        else:
            self.frameRate = self.controls[0].getFrameRate() * abs(playRate)

            # Compute start and end frames.
            if startFrame is not None:
                self.startFrame = startFrame
            elif startTime is not None:
                self.startFrame = startTime * self.frameRate
            else:
                self.startFrame = 0

            if endFrame is not None:
                self.endFrame = endFrame
            elif endTime is not None:
                self.endFrame = endTime * self.frameRate
            elif duration is not None:
                if startTime is None:
                    startTime = float(self.startFrame) / float(self.frameRate)
                endTime = startTime + duration
                self.endFrame = endTime * self.frameRate
            else:
                # No end frame specified.  Choose the maximum of all
                # of the controls' numbers of frames.
                maxFrames = self.controls[0].getNumFrames()
                warned = 0
                for i in range(1, len(self.controls)):
                    numFrames = self.controls[i].getNumFrames()
                    if numFrames != maxFrames and numFrames != 1 and not warned:
                        self.notify.warning("Animations '%s' on %s have an inconsistent number of frames." % (animName, actor.getName()))
                        warned = 1
                    maxFrames = max(maxFrames, numFrames)
                self.endFrame = maxFrames - 1

        # Must we play the animation backwards?  We play backwards if
        # either (or both) of the following is true: the playRate is
        # negative, or endFrame is before startFrame.
        self.reverse = (playRate < 0)
        if self.endFrame < self.startFrame:
            self.reverse = 1
            t = self.endFrame
            self.endFrame = self.startFrame
            self.startFrame = t

        self.numFrames = self.endFrame - self.startFrame + 1

        # Compute duration if no duration specified
        self.implicitDuration = 0
        if duration is None:
            self.implicitDuration = 1
            duration = float(self.numFrames) / self.frameRate

        # Initialize superclass
        Interval.Interval.__init__(self, name, duration)

    def getCurrentFrame(self):
        """Calculate the current frame playing in this interval.

        returns a float value between startFrame and endFrame, inclusive
        returns None if there are any problems
        """
        retval = None
        if not self.isStopped():
            framesPlayed = self.numFrames * self.currT
            retval = self.startFrame + framesPlayed
        return retval

    def privStep(self, t):
        """Pose all animation controls for interval-relative time t."""
        frameCount = t * self.frameRate
        if self.constrainedLoop:
            frameCount = frameCount % self.numFrames

        if self.reverse:
            absFrame = self.endFrame - frameCount
        else:
            absFrame = self.startFrame + frameCount

        # Calc integer frame number (epsilon guards against roundoff
        # just below a frame boundary)
        intFrame = int(math.floor(absFrame + 0.0001))

        # Pose anim

        # We use our pre-computed list of animControls for
        # efficiency's sake, rather than going through the relatively
        # expensive Actor interface every frame.
        for control in self.controls:
            # Each animControl might have a different number of frames.
            numFrames = control.getNumFrames()
            if self.loopAnim:
                frame = (intFrame % numFrames) + (absFrame - intFrame)
            else:
                frame = max(min(absFrame, numFrames - 1), 0)

            control.pose(frame)

        if self.forceUpdate:
            self.actor.update()
        self.state = CInterval.SStarted
        self.currT = t

    def privFinalize(self):
        """Snap to the interval's final pose and mark it done."""
        if self.implicitDuration and not self.loopAnim:
            # As a special case, we ensure we end up posed to the last
            # frame of the animation if the original duration was
            # implicit.  This is necessary only to guard against
            # possible roundoff error in computing the final frame
            # from the duration.  We don't do this in the case of a
            # looping animation, however, because this would introduce
            # a hitch in the animation when it plays back-to-back with
            # the next cycle.
            if self.reverse:
                for control in self.controls:
                    control.pose(self.startFrame)
            else:
                for control in self.controls:
                    control.pose(self.endFrame)
            if self.forceUpdate:
                self.actor.update()
        else:
            # Otherwise, the user-specified duration determines which
            # is our final frame.
            self.privStep(self.getDuration())

        self.state = CInterval.SFinal
        self.intervalDone()

    def resetControls(self, partName, lodName=None):
        """Re-fetch animation controls for a different part/LOD.

        If we want to change what part this interval is playing on after
        the interval has been created, call resetControls and pass in a
        partName and optional lod param.
        """
        self.controls = self.actor.getAnimControls(
            self.animName, partName = partName, lodName = lodName)
class LerpAnimInterval(CLerpAnimEffectInterval):
    """Blends between two anims.

    Start both anims first (or use parallel ActorIntervals), then
    invoke LerpAnimInterval to smoothly blend the control effect from
    the first to the second.
    """

    # Name counter for auto-generated interval names
    lerpAnimNum = 1

    def __init__(self, actor, duration, startAnim, endAnim,
                 startWeight = 0.0, endWeight = 1.0,
                 blendType = 'noBlend', name = None,
                 partName=None, lodName=None):
        # Generate unique name if necessary
        if name is None:
            name = 'LerpAnimInterval-%d' % LerpAnimInterval.lerpAnimNum
            LerpAnimInterval.lerpAnimNum += 1

        blendType = self.stringBlendType(blendType)
        assert blendType != self.BTInvalid

        # Initialize superclass
        CLerpAnimEffectInterval.__init__(self, name, duration, blendType)

        # startAnim fades out (weight goes from 1-startWeight to
        # 1-endWeight); endAnim fades in correspondingly.
        if startAnim is not None:
            controls = actor.getAnimControls(
                startAnim, partName = partName, lodName = lodName)
            for control in controls:
                self.addControl(control, startAnim,
                                1.0 - startWeight, 1.0 - endWeight)

        if endAnim is not None:
            controls = actor.getAnimControls(
                endAnim, partName = partName, lodName = lodName)
            for control in controls:
                self.addControl(control, endAnim,
                                startWeight, endWeight)
| {
"repo_name": "chandler14362/panda3d",
"path": "direct/src/interval/ActorInterval.py",
"copies": "7",
"size": "9301",
"license": "bsd-3-clause",
"hash": 1003216986389277700,
"line_mean": 38.5787234043,
"line_max": 137,
"alpha_frac": 0.60133319,
"autogenerated": false,
"ratio": 4.403882575757576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0036282238694679636,
"num_lines": 235
} |
"""ActorInterval module: contains the ActorInterval class"""
__all__ = ['ActorInterval', 'LerpAnimInterval']
from pandac.PandaModules import *
from direct.directnotify.DirectNotifyGlobal import *
import Interval
import math
class ActorInterval(Interval.Interval):
    """Plays an animation on an Actor.

    The subrange of the animation to be played may be specified via
    frames (startFrame up to and including endFrame) or seconds
    (startTime up to and including endTime).  If neither is specified,
    the default is the entire range of the animation.

    The duration may be implicit or explicit.  If it is omitted, it is
    taken to be endTime - startTime.  There's not much point in
    specifying otherwise unless you also specify loop=1, which will
    loop the animation over its frame range during the duration of the
    interval.

    Note: if loop == 0 and duration > anim duration then the animation
    will play once and then hold its final pose for the remainder of
    the interval.

    loop = 1 implies a loop within the entire range of animation,
    while constrainedLoop = 1 implies a loop within startFrame and
    endFrame only.
    """

    # create ActorInterval DirectNotify category
    notify = directNotify.newCategory('ActorInterval')

    # Name counter for generating unique interval names
    animNum = 1

    def __init__(self, actor, animName, loop=0, constrainedLoop=0,
                 duration=None, startTime=None, endTime=None,
                 startFrame=None, endFrame=None,
                 playRate=1.0, name=None, forceUpdate=0,
                 partName=None, lodName=None):
        # Generate unique id (renamed from `id` to avoid shadowing the
        # builtin)
        uniqueName = 'Actor-%s-%d' % (animName, ActorInterval.animNum)
        ActorInterval.animNum += 1
        # Record class specific variables
        self.actor = actor
        self.animName = animName
        self.controls = self.actor.getAnimControls(
            self.animName, partName = partName, lodName = lodName)
        self.loopAnim = loop
        self.constrainedLoop = constrainedLoop
        self.forceUpdate = forceUpdate
        self.playRate = playRate

        # If no name specified, use the generated id as name
        if name is None:
            name = uniqueName

        if len(self.controls) == 0:
            self.notify.warning("Unknown animation for actor: %s" % (self.animName))
            self.frameRate = 1.0
            self.startFrame = 0
            self.endFrame = 0
        else:
            self.frameRate = self.controls[0].getFrameRate() * abs(playRate)

            # Compute start and end frames.
            if startFrame is not None:
                self.startFrame = startFrame
            elif startTime is not None:
                self.startFrame = startTime * self.frameRate
            else:
                self.startFrame = 0

            if endFrame is not None:
                self.endFrame = endFrame
            elif endTime is not None:
                self.endFrame = endTime * self.frameRate
            elif duration is not None:
                if startTime is None:
                    startTime = float(self.startFrame) / float(self.frameRate)
                endTime = startTime + duration
                # BUGFIX: the end frame must come from the computed
                # endTime (startTime + duration), not from the raw
                # duration; using duration ignored any nonzero
                # startTime and produced a too-early end frame.
                self.endFrame = endTime * self.frameRate
            else:
                # No end frame specified.  Choose the maximum of all
                # of the controls' numbers of frames.
                maxFrames = self.controls[0].getNumFrames()
                warned = 0
                for i in range(1, len(self.controls)):
                    numFrames = self.controls[i].getNumFrames()
                    if numFrames != maxFrames and numFrames != 1 and not warned:
                        self.notify.warning("Animations '%s' on %s have an inconsistent number of frames." % (animName, actor.getName()))
                        warned = 1
                    maxFrames = max(maxFrames, numFrames)
                self.endFrame = maxFrames - 1

        # Must we play the animation backwards?  We play backwards if
        # either (or both) of the following is true: the playRate is
        # negative, or endFrame is before startFrame.
        self.reverse = (playRate < 0)
        if self.endFrame < self.startFrame:
            self.reverse = 1
            t = self.endFrame
            self.endFrame = self.startFrame
            self.startFrame = t

        self.numFrames = self.endFrame - self.startFrame + 1

        # Compute duration if no duration specified
        self.implicitDuration = 0
        if duration is None:
            self.implicitDuration = 1
            duration = float(self.numFrames) / self.frameRate

        # Initialize superclass
        Interval.Interval.__init__(self, name, duration)

    def getCurrentFrame(self):
        """Calculate the current frame playing in this interval.

        returns a float value between startFrame and endFrame, inclusive
        returns None if there are any problems
        """
        retval = None
        if not self.isStopped():
            framesPlayed = self.numFrames * self.currT
            retval = self.startFrame + framesPlayed
        return retval

    def privStep(self, t):
        """Pose all animation controls for interval-relative time t."""
        frameCount = t * self.frameRate
        if self.constrainedLoop:
            frameCount = frameCount % self.numFrames

        if self.reverse:
            absFrame = self.endFrame - frameCount
        else:
            absFrame = self.startFrame + frameCount

        # Calc integer frame number (epsilon guards against roundoff
        # just below a frame boundary)
        intFrame = int(math.floor(absFrame + 0.0001))

        # Pose anim

        # We use our pre-computed list of animControls for
        # efficiency's sake, rather than going through the relatively
        # expensive Actor interface every frame.
        for control in self.controls:
            # Each animControl might have a different number of frames.
            numFrames = control.getNumFrames()
            if self.loopAnim:
                frame = (intFrame % numFrames) + (absFrame - intFrame)
            else:
                frame = max(min(absFrame, numFrames - 1), 0)

            control.pose(frame)

        if self.forceUpdate:
            self.actor.update()
        self.state = CInterval.SStarted
        self.currT = t

    def privFinalize(self):
        """Snap to the interval's final pose and mark it done."""
        if self.implicitDuration and not self.loopAnim:
            # As a special case, we ensure we end up posed to the last
            # frame of the animation if the original duration was
            # implicit.  This is necessary only to guard against
            # possible roundoff error in computing the final frame
            # from the duration.  We don't do this in the case of a
            # looping animation, however, because this would introduce
            # a hitch in the animation when it plays back-to-back with
            # the next cycle.
            if self.reverse:
                for control in self.controls:
                    control.pose(self.startFrame)
            else:
                for control in self.controls:
                    control.pose(self.endFrame)
            if self.forceUpdate:
                self.actor.update()
        else:
            # Otherwise, the user-specified duration determines which
            # is our final frame.
            self.privStep(self.getDuration())

        self.state = CInterval.SFinal
        self.intervalDone()

    def resetControls(self, partName, lodName=None):
        """Re-fetch animation controls for a different part/LOD.

        If we want to change what part this interval is playing on after
        the interval has been created, call resetControls and pass in a
        partName and optional lod param.
        """
        self.controls = self.actor.getAnimControls(
            self.animName, partName = partName, lodName = lodName)
class LerpAnimInterval(CLerpAnimEffectInterval):
    """Blends between two anims.

    Start both anims first (or use parallel ActorIntervals), then
    invoke LerpAnimInterval to smoothly blend the control effect from
    the first to the second.
    """

    # Name counter for auto-generated interval names
    lerpAnimNum = 1

    def __init__(self, actor, duration, startAnim, endAnim,
                 startWeight = 0.0, endWeight = 1.0,
                 blendType = 'noBlend', name = None,
                 partName=None, lodName=None):
        # Generate unique name if necessary
        if name is None:
            name = 'LerpAnimInterval-%d' % LerpAnimInterval.lerpAnimNum
            LerpAnimInterval.lerpAnimNum += 1

        blendType = self.stringBlendType(blendType)
        assert blendType != self.BTInvalid

        # Initialize superclass
        CLerpAnimEffectInterval.__init__(self, name, duration, blendType)

        # startAnim fades out (weight goes from 1-startWeight to
        # 1-endWeight); endAnim fades in correspondingly.
        if startAnim is not None:
            controls = actor.getAnimControls(
                startAnim, partName = partName, lodName = lodName)
            for control in controls:
                self.addControl(control, startAnim,
                                1.0 - startWeight, 1.0 - endWeight)

        if endAnim is not None:
            controls = actor.getAnimControls(
                endAnim, partName = partName, lodName = lodName)
            for control in controls:
                self.addControl(control, endAnim,
                                startWeight, endWeight)
| {
"repo_name": "jjkoletar/panda3d",
"path": "direct/src/interval/ActorInterval.py",
"copies": "3",
"size": "9321",
"license": "bsd-3-clause",
"hash": 6906501451684062000,
"line_mean": 38.8333333333,
"line_max": 137,
"alpha_frac": 0.598004506,
"autogenerated": false,
"ratio": 4.438571428571429,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6536575934571429,
"avg_score": null,
"num_lines": null
} |
"""Actor module: contains the Actor class"""
__all__ = ['Actor']
from panda3d.core import *
from direct.showbase.DirectObject import DirectObject
from direct.directnotify import DirectNotifyGlobal
import types
class Actor(DirectObject, NodePath):
"""
Actor class: Contains methods for creating, manipulating
and playing animations on characters
"""
notify = DirectNotifyGlobal.directNotify.newCategory("Actor")
partPrefix = "__Actor_"
modelLoaderOptions = LoaderOptions(LoaderOptions.LFSearch |
LoaderOptions.LFReportErrors |
LoaderOptions.LFConvertSkeleton)
animLoaderOptions = LoaderOptions(LoaderOptions.LFSearch |
LoaderOptions.LFReportErrors |
LoaderOptions.LFConvertAnim)
validateSubparts = ConfigVariableBool('validate-subparts', True)
mergeLODBundles = ConfigVariableBool('merge-lod-bundles', True)
allowAsyncBind = ConfigVariableBool('allow-async-bind', True)
class PartDef:
    """Tracks a single PartBundle belonging to an Actor.

    Instances of this class are stored within the PartBundleDict to
    track all of the individual PartBundles associated with the Actor.
    In general, each separately loaded model file is a different
    PartBundle.  This can include the multiple different LOD's, as
    well as the multiple different pieces of a multipart Actor.
    """

    def __init__(self, partBundleNP, partBundleHandle, partModel):
        # The ModelRoot node is saved alongside the PartBundle so the
        # reference count in the ModelPool stays accurate.
        self.partBundleNP = partBundleNP
        self.partBundleHandle = partBundleHandle
        self.partModel = partModel

    def getBundle(self):
        """Return the PartBundle held by this definition."""
        return self.partBundleHandle.getBundle()

    def __repr__(self):
        return 'Actor.PartDef(%r, %r)' % (self.partBundleNP, self.partModel)
class AnimDef:
    """Tracks one animation associated with an Actor.

    Instances of this class are stored within the AnimControlDict to
    track all of the animations associated with the Actor.  This
    includes animations that have already been bound (these have a
    valid AnimControl) as well as those that have not yet been bound
    (for these, self.animControl is None).

    There is a different AnimDef for each different part or sub-part,
    times each different animation in the AnimDict.
    """

    def __init__(self, filename = None, animBundle = None):
        self.filename = filename
        # BUGFIX: the animBundle argument was previously ignored
        # (self.animBundle was unconditionally set to None), so
        # makeCopy() could never propagate a bundle.  Store it.
        self.animBundle = animBundle
        self.animControl = None

    def makeCopy(self):
        """Return a new unbound AnimDef sharing filename and bundle."""
        return Actor.AnimDef(self.filename, self.animBundle)

    def __repr__(self):
        return 'Actor.AnimDef(%s)' % (repr(self.filename))
class SubpartDef:
    """Tracks the existence of an arbitrary sub-part of an Actor.

    Instances of this class are stored within the SubpartDict.  These
    are designed to appear to the user to be identical to a true
    "part" of a multi-part Actor, but in fact each subpart represents
    a subset of the joints of an existing part (which is accessible
    via a different name).
    """

    def __init__(self, truePartName, subset = None):
        self.truePartName = truePartName
        # BUGFIX: the old default (subset = PartSubset()) was a mutable
        # default argument evaluated once at class-definition time, so
        # every SubpartDef built without an explicit subset aliased the
        # same shared PartSubset object.  Build a fresh one per
        # instance instead; passing an explicit subset is unchanged.
        self.subset = PartSubset() if subset is None else subset

    def makeCopy(self):
        """Return a deep-ish copy (the PartSubset is duplicated)."""
        return Actor.SubpartDef(self.truePartName, PartSubset(self.subset))

    def __repr__(self):
        return 'Actor.SubpartDef(%s, %s)' % (repr(self.truePartName), repr(self.subset))
def __init__(self, models=None, anims=None, other=None, copy=True,
lodNode = None, flattenable = True, setFinal = False,
mergeLODBundles = None, allowAsyncBind = None,
okMissing = None):
"""__init__(self, string | string:string{}, string:string{} |
string:(string:string{}){}, Actor=None)
Actor constructor: can be used to create single or multipart
actors. If another Actor is supplied as an argument this
method acts like a copy constructor. Single part actors are
created by calling with a model and animation dictionary
(animName:animPath{}) as follows:
a = Actor("panda-3k.egg", {"walk":"panda-walk.egg" \
"run":"panda-run.egg"})
This could be displayed and animated as such:
a.reparentTo(render)
a.loop("walk")
a.stop()
Multipart actors expect a dictionary of parts and a dictionary
of animation dictionaries (partName:(animName:animPath{}){}) as
below:
a = Actor(
# part dictionary
{"head":"char/dogMM/dogMM_Shorts-head-mod", \
"torso":"char/dogMM/dogMM_Shorts-torso-mod", \
"legs":"char/dogMM/dogMM_Shorts-legs-mod"}, \
# dictionary of anim dictionaries
{"head":{"walk":"char/dogMM/dogMM_Shorts-head-walk", \
"run":"char/dogMM/dogMM_Shorts-head-run"}, \
"torso":{"walk":"char/dogMM/dogMM_Shorts-torso-walk", \
"run":"char/dogMM/dogMM_Shorts-torso-run"}, \
"legs":{"walk":"char/dogMM/dogMM_Shorts-legs-walk", \
"run":"char/dogMM/dogMM_Shorts-legs-run"} \
})
In addition multipart actor parts need to be connected together
in a meaningful fashion:
a.attach("head", "torso", "joint-head")
a.attach("torso", "legs", "joint-hips")
#
# ADD LOD COMMENT HERE!
#
Other useful Actor class functions:
#fix actor eye rendering
a.drawInFront("joint-pupil?", "eyes*")
#fix bounding volumes - this must be done after drawing
#the actor for a few frames, otherwise it has no effect
a.fixBounds()
"""
try:
self.Actor_initialized
return
except:
self.Actor_initialized = 1
# initialize our NodePath essence
NodePath.__init__(self)
# Set the mergeLODBundles flag. If this is true, all
# different LOD's will be merged into a single common bundle
# (joint hierarchy). All LOD's will thereafter share the same
# skeleton, even though they may have been loaded from
# different egg files. If this is false, LOD's will be kept
# completely isolated, and each LOD will have its own
# skeleton.
# When this flag is true, __animControlDict has only one key,
# ['common']; when it is false, __animControlDict has one key
# per each LOD name.
if mergeLODBundles is None:
# If this isn't specified, it comes from the Config.prc
# file.
self.mergeLODBundles = Actor.mergeLODBundles.getValue()
else:
self.mergeLODBundles = mergeLODBundles
# Set the allowAsyncBind flag. If this is true, it enables
# asynchronous animation binding. This requires that you have
# run "egg-optchar -preload" on your animation and models to
# generate the appropriate AnimPreloadTable.
if allowAsyncBind is None:
self.allowAsyncBind = Actor.allowAsyncBind.getValue()
else:
self.allowAsyncBind = allowAsyncBind
# create data structures
self.__commonBundleHandles = {}
self.__partBundleDict = {}
self.__subpartDict = {}
self.__sortedLODNames = []
self.__animControlDict = {}
self.__subpartsComplete = False
self.__LODNode = None
self.__LODAnimation = None
self.__LODCenter = Point3(0, 0, 0)
self.switches = None
if (other == None):
# act like a normal constructor
# create base hierarchy
self.gotName = 0
if flattenable:
# If we want a flattenable Actor, don't create all
# those ModelNodes, and the GeomNode is the same as
# the root.
root = PandaNode('actor')
self.assign(NodePath(root))
self.setGeomNode(NodePath(self))
else:
# A standard Actor has a ModelNode at the root, and
# another ModelNode to protect the GeomNode.
root = ModelNode('actor')
root.setPreserveTransform(1)
self.assign(NodePath(root))
self.setGeomNode(self.attachNewNode(ModelNode('actorGeom')))
self.__hasLOD = 0
# load models
#
# four cases:
#
# models, anims{} = single part actor
# models{}, anims{} = single part actor w/ LOD
# models{}, anims{}{} = multi-part actor
# models{}{}, anims{}{} = multi-part actor w/ LOD
#
# make sure we have models
if (models):
# do we have a dictionary of models?
if (type(models)==type({})):
# if this is a dictionary of dictionaries
if (type(models[models.keys()[0]]) == type({})):
# then it must be a multipart actor w/LOD
self.setLODNode(node = lodNode)
# preserve numerical order for lod's
# this will make it easier to set ranges
sortedKeys = models.keys()
sortedKeys.sort()
for lodName in sortedKeys:
# make a node under the LOD switch
# for each lod (just because!)
self.addLOD(str(lodName))
# iterate over both dicts
for modelName in models[lodName].keys():
self.loadModel(models[lodName][modelName],
modelName, lodName, copy = copy,
okMissing = okMissing)
# then if there is a dictionary of dictionaries of anims
elif (type(anims[anims.keys()[0]])==type({})):
# then this is a multipart actor w/o LOD
for partName in models.keys():
# pass in each part
self.loadModel(models[partName], partName,
copy = copy, okMissing = okMissing)
else:
# it is a single part actor w/LOD
self.setLODNode(node = lodNode)
# preserve order of LOD's
sortedKeys = models.keys()
sortedKeys.sort()
for lodName in sortedKeys:
self.addLOD(str(lodName))
# pass in dictionary of parts
self.loadModel(models[lodName], lodName=lodName,
copy = copy, okMissing = okMissing)
else:
# else it is a single part actor
self.loadModel(models, copy = copy, okMissing = okMissing)
# load anims
# make sure the actor has animations
if (anims):
if (len(anims) >= 1):
# if so, does it have a dictionary of dictionaries?
if (type(anims[anims.keys()[0]])==type({})):
# are the models a dict of dicts too?
if (type(models)==type({})):
if (type(models[models.keys()[0]]) == type({})):
# then we have a multi-part w/ LOD
sortedKeys = models.keys()
sortedKeys.sort()
for lodName in sortedKeys:
# iterate over both dicts
for partName in anims.keys():
self.loadAnims(
anims[partName], partName, lodName)
else:
# then it must be multi-part w/o LOD
for partName in anims.keys():
self.loadAnims(anims[partName], partName)
elif (type(models)==type({})):
# then we have single-part w/ LOD
sortedKeys = models.keys()
sortedKeys.sort()
for lodName in sortedKeys:
self.loadAnims(anims, lodName=lodName)
else:
# else it is single-part w/o LOD
self.loadAnims(anims)
else:
self.copyActor(other, True) # overwrite everything
if setFinal:
# If setFinal is true, the Actor will set its top bounding
# volume to be the "final" bounding volume: the bounding
# volumes below the top volume will not be tested. If a
# cull test passes the top bounding volume, the whole
# Actor is rendered.
# We do this partly because an Actor is likely to be a
# fairly small object relative to the scene, and is pretty
# much going to be all onscreen or all offscreen anyway;
# and partly because of the Character bug that doesn't
# update the bounding volume for pieces that animate away
# from their original position. It's disturbing to see
# someone's hands disappear; better to cull the whole
# object or none of it.
self.__geomNode.node().setFinal(1)
def delete(self):
try:
self.Actor_deleted
return
except:
self.Actor_deleted = 1
self.cleanup()
def copyActor(self, other, overwrite=False):
# act like a copy constructor
self.gotName = other.gotName
# copy the scene graph elements of other
if (overwrite):
otherCopy = other.copyTo(NodePath())
otherCopy.detachNode()
# assign these elements to ourselve (overwrite)
self.assign(otherCopy)
else:
# just copy these to ourselves
otherCopy = other.copyTo(self)
# masad: check if otherCopy has a geomNode as its first child
# if actor is initialized with flattenable, then otherCopy, not
# its first child, is the geom node; check __init__, for reference
if other.getGeomNode().getName() == other.getName():
self.setGeomNode(otherCopy)
else:
self.setGeomNode(otherCopy.getChild(0))
# copy the switches for lods
self.switches = other.switches
self.__LODNode = self.find('**/+LODNode')
self.__hasLOD = 0
if (not self.__LODNode.isEmpty()):
self.__hasLOD = 1
# copy the part dictionary from other
self.__copyPartBundles(other)
self.__copySubpartDict(other)
self.__subpartsComplete = other.__subpartsComplete
# copy the anim dictionary from other
self.__copyAnimControls(other)
def __cmp__(self, other):
# Actor inherits from NodePath, which inherits a definition of
# __cmp__ from FFIExternalObject that uses the NodePath's
# compareTo() method to compare different NodePaths. But we
# don't want this behavior for Actors; Actors should only be
# compared pointerwise. A NodePath that happens to reference
# the same node is still different from the Actor.
if self is other:
return 0
else:
return 1
def __str__(self):
"""
Actor print function
"""
return "Actor %s, parts = %s, LODs = %s, anims = %s" % \
(self.getName(), self.getPartNames(), self.getLODNames(), self.getAnimNames())
def listJoints(self, partName="modelRoot", lodName="lodRoot"):
"""Handy utility function to list the joint hierarchy of the
actor. """
if self.mergeLODBundles:
partBundleDict = self.__commonBundleHandles
else:
partBundleDict = self.__partBundleDict.get(lodName)
if not partBundleDict:
Actor.notify.error("no lod named: %s" % (lodName))
subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
partDef = partBundleDict.get(subpartDef.truePartName)
if partDef == None:
Actor.notify.error("no part named: %s" % (partName))
self.__doListJoints(0, partDef.getBundle(),
subpartDef.subset.isIncludeEmpty(), subpartDef.subset)
def __doListJoints(self, indentLevel, part, isIncluded, subset):
name = part.getName()
if subset.matchesInclude(name):
isIncluded = True
elif subset.matchesExclude(name):
isIncluded = False
if isIncluded:
value = ''
if hasattr(part, 'outputValue'):
lineStream = LineStream()
part.outputValue(lineStream)
value = lineStream.getLine()
print ' ' * indentLevel, part.getName(), value
for i in range(part.getNumChildren()):
self.__doListJoints(indentLevel + 2, part.getChild(i),
isIncluded, subset)
def getActorInfo(self):
"""
Utility function to create a list of information about an actor.
Useful for iterating over details of an actor.
"""
lodInfo = []
for lodName, partDict in self.__animControlDict.items():
if self.mergeLODBundles:
lodName = self.__sortedLODNames[0]
partInfo = []
for partName in partDict.keys():
subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
partBundleDict = self.__partBundleDict.get(lodName)
partDef = partBundleDict.get(subpartDef.truePartName)
partBundle = partDef.getBundle()
animDict = partDict[partName]
animInfo = []
for animName in animDict.keys():
file = animDict[animName].filename
animControl = animDict[animName].animControl
animInfo.append([animName, file, animControl])
partInfo.append([partName, partBundle, animInfo])
lodInfo.append([lodName, partInfo])
return lodInfo
def getAnimNames(self):
animNames = []
for lodName, lodInfo in self.getActorInfo():
for partName, bundle, animInfo in lodInfo:
for animName, file, animControl in animInfo:
if animName not in animNames:
animNames.append(animName)
return animNames
def pprint(self):
    """
    Pretty print actor's details
    """
    # Walk the nested [lod, [part, bundle, [anim, ...]]] structure from
    # getActorInfo() and print one indented line per level.
    for lodName, lodInfo in self.getActorInfo():
        print 'LOD:', lodName
        for partName, bundle, animInfo in lodInfo:
            print ' Part:', partName
            print ' Bundle:', repr(bundle)
            for animName, file, animControl in animInfo:
                print ' Anim:', animName
                print ' File:', file
                if animControl == None:
                    # Anim is known but has not been bound/loaded yet.
                    print ' (not loaded)'
                else:
                    print (' NumFrames: %d PlayRate: %0.2f' %
                           (animControl.getNumFrames(),
                            animControl.getPlayRate()))
def cleanup(self):
    """
    Stop all animations, clear python-side bookkeeping, and remove the
    nodes this Actor created.
    """
    self.stop(None)
    self.clearPythonData()
    self.flush()
    if self.__geomNode:
        self.__geomNode.removeNode()
        self.__geomNode = None
    if not self.isEmpty():
        self.removeNode()
def removeNode(self):
    # Warn (dev builds only, via assert) if geometry is still parented
    # here; cleanup() should normally have been called first.
    geomNode = self.__geomNode
    if geomNode and geomNode.getNumChildren() > 0:
        assert self.notify.warning("called actor.removeNode() on %s without calling cleanup()" % self.getName())
    NodePath.removeNode(self)
def clearPythonData(self):
    # Reset every python-side bookkeeping table to empty.  The scene
    # graph itself is untouched; flush()/cleanup() handle the nodes.
    self.__commonBundleHandles = {}
    self.__partBundleDict = {}
    self.__subpartDict = {}
    self.__sortedLODNames = []
    self.__animControlDict = {}
def flush(self):
    """
    Release this Actor's python structures and LOD node, and detach any
    geometry still parented under the geom node.
    """
    self.clearPythonData()

    lodNP = self.__LODNode
    if lodNP and not lodNP.isEmpty():
        lodNP.removeNode()
        self.__LODNode = None

    # remove all its children
    if self.__geomNode:
        self.__geomNode.getChildren().detach()

    self.__hasLOD = 0
# accessing
def getAnimControlDict(self):
    # Accessor for the raw animation table, a nested dict keyed by LOD
    # name, then part name, then anim name (see getActorInfo()).
    return self.__animControlDict
def removeAnimControlDict(self):
    # Discard all animation bookkeeping; loaded geometry is not touched.
    self.__animControlDict = {}
def getPartBundleDict(self):
    # Accessor for the part table, a dict keyed by LOD name, then true
    # part name, holding PartDef entries.
    return self.__partBundleDict
def getPartBundles(self, partName = None):
    """ Returns a list of PartBundle objects for the entire Actor,
    or for the indicated part only. """
    collected = []
    for lodName, bundleDict in self.__partBundleDict.items():
        if partName == None:
            # All parts of this LOD.
            for entry in bundleDict.values():
                collected.append(entry.getBundle())
        else:
            spDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
            entry = bundleDict.get(spDef.truePartName)
            if entry != None:
                collected.append(entry.getBundle())
            else:
                Actor.notify.warning("Couldn't find part: %s" % (partName))
    return collected
def __updateSortedLODNames(self):
    """Rebuild the cached, sorted list of LOD names.

    Names sort highest-detail first: all-digit names sort numerically
    (descending), and the conventional names beginning with 'h', 'm',
    'l', 'f' (high/medium/low/far) sort by that ranking.  Caching means
    getLODNames() does not re-sort on every call.
    """
    # Rank table for the conventional non-numeric LOD names.
    smap = {'h': 3,
            'm': 2,
            'l': 1,
            'f': 0}

    def sortKey(x):
        if not str(x).isdigit():
            # An unrecognized name raises KeyError here, which is the
            # historical behavior for an invalid lodName.
            return smap[x[0]]
        else:
            return int(x)

    # list() so this works whether keys() returns a list (py2) or a
    # view (py3); a view has no .sort().
    self.__sortedLODNames = list(self.__partBundleDict.keys())
    self.__sortedLODNames.sort(key=sortKey, reverse=True)
def getLODNames(self):
    """
    Return list of Actor LOD names. If not an LOD actor,
    returns 'lodRoot'
    Caution - this returns a reference to the list - not your own copy
    """
    # The list is kept sorted highest-detail-first by
    # __updateSortedLODNames().
    return self.__sortedLODNames
def getPartNames(self):
    """
    Return list of Actor part names. If not an multipart actor,
    returns 'modelRoot' NOTE: returns parts of arbitrary LOD
    """
    partNames = []
    if self.__partBundleDict:
        # Any LOD will do; each carries the same part names.
        # next(iter(...)) instead of values()[0] so this works whether
        # values() returns a list (py2) or a view (py3).
        partNames = list(next(iter(self.__partBundleDict.values())).keys())
    # Sub-parts are registered separately from true parts.
    return partNames + list(self.__subpartDict.keys())
def getGeomNode(self):
    """
    Return the node that contains all actor geometry
    """
    return self.__geomNode
def setGeomNode(self, node):
    """
    Set the node that contains all actor geometry
    """
    self.__geomNode = node
def getLODNode(self):
    """
    Return the node that switches actor geometry in and out"""
    # Unwrap the NodePath to return the underlying LODNode object.
    return self.__LODNode.node()
def setLODNode(self, node=None):
    """
    Set the node that switches actor geometry in and out.
    If one is not supplied as an argument, make one
    """
    if (node == None):
        node = LODNode.makeDefaultLod("lod")

    if self.__LODNode:
        # NOTE(review): this branch stores the raw node rather than a
        # NodePath wrapping it, unlike the else branch below -- looks
        # suspicious, but preserved as-is; confirm against callers.
        self.__LODNode = node
    else:
        self.__LODNode = self.__geomNode.attachNewNode(node)

    self.__hasLOD = 1
    # switches maps lodName -> [inDist, outDist]; filled by addLOD/setLOD.
    self.switches = {}
def useLOD(self, lodName):
    """
    Make the Actor ONLY display the given LOD
    """
    lodNP = self.__LODNode
    child = lodNP.find(str(lodName))
    # Force the C++ LODNode to show only this child, regardless of
    # camera distance.
    lodNP.node().forceSwitch(lodNP.node().findChild(child.node()))
def printLOD(self):
    # Debug dump: print the python-side switch distances alongside the
    # distances actually stored on the C++ LODNode, for comparison.
    sortedKeys = self.__sortedLODNames
    for eachLod in sortedKeys:
        print "python switches for %s: in: %d, out %d" % (eachLod,
                                                          self.switches[eachLod][0],
                                                          self.switches[eachLod][1])

    switchNum = self.__LODNode.node().getNumSwitches()
    for eachSwitch in range(0, switchNum):
        print "c++ switches for %d: in: %d, out: %d" % (eachSwitch,
                                                        self.__LODNode.node().getIn(eachSwitch),
                                                        self.__LODNode.node().getOut(eachSwitch))
def resetLOD(self):
    """
    Restore all switch distance info (usually after a useLOD call)"""
    # Drops the forced switch set by useLOD(); normal distance-based
    # switching resumes.
    self.__LODNode.node().clearForceSwitch()
def addLOD(self, lodName, inDist=0, outDist=0, center=None):
    """addLOD(self, string)
    Add a named node under the LODNode to parent all geometry
    of a specific LOD under.
    """
    self.__LODNode.attachNewNode(str(lodName))
    # Record the switch distances both python-side and on the C++ node.
    self.switches[lodName] = [inDist, outDist]
    self.__LODNode.node().addSwitch(inDist, outDist)
    if center != None:
        self.setCenter(center)
def setLOD(self, lodName, inDist=0, outDist=0):
    """setLOD(self, string)
    Set the switch distance for given LOD
    """
    # Record python-side, then push to the C++ LODNode at this LOD's
    # child index.
    self.switches[lodName] = [inDist, outDist]
    index = self.getLODIndex(lodName)
    self.__LODNode.node().setSwitch(index, inDist, outDist)
def getLODIndex(self, lodName):
    """getLODIndex(self)
    safe method (but expensive) for retrieving the child index
    """
    children = list(self.__LODNode.getChildren())
    return children.index(self.getLOD(lodName))
def getLOD(self, lodName):
    """getLOD(self, string)
    Get the named node under the LOD to which we parent all LOD
    specific geometry to. Returns 'None' if not found
    """
    # Guard clauses: no LOD node, or no child by that name.
    if not self.__LODNode:
        return None
    match = self.__LODNode.find(str(lodName))
    if match.isEmpty():
        return None
    return match
def hasLOD(self):
    """
    Return 1 if the actor has LODs, 0 otherwise
    """
    # Set to 1 by setLODNode(), reset to 0 by flush().
    return self.__hasLOD
def setCenter(self, center):
    # The LOD center defaults to the origin.
    self.__LODCenter = Point3(0, 0, 0) if center == None else center
    if self.__LODNode:
        self.__LODNode.node().setCenter(self.__LODCenter)
    if self.__LODAnimation:
        # Distance-based animation depends on the center; re-apply it.
        self.setLODAnimation(*self.__LODAnimation)
def setLODAnimation(self, farDistance, nearDistance, delayFactor):
    """ Activates a special mode in which the Actor animates less
    frequently as it gets further from the camera.  Closer than
    nearDistance the character animates at its normal rate, every
    frame; at exactly farDistance it animates only every delayFactor
    seconds (which should be greater than 0).  In between, the rate is
    linearly interpolated, and the interpolation continues beyond
    farDistance so distant characters animate ever less frequently.
    Intended as a simple optimization for crowds of characters that
    need not animate every frame. """
    # Remember the settings so setCenter() can re-apply them.
    self.__LODAnimation = (farDistance, nearDistance, delayFactor)

    for bundleDict in self.__partBundleDict.values():
        for eachPartDef in bundleDict.values():
            charNP = eachPartDef.partBundleNP
            charNP.node().setLodAnimation(self.__LODCenter, farDistance,
                                          nearDistance, delayFactor)
def clearLODAnimation(self):
    """ Undoes the effect of a recent call to setLODAnimation();
    henceforth the character animates every frame, regardless of its
    distance from the camera. """
    self.__LODAnimation = None

    for bundleDict in self.__partBundleDict.values():
        for eachPartDef in bundleDict.values():
            eachPartDef.partBundleNP.node().clearLodAnimation()
def update(self, lod=0, partName=None, lodName=None, force=False):
    """ Updates all of the Actor's joints in the indicated LOD.
    The LOD may be specified by name, or by number, where 0 is the
    highest level of detail, 1 is the next highest, and so on.
    If force is True, this will update every joint, even if we
    don't believe it's necessary.
    Returns True if any joint has changed as a result of this,
    False otherwise. """
    # Resolve the lod index against either the explicit name or the full
    # sorted name list.
    if lodName == None:
        lodNames = self.getLODNames()
    else:
        lodNames = [lodName]

    anyChanged = False
    if lod < len(lodNames):
        lodName = lodNames[lod]
        if partName == None:
            # Update every part of this LOD.
            partBundleDict = self.__partBundleDict[lodName]
            partNames = partBundleDict.keys()
        else:
            partNames = [partName]
        for partName in partNames:
            partBundle = self.getPartBundle(partName, lodNames[lod])
            if force:
                if partBundle.forceUpdate():
                    anyChanged = True
            else:
                if partBundle.update():
                    anyChanged = True
    else:
        self.notify.warning('update() - no lod: %d' % lod)

    return anyChanged
def getFrameRate(self, animName=None, partName=None):
    """getFrameRate(self, string, string=None)
    Return actual frame rate of given anim name and given part.
    If no anim specified, use the currently playing anim.
    If no part specified, return anim durations of first part.
    NOTE: returns info only for an arbitrary LOD
    """
    # The first-LOD name previously computed here was never used (and
    # keys()[0] fails on a py3 dict view); getAnimControls() already
    # selects the appropriate controls.
    controls = self.getAnimControls(animName, partName)
    if len(controls) == 0:
        return None
    return controls[0].getFrameRate()
def getBaseFrameRate(self, animName=None, partName=None):
    """getBaseFrameRate(self, string, string=None)
    Return frame rate of given anim name and given part, unmodified
    by any play rate in effect.
    """
    # The first-LOD name previously computed here was never used (and
    # keys()[0] fails on a py3 dict view).
    controls = self.getAnimControls(animName, partName)
    if len(controls) == 0:
        return None
    return controls[0].getAnim().getBaseFrameRate()
def getPlayRate(self, animName=None, partName=None):
    """
    Return the play rate of given anim for a given part.
    If no part is given, assume first part in dictionary.
    If no anim is given, find the current anim for the part.
    NOTE: Returns info only for an arbitrary LOD
    """
    if self.__animControlDict:
        # The first-LOD name previously computed here was never used
        # (and keys()[0] fails on a py3 dict view); getAnimControls()
        # already selects the appropriate controls.
        controls = self.getAnimControls(animName, partName)
        if controls:
            return controls[0].getPlayRate()
    return None
def setPlayRate(self, rate, animName, partName=None):
    """setPlayRate(self, float, string, string=None)
    Set the play rate of given anim for a given part.
    If no part is given, set for all parts in dictionary.

    The animName parameter is required (rather than defaulting to the
    currently-playing anim) to avoid accidentally changing the wrong
    anim's rate.
    NOTE: sets play rate on all LODs"""
    for ctl in self.getAnimControls(animName, partName):
        ctl.setPlayRate(rate)
def getDuration(self, animName=None, partName=None,
                fromFrame=None, toFrame=None):
    """
    Return duration of given anim name and given part.
    If no anim specified, use the currently playing anim.
    If no part specified, return anim duration of first part.
    NOTE: returns info for arbitrary LOD
    """
    # The first-LOD name previously computed here was never used (and
    # keys()[0] fails on a py3 dict view).
    controls = self.getAnimControls(animName, partName)
    if len(controls) == 0:
        return None

    animControl = controls[0]
    if fromFrame is None:
        fromFrame = 0
    if toFrame is None:
        toFrame = animControl.getNumFrames() - 1
    # Frame range is inclusive at both ends, hence the +1.
    return ((toFrame + 1) - fromFrame) / animControl.getFrameRate()
def getNumFrames(self, animName=None, partName=None):
    """Return the number of frames in the named anim on the given part,
    or None if no matching anim control is found.
    NOTE: returns info for an arbitrary LOD."""
    # The first-LOD name previously computed here was never used (and
    # keys()[0] fails on a py3 dict view).
    controls = self.getAnimControls(animName, partName)
    if len(controls) == 0:
        return None
    return controls[0].getNumFrames()
def getFrameTime(self, anim, frame, partName=None):
    # Convert a frame index into a time offset within the anim by
    # scaling the anim's duration by the fractional frame position.
    total = self.getNumFrames(anim, partName)
    duration = self.getDuration(anim, partName)
    return duration * float(frame) / total
def getCurrentAnim(self, partName=None):
    """
    Return the anim currently playing on the actor. If part not
    specified return current anim of an arbitrary part in dictionary.
    NOTE: only returns info for an arbitrary LOD
    """
    if not self.__animControlDict:
        return None

    # Grab an arbitrary (first) LOD's anim table; list() so indexing
    # works whether items() returns a list (py2) or a view (py3).
    lodName, animControlDict = list(self.__animControlDict.items())[0]
    if partName == None:
        partName, animDict = list(animControlDict.items())[0]
    else:
        animDict = animControlDict.get(partName)
        if animDict == None:
            # part was not present
            Actor.notify.warning("couldn't find part: %s" % (partName))
            return None

    # loop through all anims for named part and find if any are playing
    for animName, anim in animDict.items():
        if anim.animControl and anim.animControl.isPlaying():
            return animName

    # we must have found none, or gotten an error
    return None
def getCurrentFrame(self, animName=None, partName=None):
    """
    Return the current frame number of the named anim, or if no
    anim is specified, then the anim current playing on the
    actor. If part not specified return current anim of first part
    in dictionary. NOTE: only returns info for an arbitrary LOD
    """
    # Guard against an empty anim table (getCurrentAnim already does
    # this); previously an actor with no anims raised IndexError here.
    if not self.__animControlDict:
        return None

    # list() so indexing works whether items() returns a list (py2) or
    # a view (py3).
    lodName, animControlDict = list(self.__animControlDict.items())[0]
    if partName == None:
        partName, animDict = list(animControlDict.items())[0]
    else:
        animDict = animControlDict.get(partName)
        if animDict == None:
            # part was not present
            Actor.notify.warning("couldn't find part: %s" % (partName))
            return None

    if animName:
        anim = animDict.get(animName)
        if not anim:
            Actor.notify.warning("couldn't find anim: %s" % (animName))
        elif anim.animControl:
            return anim.animControl.getFrame()
    else:
        # loop through all anims for named part and find if any are playing
        for animName, anim in animDict.items():
            if anim.animControl and anim.animControl.isPlaying():
                return anim.animControl.getFrame()

    # we must have found none, or gotten an error
    return None
# arranging
def getPart(self, partName, lodName="lodRoot"):
    """
    Find the named part in the optional named lod and return it, or
    return None if not present
    """
    bundleDict = self.__partBundleDict.get(lodName)
    if not bundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    spDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    entry = bundleDict.get(spDef.truePartName)
    return entry.partBundleNP if entry != None else None
def getPartBundle(self, partName, lodName="lodRoot"):
    """
    Find the named part in the optional named lod and return its
    associated PartBundle, or return None if not present
    """
    bundleDict = self.__partBundleDict.get(lodName)
    if not bundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    spDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    entry = bundleDict.get(spDef.truePartName)
    if entry == None:
        return None
    return entry.getBundle()
def removePart(self, partName, lodName="lodRoot"):
    """
    Remove the geometry and animations of the named part of the
    optional named lod if present.
    NOTE: this will remove child geometry also!
    """
    # Drop the geometry for this part, if the LOD exists.
    bundleDict = self.__partBundleDict.get(lodName)
    if not bundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return
    if partName in bundleDict:
        bundleDict[partName].partBundleNP.removeNode()
        del bundleDict[partName]

    # Drop the matching animation table; merged bundles live under the
    # 'common' key rather than a per-LOD key.
    if self.mergeLODBundles:
        lodName = 'common'
    animParts = self.__animControlDict.get(lodName)
    if not animParts:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return
    if partName in animParts:
        del animParts[partName]
def hidePart(self, partName, lodName="lodRoot"):
    """
    Make the given part of the optionally given lod not render,
    even though still in the tree.
    NOTE: this will affect child geometry
    """
    bundleDict = self.__partBundleDict.get(lodName)
    if not bundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return
    entry = bundleDict.get(partName)
    if not entry:
        Actor.notify.warning("no part named %s!" % (partName))
        return
    entry.partBundleNP.hide()
def showPart(self, partName, lodName="lodRoot"):
    """
    Make the given part render while in the tree.
    NOTE: this will affect child geometry
    """
    bundleDict = self.__partBundleDict.get(lodName)
    if not bundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return
    entry = bundleDict.get(partName)
    if not entry:
        Actor.notify.warning("no part named %s!" % (partName))
        return
    entry.partBundleNP.show()
def showAllParts(self, partName, lodName="lodRoot"):
    """
    Make the given part and all its children render while in the tree.
    NOTE: this will affect child geometry
    """
    bundleDict = self.__partBundleDict.get(lodName)
    if not bundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return
    entry = bundleDict.get(partName)
    if not entry:
        Actor.notify.warning("no part named %s!" % (partName))
        return
    partNP = entry.partBundleNP
    partNP.show()
    partNP.getChildren().show()
def exposeJoint(self, node, partName, jointName, lodName="lodRoot",
                localTransform = 0):
    """exposeJoint(self, NodePath, string, string, key="lodRoot")
    Starts the joint animating the indicated node. As the joint
    animates, it will transform the node by the corresponding
    amount. This will replace whatever matrix is on the node each
    frame. The default is to expose the net transform from the root,
    but if localTransform is true, only the node's local transform
    from its parent is exposed."""
    partBundleDict = self.__partBundleDict.get(lodName)
    if not partBundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None

    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    partDef = partBundleDict.get(subpartDef.truePartName)
    if partDef:
        bundle = partDef.getBundle()
    else:
        Actor.notify.warning("no part named %s!" % (partName))
        return None

    # Get a handle to the joint.
    joint = bundle.findChild(jointName)

    # Create a node to expose onto, if the caller did not supply one.
    if node == None:
        node = self.attachNewNode(jointName)

    if (joint):
        if localTransform:
            joint.addLocalTransform(node.node())
        else:
            joint.addNetTransform(node.node())
    else:
        Actor.notify.warning("no joint named %s!" % (jointName))

    return node
def stopJoint(self, partName, jointName, lodName="lodRoot"):
    """stopJoint(self, string, string, key="lodRoot")
    Stops the joint from animating external nodes. If the joint
    is animating a transform on a node, this will permanently stop
    it. However, this does not affect vertex animations."""
    bundleDict = self.__partBundleDict.get(lodName)
    if not bundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    spDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    entry = bundleDict.get(spDef.truePartName)
    if not entry:
        Actor.notify.warning("no part named %s!" % (partName))
        return None

    # Get a handle to the joint.
    joint = entry.getBundle().findChild(jointName)
    if not joint:
        Actor.notify.warning("no joint named %s!" % (jointName))
        return
    joint.clearNetTransforms()
    joint.clearLocalTransforms()
def getJoints(self, partName = None, jointName = '*', lodName = None):
    """ Returns the list of all joints, from the named part or
    from all parts, that match the indicated jointName. The
    jointName may include pattern characters like *. """
    joints=[]
    pattern = GlobPattern(jointName)

    # Choose which part tables to search.
    if lodName == None and self.mergeLODBundles:
        # Get the common bundle.
        partBundleDicts = [self.__commonBundleHandles]
    elif lodName == None:
        # Get all LOD's.
        partBundleDicts = self.__partBundleDict.values()
    else:
        # Get one LOD.
        partBundleDict = self.__partBundleDict.get(lodName)
        if not partBundleDict:
            Actor.notify.warning("couldn't find lod: %s" % (lodName))
            return []
        partBundleDicts = [partBundleDict]

    for partBundleDict in partBundleDicts:
        parts = []
        if partName:
            subpartDef = self.__subpartDict.get(partName, None)
            if not subpartDef:
                # Whole part
                subset = None
                partDef = partBundleDict.get(partName)
            else:
                # Sub-part
                subset = subpartDef.subset
                partDef = partBundleDict.get(subpartDef.truePartName)
            if not partDef:
                Actor.notify.warning("no part named %s!" % (partName))
                return []
            parts = [partDef]
        else:
            subset = None
            parts = partBundleDict.values()

        for partData in parts:
            partBundle = partData.getBundle()
            if not pattern.hasGlobCharacters() and not subset:
                # The simple case: an exact name with no subset
                # restriction can be looked up directly.
                joint = partBundle.findChild(jointName)
                if joint:
                    joints.append(joint)
            else:
                # The more complex case: walk the hierarchy matching
                # the pattern against each joint name.
                isIncluded = True
                if subset:
                    isIncluded = subset.isIncludeEmpty()
                self.__getPartJoints(joints, pattern, partBundle, subset, isIncluded)

    return joints
def getOverlappingJoints(self, partNameA, partNameB, jointName = '*', lodName = None):
    """ Returns the set of joints, matching jointName, that are
    shared between partNameA and partNameB. """
    common = set(self.getJoints(partName = partNameA, jointName = jointName, lodName = lodName))
    common &= set(self.getJoints(partName = partNameB, jointName = jointName, lodName = lodName))
    return common
def __getPartJoints(self, joints, pattern, partNode, subset, isIncluded):
    """ Recursively walks the joint hierarchy to look for matching
    joint names, implementing getJoints(). """
    nodeName = partNode.getName()
    if subset:
        # Constrain the traversal just to the named subset.
        if subset.matchesInclude(nodeName):
            isIncluded = True
        elif subset.matchesExclude(nodeName):
            isIncluded = False

    shouldCollect = (isIncluded
                     and pattern.matches(nodeName)
                     and isinstance(partNode, MovingPartBase))
    if shouldCollect:
        joints.append(partNode)

    for child in partNode.getChildren():
        self.__getPartJoints(joints, pattern, child, subset, isIncluded)
def getJointTransform(self, partName, jointName, lodName='lodRoot'):
    # Return the named joint's default (rest) value, or None with a
    # warning if the lod, part, or joint cannot be found.
    bundleDict = self.__partBundleDict.get(lodName)
    if not bundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    spDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    entry = bundleDict.get(spDef.truePartName)
    if not entry:
        Actor.notify.warning("no part named %s!" % (partName))
        return None

    joint = entry.getBundle().findChild(jointName)
    if joint == None:
        Actor.notify.warning("no joint named %s!" % (jointName))
        return None
    return joint.getDefaultValue()
def getJointTransformState(self, partName, jointName, lodName='lodRoot'):
    # Like getJointTransform(), but returns the joint's current
    # TransformState instead of its default value.
    bundleDict = self.__partBundleDict.get(lodName)
    if not bundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    spDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    entry = bundleDict.get(spDef.truePartName)
    if not entry:
        Actor.notify.warning("no part named %s!" % (partName))
        return None

    joint = entry.getBundle().findChild(jointName)
    if joint == None:
        Actor.notify.warning("no joint named %s!" % (jointName))
        return None
    return joint.getTransformState()
def controlJoint(self, node, partName, jointName, lodName="lodRoot"):
    """The converse of exposeJoint: this associates the joint with
    the indicated node, so that the joint transform will be copied
    from the node to the joint each frame. This can be used for
    programmer animation of a particular joint at runtime.

    The parameter node should be the NodePath for the node whose
    transform will animate the joint. If node is None, a new node
    will automatically be created and loaded with the joint's
    initial transform. In either case, the node used will be
    returned.

    It used to be necessary to call this before any animations
    have been loaded and bound, but that is no longer so.
    """
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    trueName = subpartDef.truePartName

    # Apply to every LOD's bundle; succeed if any bundle accepts it.
    anyGood = False
    for bundleDict in self.__partBundleDict.values():
        bundle = bundleDict[trueName].getBundle()
        if node == None:
            # Create a fresh node, initialized to the joint's default
            # (rest) transform; reused for the remaining LODs.
            node = self.attachNewNode(ModelNode(jointName))
            joint = bundle.findChild(jointName)
            if joint and isinstance(joint, MovingPartMatrix):
                node.setMat(joint.getDefaultValue())

        if bundle.controlJoint(jointName, node.node()):
            anyGood = True

    if not anyGood:
        self.notify.warning("Cannot control joint %s" % (jointName))

    return node
def freezeJoint(self, partName, jointName, transform = None,
                pos=None, hpr=None, scale=None):
    """Similar to controlJoint, but the transform assigned is
    static, and may not be animated at runtime (without another
    subsequent call to freezeJoint). This is slightly more
    optimal than controlJoint() for cases in which the transform
    is not intended to be animated during the lifetime of the
    Actor.

    If no explicit transform is given, one is composed from pos,
    hpr, and scale, each defaulting to identity.
    """
    if transform == None:
        # None sentinels instead of mutable Vec3 default arguments: a
        # shared default instance could be mutated by a caller and
        # silently change every later call.
        if pos is None:
            pos = Vec3(0, 0, 0)
        if hpr is None:
            hpr = Vec3(0, 0, 0)
        if scale is None:
            scale = Vec3(1, 1, 1)
        transform = TransformState.makePosHprScale(pos, hpr, scale)

    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    trueName = subpartDef.truePartName

    # Apply to every LOD's bundle; succeed if any bundle accepts it.
    anyGood = False
    for bundleDict in self.__partBundleDict.values():
        if bundleDict[trueName].getBundle().freezeJoint(jointName, transform):
            anyGood = True

    if not anyGood:
        self.notify.warning("Cannot freeze joint %s" % (jointName))
def releaseJoint(self, partName, jointName):
    """Undoes a previous call to controlJoint() or freezeJoint()
    and restores the named joint to its normal animation. """
    spDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    trueName = spDef.truePartName
    # Release on every LOD's bundle.
    for bundleDict in self.__partBundleDict.values():
        bundleDict[trueName].getBundle().releaseJoint(jointName)
def instance(self, path, partName, jointName, lodName="lodRoot"):
    """instance(self, NodePath, string, string, key="lodRoot")
    Instance a nodePath to an actor part at a joint called jointName"""
    bundleDict = self.__partBundleDict.get(lodName)
    if not bundleDict:
        Actor.notify.warning("no lod named %s!" % (lodName))
        return
    spDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    entry = bundleDict.get(spDef.truePartName)
    if not entry:
        Actor.notify.warning("no part named %s!" % (partName))
        return
    joint = entry.partBundleNP.find("**/" + jointName)
    if joint.isEmpty():
        Actor.notify.warning("%s not found!" % (jointName))
        return
    return path.instanceTo(joint)
def attach(self, partName, anotherPartName, jointName, lodName="lodRoot"):
    """attach(self, string, string, string, key="lodRoot")
    Attach one actor part to another at a joint called jointName"""
    bundleDict = self.__partBundleDict.get(lodName)
    if not bundleDict:
        Actor.notify.warning("no lod named %s!" % (lodName))
        return
    spDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    entry = bundleDict.get(spDef.truePartName)
    if not entry:
        Actor.notify.warning("no part named %s!" % (partName))
        return
    otherEntry = bundleDict.get(anotherPartName)
    if not otherEntry:
        Actor.notify.warning("no part named %s!" % (anotherPartName))
        return
    joint = otherEntry.partBundleNP.find("**/" + jointName)
    if joint.isEmpty():
        Actor.notify.warning("%s not found!" % (jointName))
        return
    entry.partBundleNP.reparentTo(joint)
def drawInFront(self, frontPartName, backPartName, mode,
                root=None, lodName=None):
    """drawInFront(self, string, int, string=None, key=None)

    Arrange geometry so the frontPart(s) are drawn in front of
    backPart.

    If mode == -1, the geometry is simply arranged to be drawn in
    the correct order, assuming it is already under a
    direct-render scene graph (like the DirectGui system). That
    is, frontPart is reparented to backPart, and backPart is
    reordered to appear first among its siblings.

    If mode == -2, the geometry is arranged to be drawn in the
    correct order, and depth test/write is turned off for
    frontPart.

    If mode == -3, frontPart is drawn as a decal onto backPart.
    This assumes that frontPart is mostly coplanar with and does
    not extend beyond backPart, and that backPart is mostly flat
    (not self-occluding).

    If mode > 0, the frontPart geometry is placed in the 'fixed'
    bin, with the indicated drawing order. This will cause it to
    be drawn after almost all other geometry. In this case, the
    backPartName is actually unused.

    Takes an optional argument root as the start of the search for the
    given parts. Also takes optional lod name to refine search for the
    named parts. If root and lod are defined, we search for the given
    root under the given lod.
    """
    # check to see if we are working within an lod
    if lodName != None:
        # find the named lod node
        lodRoot = self.__LODNode.find(str(lodName))
        if root == None:
            # no need to look further
            root = lodRoot
        else:
            # look for root under lod
            root = lodRoot.find("**/" + root)
    else:
        # start search from self if no root and no lod given
        if root == None:
            root = self

    frontParts = root.findAllMatches("**/" + frontPartName)

    if mode > 0:
        # Use the 'fixed' bin instead of reordering the scene
        # graph.
        numFrontParts = frontParts.getNumPaths()
        for partNum in range(0, numFrontParts):
            frontParts[partNum].setBin('fixed', mode)
        return

    if mode == -2:
        # Turn off depth test/write on the frontParts.
        numFrontParts = frontParts.getNumPaths()
        for partNum in range(0, numFrontParts):
            frontParts[partNum].setDepthWrite(0)
            frontParts[partNum].setDepthTest(0)

    # Find the back part.
    backPart = root.find("**/" + backPartName)
    if (backPart.isEmpty()):
        Actor.notify.warning("no part named %s!" % (backPartName))
        return

    if mode == -3:
        # Draw as a decal.
        backPart.node().setEffect(DecalEffect.make())
    else:
        # Reorder the backPart to be the first of its siblings.
        backPart.reparentTo(backPart.getParent(), -1)

    #reparent all the front parts to the back part
    frontParts.reparentTo(backPart)
def fixBounds(self, partName = None):
    """Force recomputation of bounding volumes for the geometry of the
    named part, or of every part if partName is None."""
    # The two branches previously duplicated the same ten-line body;
    # both now share __fixPartBounds().
    if partName == None:
        # iterate through everything
        for lodData in self.__partBundleDict.values():
            for partData in lodData.values():
                self.__fixPartBounds(partData)
    else:
        # iterate through for a specific part
        for lodData in self.__partBundleDict.values():
            partData = lodData.get(partName)
            if partData:
                self.__fixPartBounds(partData)

def __fixPartBounds(self, partData):
    # Update the character, then mark every Geom's bounds (and each
    # GeomNode's internal bounds) stale so they are recomputed.
    char = partData.partBundleNP
    char.node().update()
    geomNodes = char.findAllMatches("**/+GeomNode")
    for nodeNum in xrange(geomNodes.getNumPaths()):
        thisGeomNode = geomNodes.getPath(nodeNum)
        for geomNum in xrange(thisGeomNode.node().getNumGeoms()):
            thisGeomNode.node().getGeom(geomNum).markBoundsStale()
        thisGeomNode.node().markInternalBoundsStale()
def fixBounds_old(self, part=None):
    """fixBounds(self, nodePath=None)
    Force recomputation of bounding spheres for all geoms
    in a given part. If no part specified, fix all geoms
    in this actor
    """
    # if no part name specified fix all parts
    if (part==None):
        part = self

    # update all characters first
    charNodes = part.findAllMatches("**/+Character")
    numCharNodes = charNodes.getNumPaths()
    for charNum in range(0, numCharNodes):
        (charNodes.getPath(charNum)).node().update()

    # for each geomNode, iterate through all geoms and force update
    # of bounding spheres by marking current bounds as stale
    geomNodes = part.findAllMatches("**/+GeomNode")
    numGeomNodes = geomNodes.getNumPaths()
    for nodeNum in range(0, numGeomNodes):
        thisGeomNode = geomNodes.getPath(nodeNum)
        numGeoms = thisGeomNode.node().getNumGeoms()
        for geomNum in range(0, numGeoms):
            thisGeom = thisGeomNode.node().getGeom(geomNum)
            thisGeom.markBoundsStale()
            # assert is a no-op in optimized builds; debug logging only.
            assert Actor.notify.debug("fixing bounds for node %s, geom %s" % \
                                      (nodeNum, geomNum))
        thisGeomNode.node().markInternalBoundsStale()
def showAllBounds(self):
    """
    Show the bounds of all actor geoms
    """
    geomNodes = self.__geomNode.findAllMatches("**/+GeomNode")
    for nodeNum in range(geomNodes.getNumPaths()):
        geomNodes.getPath(nodeNum).showBounds()
def hideAllBounds(self):
    """
    Hide the bounds of all actor geoms
    """
    geomNodes = self.__geomNode.findAllMatches("**/+GeomNode")
    for nodeNum in range(geomNodes.getNumPaths()):
        geomNodes.getPath(nodeNum).hideBounds()
# actions
def animPanel(self):
    # The importlib indirection keeps ModuleFinder from treating the Tk
    # panel as a hard dependency when building a .p3d package.
    import importlib
    panelModule = importlib.import_module('direct.tkpanels.AnimPanel')
    return panelModule.AnimPanel(self)
def stop(self, animName=None, partName=None):
    """Stop the named animation on the named part of the actor.
    With no animName, stops every playing animation; with no
    partName, stops on all parts.  NOTE: stops all LODs."""
    for ctrl in self.getAnimControls(animName, partName):
        ctrl.stop()
def play(self, animName, partName=None, fromFrame=None, toFrame=None):
    """Play the given animation on the given part of the actor
    (all parts when partName is None), optionally over the frame
    range [fromFrame, toFrame].  NOTE: plays over ALL LODs."""
    controls = self.getAnimControls(animName, partName)
    if fromFrame is None:
        for ctrl in controls:
            ctrl.play()
    else:
        for ctrl in controls:
            # Default the end of the range to the control's last frame.
            end = toFrame if toFrame is not None else ctrl.getNumFrames() - 1
            ctrl.play(fromFrame, end)
def loop(self, animName, restart=1, partName=None,
         fromFrame=None, toFrame=None):
    """Loop the given animation on the given part of the actor,
    restarting at frame zero if requested.  If no part name is
    given, loops on all parts.  NOTE: loops on all LODs."""
    controls = self.getAnimControls(animName, partName)
    if fromFrame is None:
        for ctrl in controls:
            ctrl.loop(restart)
    else:
        for ctrl in controls:
            # Default the end of the range to the control's last frame.
            end = toFrame if toFrame is not None else ctrl.getNumFrames() - 1
            ctrl.loop(restart, fromFrame, end)
def pingpong(self, animName, restart=1, partName=None,
             fromFrame=None, toFrame=None):
    """Loop the given animation back and forth on the given part of
    the actor, restarting at frame zero if requested.  If no part
    name is given, applies to all parts.  NOTE: loops on all LODs."""
    start = 0 if fromFrame is None else fromFrame
    for ctrl in self.getAnimControls(animName, partName):
        # Default the end of the range to the control's last frame.
        end = toFrame if toFrame is not None else ctrl.getNumFrames() - 1
        ctrl.pingpong(restart, start, end)
def pose(self, animName, frame, partName=None, lodName=None):
    """Pose the actor at the given frame of the named animation on
    the specified part.  If no part is given, the pose is applied
    to all parts."""
    for ctrl in self.getAnimControls(animName, partName, lodName):
        ctrl.pose(frame)
def setBlend(self, animBlend = None, frameBlend = None,
             blendType = None, partName = None):
    """Change how the Actor combines animations and interpolates frames.

    animBlend (bool): when True, several animations may play at once,
    each contributing to the final pose.  In that mode, starting an
    animation with play(), loop(), or pose() does not implicitly make
    it visible; you must also assign it a weight via
    setControlEffect().

    frameBlend (bool): when True, the Actor smoothly interpolates
    between consecutive animation frames instead of holding each
    frame until the next is ready.  The default comes from the
    interpolate-frames Config.prc variable.

    blendType: selects the algorithm used to blend two or more
    matrix values into a single result; different skeleton
    hierarchies may benefit from different algorithms.  The default
    comes from the anim-blend-type Config.prc variable.

    Any parameter left as None keeps its previous value.
    """
    for bundle in self.getPartBundles(partName = partName):
        if blendType is not None:
            bundle.setBlendType(blendType)
        if animBlend is not None:
            bundle.setAnimBlendFlag(animBlend)
        if frameBlend is not None:
            bundle.setFrameBlendFlag(frameBlend)
def enableBlend(self, blendType = PartBundle.BTNormalizedLinear, partName = None):
    """Deprecated; use setBlend() instead.

    Turns on simultaneous blending of multiple animations: after
    this call, play(), loop(), or pose() may be invoked on several
    animations at once, and each contributes to the final pose
    according to the weight assigned with setControlEffect().
    Starting an animation no longer implicitly makes it visible.
    """
    self.setBlend(animBlend = True, blendType = blendType, partName = partName)
def disableBlend(self, partName = None):
    """Deprecated; use setBlend() instead.

    Returns the Actor to normal one-animation-at-a-time operation
    after a previous call to enableBlend().
    """
    self.setBlend(animBlend = False, partName = partName)
def setControlEffect(self, animName, effect,
                     partName = None, lodName = None):
    """Set the weight with which the named animation contributes to
    the overall pose.  Only meaningful after a previous call to
    setBlend(animBlend = True)."""
    for ctrl in self.getAnimControls(animName, partName, lodName):
        ctrl.getPart().setControlEffect(ctrl, effect)
def getAnimFilename(self, animName, partName='modelRoot'):
    """Return the animation filename registered for animName on the
    given part, or None if no such animation is known.

    The lookup uses the 'common' pseudo-LOD when LOD bundles are
    merged, otherwise the first LOD switch, falling back to
    'lodRoot' for non-LOD actors.
    """
    if self.mergeLODBundles:
        lodName = 'common'
    elif self.switches:
        # list() keeps this working whether keys() is a list (py2)
        # or a view (py3); same first key either way.
        lodName = str(list(self.switches.keys())[0])
    else:
        lodName = 'lodRoot'

    try:
        return self.__animControlDict[lodName][partName][animName].filename
    except KeyError:
        # Narrowed from a bare except: an unknown lod/part/anim name
        # raises KeyError; any other exception is a real bug and
        # should propagate instead of being silently swallowed.
        return None
def getAnimControl(self, animName, partName=None, lodName=None,
                   allowAsyncBind = True):
    """Search the animControl dictionary indicated by lodName for
    a given anim and part.  If none specified, try the first part and lod.
    Return the animControl if present, or None otherwise.

    If the anim is known but not yet bound, it is bound here (possibly
    asynchronously, per allowAsyncBind).  With allowAsyncBind False,
    an already-bound control is waited on so it is ready on return.
    """
    if not partName:
        partName = 'modelRoot'

    if self.mergeLODBundles:
        # All LODs share one set of bundles under the 'common' key.
        lodName = 'common'
    elif not lodName:
        if self.switches:
            # NOTE(review): keys()[0] indexing is Python-2 only.
            lodName = str(self.switches.keys()[0])
        else:
            lodName = 'lodRoot'

    partDict = self.__animControlDict.get(lodName)
    # if this assertion fails, named lod was not present
    assert partDict != None

    animDict = partDict.get(partName)
    if animDict == None:
        # part was not present
        Actor.notify.warning("couldn't find part: %s" % (partName))
    else:
        anim = animDict.get(animName)
        if anim == None:
            # anim was not present
            assert Actor.notify.debug("couldn't find anim: %s" % (animName))
            pass
        else:
            # bind the animation first if we need to
            if not anim.animControl:
                self.__bindAnimToPart(animName, partName, lodName,
                                      allowAsyncBind = allowAsyncBind)
            elif not allowAsyncBind:
                # Caller needs the control ready now; block on any
                # pending asynchronous bind.
                anim.animControl.waitPending()
            return anim.animControl

    return None
def getAnimControls(self, animName=None, partName=None, lodName=None,
                    allowAsyncBind = True):
    """Returns a list of the AnimControls that represent the given
    animation for the given part and the given lod.

    If animName is None or omitted, the currently-playing
    animation (or all currently-playing animations) is returned.
    If animName is True, all animations are returned.  If animName
    is a single string name, that particular animation is
    returned.  If animName is a list of string names, all of the
    named animations are returned.

    If partName is None or omitted, all parts are returned (or
    possibly the one overall Actor part, according to the
    subpartsComplete flag).

    If lodName is None or omitted, all LOD's are returned.
    """
    if partName == None and self.__subpartsComplete:
        # If we have the __subpartsComplete flag, and no partName
        # is specified, it really means to play the animation on
        # all subparts, not on the overall Actor.
        partName = self.__subpartDict.keys()

    controls = []

    # build list of lodNames and corresponding animControlDicts
    # requested.
    if lodName == None or self.mergeLODBundles:
        # Get all LOD's
        animControlDictItems = self.__animControlDict.items()
    else:
        partDict = self.__animControlDict.get(lodName)
        if partDict == None:
            Actor.notify.warning("couldn't find lod: %s" % (lodName))
            animControlDictItems = []
        else:
            animControlDictItems = [(lodName, partDict)]

    for lodName, partDict in animControlDictItems:
        # Now, build the list of partNames and the corresponding
        # animDicts.
        if partName == None:
            # Get all main parts, but not sub-parts.
            animDictItems = []
            for thisPart, animDict in partDict.items():
                if thisPart not in self.__subpartDict:
                    animDictItems.append((thisPart, animDict))
        else:
            # Get exactly the named part or parts.
            if isinstance(partName, types.StringTypes):
                partNameList = [partName]
            else:
                partNameList = partName

            animDictItems = []
            for pName in partNameList:
                animDict = partDict.get(pName)
                if animDict == None:
                    # Maybe it's a subpart that hasn't been bound yet.
                    subpartDef = self.__subpartDict.get(pName)
                    if subpartDef:
                        # Create an empty anim dict for the subpart;
                        # anims are copied in lazily below.
                        animDict = {}
                        partDict[pName] = animDict

                if animDict == None:
                    # part was not present
                    Actor.notify.warning("couldn't find part: %s" % (pName))
                else:
                    animDictItems.append((pName, animDict))

        if animName is None:
            # get all playing animations
            for thisPart, animDict in animDictItems:
                for anim in animDict.values():
                    if anim.animControl and anim.animControl.isPlaying():
                        controls.append(anim.animControl)
        else:
            # get the named animation(s) only.
            if isinstance(animName, types.StringTypes):
                # A single animName
                animNameList = [animName]
            else:
                # A list of animNames, or True to indicate all anims.
                animNameList = animName
            for thisPart, animDict in animDictItems:
                names = animNameList
                if animNameList is True:
                    names = animDict.keys()
                for animName in names:
                    anim = animDict.get(animName)
                    if anim == None and partName != None:
                        for pName in partNameList:
                            # Maybe it's a subpart that hasn't been bound yet.
                            subpartDef = self.__subpartDict.get(pName)
                            if subpartDef:
                                # Copy the AnimDef down from the true
                                # (parent) part into the subpart's dict.
                                truePartName = subpartDef.truePartName
                                anim = partDict[truePartName].get(animName)
                                if anim:
                                    anim = anim.makeCopy()
                                    animDict[animName] = anim

                    if anim == None:
                        # anim was not present
                        assert Actor.notify.debug("couldn't find anim: %s" % (animName))
                        pass
                    else:
                        # bind the animation first if we need to
                        animControl = anim.animControl
                        if animControl == None:
                            animControl = self.__bindAnimToPart(
                                animName, thisPart, lodName,
                                allowAsyncBind = allowAsyncBind)
                        elif not allowAsyncBind:
                            # Force the animation to load if it's
                            # not already loaded.
                            animControl.waitPending()
                        if animControl:
                            controls.append(animControl)

    return controls
def loadModel(self, modelPath, partName="modelRoot", lodName="lodRoot",
              copy = True, okMissing = None, autoBindAnims = True):
    """Actor model loader.  Takes a model name (ie file path), a part
    name (defaults to "modelRoot") and an lod name (defaults to "lodRoot").

    modelPath may also be an already-loaded NodePath, which is either
    copied (copy=True) or used directly.  When autoBindAnims is True,
    any animations bundled inside the model file are bound immediately
    and stored in the anim control dictionary.
    """
    assert partName not in self.__subpartDict
    assert Actor.notify.debug("in loadModel: %s, part: %s, lod: %s, copy: %s" % \
                              (modelPath, partName, lodName, copy))

    if isinstance(modelPath, NodePath):
        # If we got a NodePath instead of a string, use *that* as
        # the model directly.
        if (copy):
            model = modelPath.copyTo(NodePath())
        else:
            model = modelPath
    else:
        # otherwise, we got the name of the model to load.
        loaderOptions = self.modelLoaderOptions
        if not copy:
            # If copy = 0, then we should always hit the disk.
            loaderOptions = LoaderOptions(loaderOptions)
            loaderOptions.setFlags(loaderOptions.getFlags() & ~LoaderOptions.LFNoRamCache)

        # Pass loaderOptions to specify that we want to
        # get the skeleton model.  This only matters to model
        # files (like .mb) for which we can choose to extract
        # either the skeleton or animation, or neither.
        model = loader.loadModel(modelPath, loaderOptions = loaderOptions, okMissing = okMissing)

    if (model == None):
        raise StandardError, "Could not load Actor model %s" % (modelPath)

    # The Character node may be the model root itself, or buried
    # somewhere beneath it.
    if (model.node().isOfType(Character.getClassType())):
        bundleNP = model
    else:
        bundleNP = model.find("**/+Character")

    if (bundleNP.isEmpty()):
        # Not an animatable model; just parent it in as static geometry.
        Actor.notify.warning("%s is not a character!" % (modelPath))
        model.reparentTo(self.__geomNode)
    else:
        # Maybe the model file also included some animations.  If
        # so, try to bind them immediately and put them into the
        # animControlDict.
        if autoBindAnims:
            acc = AnimControlCollection()
            autoBind(model.node(), acc, ~0)
            numAnims = acc.getNumAnims()
        else:
            numAnims = 0

        # Now extract out the Character and integrate it with
        # the Actor.

        if (lodName!="lodRoot"):
            # parent to appropriate node under LOD switch
            bundleNP.reparentTo(self.__LODNode.find(str(lodName)))
        else:
            bundleNP.reparentTo(self.__geomNode)
        self.__prepareBundle(bundleNP, model.node(), partName, lodName)

        # we rename this node to make Actor copying easier
        bundleNP.node().setName("%s%s"%(Actor.partPrefix,partName))

        if numAnims != 0:
            # If the model had some animations, store them in the
            # dict so they can be played.
            Actor.notify.info("model contains %s animations." % (numAnims))

            # make sure this lod is in anim control dict
            if self.mergeLODBundles:
                lodName = 'common'
            self.__animControlDict.setdefault(lodName, {})
            self.__animControlDict[lodName].setdefault(partName, {})

            for i in range(numAnims):
                animControl = acc.getAnim(i)
                animName = acc.getAnimName(i)

                animDef = Actor.AnimDef()
                animDef.animControl = animControl
                self.__animControlDict[lodName][partName][animName] = animDef
def __prepareBundle(self, bundleNP, partModel,
                    partName="modelRoot", lodName="lodRoot"):
    """Registers the Character NodePath's part bundle in the part
    bundle dictionary, merging it into the shared bundle for this
    part name when mergeLODBundles is in effect."""
    assert partName not in self.__subpartDict

    # Rename the node at the top of the hierarchy, if we
    # haven't already, to make it easier to identify this
    # actor in the scene graph.
    if not self.gotName:
        self.node().setName(bundleNP.node().getName())
        self.gotName = 1

    bundleDict = self.__partBundleDict.get(lodName, None)
    if bundleDict == None:
        # make a dictionary to store these parts in
        bundleDict = {}
        self.__partBundleDict[lodName] = bundleDict
        self.__updateSortedLODNames()

    node = bundleNP.node()
    # A model loaded from disk will always have just one bundle.
    assert(node.getNumBundles() == 1)
    bundleHandle = node.getBundleHandle(0)

    if self.mergeLODBundles:
        loadedBundleHandle = self.__commonBundleHandles.get(partName, None)
        if loadedBundleHandle:
            # We've already got a bundle for this part; merge it.
            node.mergeBundles(bundleHandle, loadedBundleHandle)
            bundleHandle = loadedBundleHandle
        else:
            # We haven't already got a bundle for this part; store it.
            self.__commonBundleHandles[partName] = bundleHandle

    bundleDict[partName] = Actor.PartDef(bundleNP, bundleHandle, partModel)
def makeSubpart(self, partName, includeJoints, excludeJoints = [],
                parent="modelRoot", overlapping = False):
    """Defines a new "part" of the Actor that corresponds to the
    same geometry as the named parent part, but animates only a
    certain subset of the joints.  This can be used for
    partial-body animations, for instance to animate a hand waving
    while the rest of the body continues to play its walking
    animation.

    includeJoints is a list of joint names that are to be animated
    by the subpart.  Each name can include globbing characters
    like '?' or '*', which will match one or any number of
    characters, respectively.  Including a joint by naming it in
    includeJoints implicitly includes all of the descendents of
    that joint as well, except for excludeJoints, below.

    excludeJoints is a list of joint names that are *not* to be
    animated by the subpart.  As in includeJoints, each name can
    include globbing characters.  If a joint is named by
    excludeJoints, it will not be included (and neither will any
    of its descendents), even if a parent joint was named by
    includeJoints.

    if overlapping is False, an error is raised (in the dev build)
    if this subpart shares joints with any other subparts.  If
    overlapping is True, no such error is raised.

    parent is the actual partName that this subpart is based on.
    """
    assert partName not in self.__subpartDict

    # Start from the parent's subset (if the parent is itself a
    # subpart) so restrictions compose.
    subpartDef = self.__subpartDict.get(parent, Actor.SubpartDef(''))

    subset = PartSubset(subpartDef.subset)
    for name in includeJoints:
        subset.addIncludeJoint(GlobPattern(name))
    for name in excludeJoints:
        subset.addExcludeJoint(GlobPattern(name))

    self.__subpartDict[partName] = Actor.SubpartDef(parent, subset)

    if __dev__ and not overlapping and self.validateSubparts.getValue():
        # Without the overlapping flag True, we're not allowed to
        # define overlapping sub-parts.  Verify that we haven't.
        for otherPartName, otherPartDef in self.__subpartDict.items():
            if otherPartName != partName and otherPartDef.truePartName == parent:
                joints = self.getOverlappingJoints(partName, otherPartName)
                if joints:
                    raise StandardError, 'Overlapping joints: %s and %s' % (partName, otherPartName)
def setSubpartsComplete(self, flag):
    """Sets the subpartsComplete flag.  This affects the behavior
    of play(), loop(), stop(), etc., when no explicit parts are
    specified.

    When this flag is False (the default), play() with no parts
    means to play the animation on the overall Actor, which is a
    separate part that overlaps each of the subparts.  If you then
    play a different animation on a subpart, it may stop the
    overall animation (in non-blend mode) or blend with it (in
    blend mode).

    When this flag is True, play() with no parts means to play the
    animation on each of the subparts--instead of on the overall
    Actor.  In this case, you may then play a different animation
    on a subpart, which replaces only that subpart's animation.

    It makes sense to set this True when the union of all of your
    subparts completely defines the entire Actor.
    """
    self.__subpartsComplete = flag

    if __dev__ and self.__subpartsComplete and self.validateSubparts.getValue():
        # If we've specified any parts at all so far, make sure we've
        # specified all of them.
        if self.__subpartDict:
            self.verifySubpartsComplete()
def getSubpartsComplete(self):
    """Returns the current subpartsComplete flag; see
    setSubpartsComplete() for its meaning."""
    return self.__subpartsComplete
def verifySubpartsComplete(self, partName = None, lodName = None):
    """ Ensures that each joint is defined by at least one
    subPart.  Prints a warning if this is not the case. """
    if partName:
        assert partName not in self.__subpartDict
        partNames = [partName]
    else:
        if lodName:
            partNames = self.__partBundleDict[lodName].keys()
        else:
            # NOTE(review): values()[0] indexing is Python-2 only;
            # uses an arbitrary LOD's part list as representative.
            partNames = self.__partBundleDict.values()[0].keys()

    for partName in partNames:
        # Union of all joints covered by subparts of this part.
        subJoints = set()
        for subPartName, subPartDef in self.__subpartDict.items():
            if subPartName != partName and subPartDef.truePartName == partName:
                subJoints |= set(self.getJoints(partName = subPartName, lodName = lodName))

        allJoints = set(self.getJoints(partName = partName, lodName = lodName))
        # Joints belonging to the part but to none of its subparts.
        diff = allJoints.difference(subJoints)
        if diff:
            self.notify.warning('Uncovered joints: %s' % (list(diff)))
def loadAnims(self, anims, partName="modelRoot", lodName="lodRoot"):
    """Actor anim loader.  Takes an optional partName (defaults to
    'modelRoot' for non-multipart actors) and lodName (defaults
    to 'lodRoot' for non-LOD actors) and dict of corresponding
    anims in the form animName:animPath{}

    A value in the anims dict may also be a pre-loaded NodePath
    containing an AnimBundleNode.  lodName may be 'all' to register
    the anims on every LOD.
    """
    reload = True
    if self.mergeLODBundles:
        lodNames = ['common']
    elif lodName == 'all':
        reload = False
        # NOTE(review): keys() + in-place sort() is Python-2 only.
        lodNames = self.switches.keys()
        lodNames.sort()
        for i in range(0,len(lodNames)):
            lodNames[i] = str(lodNames[i])
    else:
        lodNames = [lodName]

    assert Actor.notify.debug("in loadAnims: %s, part: %s, lod: %s" %
                              (anims, partName, lodNames[0]))

    firstLoad = True
    if not reload:
        # Probe whether this part already has an anim dict; if so,
        # we're re-registering anims rather than loading fresh.
        try:
            self.__animControlDict[lodNames[0]][partName]
            firstLoad = False
        except:
            # NOTE(review): bare except — KeyError is the expected
            # failure here; anything else is silently swallowed too.
            pass
    for lName in lodNames:
        if firstLoad:
            self.__animControlDict.setdefault(lName, {})
            self.__animControlDict[lName].setdefault(partName, {})

    for animName, filename in anims.items():
        # make sure this lod is in anim control dict
        for lName in lodNames:
            if firstLoad:
                self.__animControlDict[lName][partName][animName] = Actor.AnimDef()

            if isinstance(filename, NodePath):
                # We were given a pre-loaded anim bundle, not a filename.
                assert not filename.isEmpty()
                if filename.node().isOfType(AnimBundleNode.getClassType()):
                    animBundleNP = filename
                else:
                    animBundleNP = filename.find('**/+AnimBundleNode')
                assert not animBundleNP.isEmpty()
                self.__animControlDict[lName][partName][animName].animBundle = animBundleNP.node().getBundle()

            else:
                # We were given a filename that must be loaded.
                # Store the filename only; we will load and bind
                # it (and produce an AnimControl) when it is
                # played.
                self.__animControlDict[lName][partName][animName].filename = filename
def initAnimsOnAllLODs(self, partNames):
    """Ensure an (initially empty) anim dictionary entry exists for
    each of the named parts on every LOD (or on the shared 'common'
    pseudo-LOD when LOD bundles are merged)."""
    if self.mergeLODBundles:
        lods = ['common']
    else:
        lods = self.__partBundleDict.keys()

    for lodName in lods:
        lodDict = self.__animControlDict.setdefault(lodName, {})
        for part in partNames:
            lodDict.setdefault(part, {})
def loadAnimsOnAllLODs(self, anims, partName="modelRoot"):
    """Actor anim loader.  Takes an optional partName (defaults to
    'modelRoot' for non-multipart actors) and dict of corresponding
    anims in the form animName:animPath{}, registering each anim on
    every LOD.

    NOTE(review): this indexes self.__animControlDict[lod][partName]
    directly, so initAnimsOnAllLODs() (or equivalent setup) must
    have created those entries first — confirm callers do this.
    """
    if self.mergeLODBundles:
        lodNames = ['common']
    else:
        lodNames = self.__partBundleDict.keys()

    for animName, filename in anims.items():
        # make sure this lod is in anim control dict
        for lod in lodNames:
            # store the file path only; we will bind it (and produce
            # an AnimControl) when it is played
            self.__animControlDict[lod][partName][animName]= Actor.AnimDef(filename)
def postFlatten(self):
    """Call this after performing an aggressive flatten operation,
    such as flattenStrong(), that involves the Actor.  Especially
    necessary when mergeLODBundles is true, since such an actor may
    be broken by flattening; this restores proper functionality."""
    if self.mergeLODBundles:
        # Re-merge all bundles, and rebuild the common bundle map.
        self.__commonBundleHandles = {}
        for bundleDict in self.__partBundleDict.values():
            for partName, partDef in bundleDict.items():
                sharedHandle = self.__commonBundleHandles.get(partName, None)
                node = partDef.partBundleNP.node()
                if sharedHandle:
                    node.mergeBundles(partDef.partBundleHandle, sharedHandle)
                    partDef.partBundleHandle = sharedHandle
                else:
                    self.__commonBundleHandles[partName] = partDef.partBundleHandle

    # Since we may have merged together some bundles, all of our
    # anims are now suspect.  Force them to reload.
    self.unloadAnims()
def unloadAnims(self, anims=None, partName=None, lodName=None):
    """Actor anim unloader.  Releases the bound AnimControls for the
    selected animations, parts, and lods; the anim definitions are
    retained so the anims can be bound again later.

    anims    - list of animation names, or None for all anims.
    partName - part name, or None for all parts.
    lodName  - lod name, or None for all lods.
    """
    assert Actor.notify.debug("in unloadAnims: %s, part: %s, lod: %s" %
                              (anims, partName, lodName))

    if lodName is None or self.mergeLODBundles:
        lodNames = list(self.__animControlDict.keys())
    else:
        lodNames = [lodName]

    if partName is None:
        if lodNames:
            partNames = list(self.__animControlDict[lodNames[0]].keys())
        else:
            partNames = []
    else:
        partNames = [partName]

    for lName in lodNames:
        for pName in partNames:
            animDict = self.__animControlDict[lName][pName]
            # Refactor: the original duplicated the clear-control
            # loop for the anims-given and anims-None branches;
            # select the AnimDefs once, then clear them uniformly.
            if anims is None:
                animDefs = list(animDict.values())
            else:
                animDefs = [animDict.get(anim) for anim in anims]
            for animDef in animDefs:
                if animDef and animDef.animControl is not None:
                    # Try to clear any control effects before we let
                    # our handle on them go.  This is especially
                    # important if the anim control was blending
                    # animations.
                    animDef.animControl.getPart().clearControlEffects()
                    animDef.animControl = None
def bindAnim(self, animName, partName = None, lodName = None,
             allowAsyncBind = False):
    """Binds the named animation to the named part and/or lod.

    With allowAsyncBind False (the default), the animation is
    guaranteed to be loaded and bound in the main thread before
    this method returns, at the cost of possibly holding up the
    render for a frame or two.  The parameters are the same as for
    getAnimControls(); this is a thin wrapper around that method.
    """
    self.getAnimControls(animName=animName, partName=partName,
                         lodName=lodName, allowAsyncBind=allowAsyncBind)
def bindAllAnims(self, allowAsyncBind = False):
    """Loads and binds every animation that has been defined for
    the Actor."""
    self.getAnimControls(animName=True, allowAsyncBind=allowAsyncBind)
def waitPending(self, partName = None):
    """Blocks until all asynchronously pending animations (that are
    currently playing) have been loaded and bound to the Actor.
    Call this after play() if you are using asynchronous binds but
    need a particular animation to be loaded immediately."""
    for bundle in self.getPartBundles(partName=partName):
        bundle.waitPending()
def __bindAnimToPart(self, animName, partName, lodName,
                     allowAsyncBind = True):
    """
    Binds the named animation to the named part/lod and returns
    the associated animControl.  The animation is loaded and bound
    in a sub-thread, if allowAsyncBind is True,
    self.allowAsyncBind is True, threading is enabled, and the
    animation has a preload table generated for it (e.g. via
    "egg-optchar -preload").  Even though the animation may or may
    not be yet bound at the time this function returns, a usable
    animControl is returned, or None if the animation could not be
    bound.
    """
    # make sure this anim is in the dict
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))

    partDict = self.__animControlDict[lodName]
    animDict = partDict.get(partName)
    if animDict is None:
        # It must be a subpart that hasn't been bound yet.
        animDict = {}
        partDict[partName] = animDict

    anim = animDict.get(animName)
    if anim is None:
        # It must be a subpart that hasn't been bound yet; copy the
        # AnimDef down from the true (parent) part.
        anim = partDict[subpartDef.truePartName].get(animName)
        # Bug fix: guard the copy — the original called makeCopy()
        # unconditionally, raising AttributeError on a missing anim
        # before the "no animation" error below could fire.
        if anim is not None:
            anim = anim.makeCopy()
            animDict[animName] = anim

    if anim is None:
        # Bug fix: the original passed animName as a stray second
        # positional argument to notify.error(); format it into the
        # message string instead.
        Actor.notify.error("actor has no animation %s" % (animName,))

    # only bind if not already bound!
    if anim.animControl:
        return anim.animControl

    if self.mergeLODBundles:
        bundle = self.__commonBundleHandles[subpartDef.truePartName].getBundle()
    else:
        bundle = self.__partBundleDict[lodName][subpartDef.truePartName].getBundle()

    if anim.animBundle:
        # We already have a bundle; just bind it.
        animControl = bundle.bindAnim(anim.animBundle, -1, subpartDef.subset)
    else:
        # Load and bind the anim.  This might be an asynchronous
        # operation that will complete in the background, but if so it
        # will still return a usable AnimControl.
        animControl = bundle.loadBindAnim(
            loader.loader, Filename(anim.filename), -1,
            subpartDef.subset, allowAsyncBind and self.allowAsyncBind)

    if not animControl:
        # Couldn't bind.  (This implies the binding operation was
        # not attempted asynchronously.)
        return None

    # store the animControl
    anim.animControl = animControl
    assert Actor.notify.debug("binding anim: %s to part: %s, lod: %s" %
                              (animName, partName, lodName))
    return animControl
def __copyPartBundles(self, other):
    """Copy the part bundle dictionary from another actor as this
    instance's own.  NOTE: this method does not actually copy geometry;
    it locates the corresponding nodes already present under this
    actor's hierarchy and registers their bundles.
    """
    for lodName in other.__partBundleDict.keys():
        # find the lod
        if lodName == 'lodRoot':
            partLod = self
        else:
            partLod = self.__LODNode.find(str(lodName))
        if partLod.isEmpty():
            Actor.notify.warning("no lod named: %s" % (lodName))
            return None
        for partName, partDef in other.__partBundleDict[lodName].items():
            # We can really only copy from a non-flattened avatar.
            assert partDef.partBundleNP.node().getNumBundles() == 1

            # find the part in our tree
            bundleNP = partLod.find("**/%s%s"%(Actor.partPrefix,partName))
            # NOTE(review): find() returns a NodePath, so this
            # "!= None" test looks like it can never take the else
            # branch; an isEmpty() check was probably intended —
            # confirm against NodePath comparison semantics.
            if (bundleNP != None):
                # store the part bundle
                self.__prepareBundle(bundleNP, partDef.partModel,
                                     partName, lodName)
            else:
                Actor.notify.error("lod: %s has no matching part: %s" %
                                   (lodName, partName))
def __copySubpartDict(self, other):
    """Copies the subpartDict from another as this instance's own.

    This makes a deep copy of the map and all of the names and
    PartSubset objects within it.  We can't use copy.deepcopy()
    because of the included C++ PartSubset objects.
    """
    self.__subpartDict = {}
    for partName, subpartDef in other.__subpartDict.items():
        # (Removed an unused local, subpartDefCopy, that the
        # original assigned but never read.)
        if subpartDef:
            subpartDef = subpartDef.makeCopy()
        self.__subpartDict[partName] = subpartDef
def __copyAnimControls(self, other):
    """Copy the anim control dictionary from another actor, making a
    fresh AnimDef copy of every entry so the two actors can bind and
    play their anims independently."""
    assert(other.mergeLODBundles == self.mergeLODBundles)

    for lodName, otherPartDict in other.__animControlDict.items():
        newPartDict = {}
        self.__animControlDict[lodName] = newPartDict
        for partName, otherAnimDict in otherPartDict.items():
            newAnimDict = {}
            newPartDict[partName] = newAnimDict
            for animName, animDef in otherAnimDict.items():
                newAnimDict[animName] = animDef.makeCopy()
def actorInterval(self, *args, **kw):
    # Returns an ActorInterval wrapping this actor; all arguments are
    # forwarded to the ActorInterval constructor.  The import is local
    # to avoid a hard dependency on direct.interval at module load.
    from direct.interval import ActorInterval
    return ActorInterval.ActorInterval(self, *args, **kw)
def getAnimBlends(self, animName=None, partName=None, lodName=None):
    """ Returns a list of the form:

    [ (lodName, [(animName, [(partName, effect), (partName, effect), ...]),
                 (animName, [(partName, effect), (partName, effect), ...]),
                 ...]),
      (lodName, [(animName, [(partName, effect), (partName, effect), ...]),
                 (animName, [(partName, effect), (partName, effect), ...]),
                 ...]),
      ... ]

    This list reports the non-zero control effects for each
    partName within a particular animation and LOD.  Passing None
    for any parameter means "all". """

    result = []

    if animName is None:
        animNames = self.getAnimNames()
    else:
        animNames = [animName]

    if lodName is None:
        lodNames = self.getLODNames()
        if self.mergeLODBundles:
            # Merged bundles share controls; one LOD is representative.
            lodNames = lodNames[:1]
    else:
        lodNames = [lodName]

    if partName == None and self.__subpartsComplete:
        partNames = self.__subpartDict.keys()
    else:
        partNames = [partName]

    for lodName in lodNames:
        animList = []
        for animName in animNames:
            blendList = []
            for partName in partNames:
                control = self.getAnimControl(animName, partName, lodName)
                if control:
                    part = control.getPart()
                    effect = part.getControlEffect(control)
                    # Only report parts actually contributing.
                    if effect > 0.:
                        blendList.append((partName, effect))
            if blendList:
                animList.append((animName, blendList))
        if animList:
            result.append((lodName, animList))

    return result
def printAnimBlends(self, animName=None, partName=None, lodName=None):
for lodName, animList in self.getAnimBlends(animName, partName, lodName):
print 'LOD %s:' % (lodName)
for animName, blendList in animList:
list = []
for partName, effect in blendList:
list.append('%s:%.3f' % (partName, effect))
print ' %s: %s' % (animName, ', '.join(list))
def osdAnimBlends(self, animName=None, partName=None, lodName=None):
    """Publish per-anim blend effects and play rates to the on-screen
    debug panel; removes the entry when the anim stops contributing."""
    if not onScreenDebug.enabled:
        return
    # puts anim blending info into the on-screen debug panel
    if animName is None:
        animNames = self.getAnimNames()
    else:
        animNames = [animName]
    for animName in animNames:
        # Bug fix: was "animName is 'nothing'" — identity comparison with
        # a string literal only works by CPython interning accident.
        if animName == 'nothing':
            continue
        thisAnim = ''
        totalEffect = 0.
        controls = self.getAnimControls(animName, partName, lodName)
        # first line: per-part blend effects
        for control in controls:
            part = control.getPart()
            name = part.getName()
            effect = part.getControlEffect(control)
            if effect > 0.:
                totalEffect += effect
                thisAnim += ('%s:%.3f, ' % (name, effect))
        thisAnim += "\n"
        # second line: per-part play rates
        for control in controls:
            part = control.getPart()
            name = part.getName()
            rate = control.getPlayRate()
            thisAnim += ('%s:%.1f, ' % (name, rate))
        # don't display anything if this animation is not being played
        itemName = 'anim %s' % animName
        if totalEffect > 0.:
            onScreenDebug.add(itemName, thisAnim)
        else:
            if onScreenDebug.has(itemName):
                onScreenDebug.remove(itemName)
# these functions compensate for actors that are modeled facing the viewer but need
# to face away from the camera in the game
def faceAwayFromViewer(self):
    """Spin the geometry 180 degrees so a viewer-facing model faces away."""
    geom = self.getGeomNode()
    geom.setH(180)
def faceTowardsViewer(self):
    """Reset the geometry heading to 0 so the model faces the viewer again."""
    geom = self.getGeomNode()
    geom.setH(0)
def renamePartBundles(self, partName, newBundleName):
    """Rename the PartBundle of the named part in every LOD.

    partName may also be a subpart; its true part is resolved via the
    subpart dictionary.  LODs that do not carry the part are skipped
    with a warning (previously this raised AttributeError on None)."""
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    for partBundleDict in self.__partBundleDict.values():
        partDef = partBundleDict.get(subpartDef.truePartName)
        if partDef is None:
            # consistent with the warnings used elsewhere in this class
            Actor.notify.warning("no part named %s!" % (partName))
            continue
        partDef.getBundle().setName(newBundleName)
| {
"repo_name": "mgracer48/panda3d",
"path": "direct/src/actor/Actor.py",
"copies": "1",
"size": "105876",
"license": "bsd-3-clause",
"hash": -5916693902674308000,
"line_mean": 40.3901485536,
"line_max": 116,
"alpha_frac": 0.5725943557,
"autogenerated": false,
"ratio": 4.3868241143567435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002817578130153273,
"num_lines": 2558
} |
"""Actor module: contains the Actor class"""
__all__ = ['Actor']
from panda3d.core import *
from panda3d.core import Loader as PandaLoader
from direct.showbase.DirectObject import DirectObject
from direct.directnotify import DirectNotifyGlobal
class Actor(DirectObject, NodePath):
"""
Actor class: Contains methods for creating, manipulating
and playing animations on characters
"""
notify = DirectNotifyGlobal.directNotify.newCategory("Actor")
partPrefix = "__Actor_"
modelLoaderOptions = LoaderOptions(LoaderOptions.LFSearch |
LoaderOptions.LFReportErrors |
LoaderOptions.LFConvertSkeleton)
animLoaderOptions = LoaderOptions(LoaderOptions.LFSearch |
LoaderOptions.LFReportErrors |
LoaderOptions.LFConvertAnim)
validateSubparts = ConfigVariableBool('validate-subparts', True)
mergeLODBundles = ConfigVariableBool('merge-lod-bundles', True)
allowAsyncBind = ConfigVariableBool('allow-async-bind', True)
class PartDef:
    """One entry of the PartBundleDict: a single loaded PartBundle.

    Each separately loaded model file — one per LOD and per piece of a
    multipart Actor — contributes one PartDef.  The ModelRoot node is
    retained alongside the bundle so the ModelPool reference count
    stays accurate.
    """

    def __init__(self, partBundleNP, partBundleHandle, partModel):
        self.partBundleNP = partBundleNP
        self.partBundleHandle = partBundleHandle
        self.partModel = partModel

    def getBundle(self):
        """Return the PartBundle held by our bundle handle."""
        return self.partBundleHandle.getBundle()

    def __repr__(self):
        return 'Actor.PartDef(%r, %r)' % (self.partBundleNP, self.partModel)
class AnimDef:
    """One entry of the AnimControlDict: one animation for one (sub)part.

    Tracks animations that have already been bound (self.animControl is
    a valid AnimControl) as well as those not yet bound (animControl is
    None).  There is one AnimDef per part/sub-part per animation.
    """

    def __init__(self, filename = None, animBundle = None):
        self.filename = filename
        # Bug fix: the constructor previously discarded its animBundle
        # argument (always storing None), which made makeCopy() lose
        # the bundle of an already-loaded animation.
        self.animBundle = animBundle
        self.animControl = None

    def makeCopy(self):
        """Return a fresh, unbound AnimDef with the same filename/bundle."""
        return Actor.AnimDef(self.filename, self.animBundle)

    def __repr__(self):
        return 'Actor.AnimDef(%s)' % (repr(self.filename))
class SubpartDef:
    """One entry of the SubpartDict: an arbitrary named sub-part.

    A sub-part is presented to the user exactly like a true "part" of a
    multi-part Actor, but actually names a subset of the joints of an
    existing part (which remains reachable under its own name).
    """

    def __init__(self, truePartName, subset = PartSubset()):
        self.truePartName = truePartName
        self.subset = subset

    def makeCopy(self):
        """Return a copy whose PartSubset is itself duplicated."""
        return Actor.SubpartDef(self.truePartName, PartSubset(self.subset))

    def __repr__(self):
        return 'Actor.SubpartDef(%r, %r)' % (self.truePartName, self.subset)
def __init__(self, models=None, anims=None, other=None, copy=True,
             lodNode = None, flattenable = True, setFinal = False,
             mergeLODBundles = None, allowAsyncBind = None,
             okMissing = None):
    """Actor constructor: can be used to create single or multipart
    actors.  If another Actor is supplied via `other`, this method acts
    like a copy constructor.  Single part actors are created by calling
    with a model and an animation dictionary (animName:animPath{}):

        a = Actor("panda-3k.egg", {"walk": "panda-walk.egg",
                                   "run": "panda-run.egg"})

    This could be displayed and animated as such:

        a.reparentTo(render)
        a.loop("walk")
        a.stop()

    Multipart actors expect a dictionary of parts and a dictionary of
    animation dictionaries (partName:(animName:animPath{}){}):

        a = Actor(
            # part dictionary
            {"head": "char/dogMM/dogMM_Shorts-head-mod",
             "torso": "char/dogMM/dogMM_Shorts-torso-mod",
             "legs": "char/dogMM/dogMM_Shorts-legs-mod"},
            # dictionary of anim dictionaries
            {"head": {"walk": "char/dogMM/dogMM_Shorts-head-walk",
                      "run": "char/dogMM/dogMM_Shorts-head-run"},
             "torso": {"walk": "char/dogMM/dogMM_Shorts-torso-walk",
                       "run": "char/dogMM/dogMM_Shorts-torso-run"},
             "legs": {"walk": "char/dogMM/dogMM_Shorts-legs-walk",
                      "run": "char/dogMM/dogMM_Shorts-legs-run"}})

    In addition multipart actor parts need to be connected together in a
    meaningful fashion:

        a.attach("head", "torso", "joint-head")
        a.attach("torso", "legs", "joint-hips")

    Other useful Actor class functions:

        # fix actor eye rendering
        a.drawInFront("joint-pupil?", "eyes*")

        # fix bounding volumes - this must be done after drawing
        # the actor for a few frames, otherwise it has no effect
        a.fixBounds()
    """
    # Guard: never run __init__ twice on the same instance.
    try:
        self.Actor_initialized
        return
    except:
        self.Actor_initialized = 1

    # initialize our NodePath essence
    NodePath.__init__(self)

    self.loader = PandaLoader.getGlobalPtr()

    # Set the mergeLODBundles flag.  If this is true, all
    # different LOD's will be merged into a single common bundle
    # (joint hierarchy).  All LOD's will thereafter share the same
    # skeleton, even though they may have been loaded from
    # different egg files.  If this is false, LOD's will be kept
    # completely isolated, and each LOD will have its own skeleton.
    # When this flag is true, __animControlDict has only one key,
    # ['common']; when it is false, __animControlDict has one key
    # per each LOD name.
    if mergeLODBundles is None:
        # If this isn't specified, it comes from the Config.prc file.
        self.mergeLODBundles = Actor.mergeLODBundles.getValue()
    else:
        self.mergeLODBundles = mergeLODBundles

    # Set the allowAsyncBind flag.  If this is true, it enables
    # asynchronous animation binding.  This requires that you have
    # run "egg-optchar -preload" on your animation and models to
    # generate the appropriate AnimPreloadTable.
    if allowAsyncBind is None:
        self.allowAsyncBind = Actor.allowAsyncBind.getValue()
    else:
        self.allowAsyncBind = allowAsyncBind

    # create data structures
    self.__commonBundleHandles = {}
    self.__partBundleDict = {}
    self.__subpartDict = {}
    self.__sortedLODNames = []
    self.__animControlDict = {}

    self.__subpartsComplete = False

    self.__LODNode = None
    self.__LODAnimation = None
    self.__LODCenter = Point3(0, 0, 0)
    self.switches = None

    if (other == None):
        # act like a normal constructor

        # create base hierarchy
        self.gotName = 0

        if flattenable:
            # If we want a flattenable Actor, don't create all
            # those ModelNodes, and the GeomNode is the same as
            # the root.
            root = PandaNode('actor')
            self.assign(NodePath(root))
            self.setGeomNode(NodePath(self))
        else:
            # A standard Actor has a ModelNode at the root, and
            # another ModelNode to protect the GeomNode.
            root = ModelNode('actor')
            root.setPreserveTransform(1)
            self.assign(NodePath(root))
            self.setGeomNode(self.attachNewNode(ModelNode('actorGeom')))

        self.__hasLOD = 0

        # load models
        #
        # four cases:
        #
        #   models, anims{}       = single part actor
        #   models{}, anims{}     = single part actor w/ LOD
        #   models{}, anims{}{}   = multi-part actor
        #   models{}{}, anims{}{} = multi-part actor w/ LOD
        #
        # make sure we have models
        if models:
            # do we have a dictionary of models?
            if type(models) == dict:
                # if this is a dictionary of dictionaries
                if type(models[next(iter(models))]) == dict:
                    # then it must be a multipart actor w/LOD
                    self.setLODNode(node = lodNode)
                    # preserve numerical order for lod's
                    # this will make it easier to set ranges
                    sortedKeys = list(models.keys())
                    sortedKeys.sort()
                    for lodName in sortedKeys:
                        # make a node under the LOD switch
                        # for each lod (just because!)
                        self.addLOD(str(lodName))
                        # iterate over both dicts
                        for modelName in models[lodName]:
                            self.loadModel(models[lodName][modelName],
                                           modelName, lodName, copy = copy,
                                           okMissing = okMissing)
                # then if there is a dictionary of dictionaries of anims
                elif type(anims[next(iter(anims))]) == dict:
                    # then this is a multipart actor w/o LOD
                    for partName in models:
                        # pass in each part
                        self.loadModel(models[partName], partName,
                                       copy = copy, okMissing = okMissing)
                else:
                    # it is a single part actor w/LOD
                    self.setLODNode(node = lodNode)
                    # preserve order of LOD's
                    sortedKeys = list(models.keys())
                    sortedKeys.sort()
                    for lodName in sortedKeys:
                        self.addLOD(str(lodName))
                        # pass in dictionary of parts
                        self.loadModel(models[lodName], lodName=lodName,
                                       copy = copy, okMissing = okMissing)
            else:
                # else it is a single part actor
                self.loadModel(models, copy = copy, okMissing = okMissing)

        # load anims
        # make sure the actor has animations
        if anims:
            if len(anims) >= 1:
                # if so, does it have a dictionary of dictionaries?
                if type(anims[next(iter(anims))]) == dict:
                    # are the models a dict of dicts too?
                    if type(models) == dict:
                        if type(models[next(iter(models))]) == dict:
                            # then we have a multi-part w/ LOD
                            sortedKeys = list(models.keys())
                            sortedKeys.sort()
                            for lodName in sortedKeys:
                                # iterate over both dicts
                                for partName in anims:
                                    self.loadAnims(
                                        anims[partName], partName, lodName)
                        else:
                            # then it must be multi-part w/o LOD
                            for partName in anims:
                                self.loadAnims(anims[partName], partName)
                elif type(models) == dict:
                    # then we have single-part w/ LOD
                    sortedKeys = list(models.keys())
                    sortedKeys.sort()
                    for lodName in sortedKeys:
                        self.loadAnims(anims, lodName=lodName)
                else:
                    # else it is single-part w/o LOD
                    self.loadAnims(anims)
    else:
        self.copyActor(other, True) # overwrite everything

    if setFinal:
        # If setFinal is true, the Actor will set its top bounding
        # volume to be the "final" bounding volume: the bounding
        # volumes below the top volume will not be tested.  If a
        # cull test passes the top bounding volume, the whole
        # Actor is rendered.

        # We do this partly because an Actor is likely to be a
        # fairly small object relative to the scene, and is pretty
        # much going to be all onscreen or all offscreen anyway;
        # and partly because of the Character bug that doesn't
        # update the bounding volume for pieces that animate away
        # from their original position.  It's disturbing to see
        # someone's hands disappear; better to cull the whole
        # object or none of it.
        self.__geomNode.node().setFinal(1)
def delete(self):
    """Tear the Actor down; safe to call more than once."""
    if hasattr(self, 'Actor_deleted'):
        return
    self.Actor_deleted = 1
    self.cleanup()
def copyActor(self, other, overwrite=False):
    """Copy-constructor body: duplicate other's scene graph, LOD
    switches, part bundles, subpart dictionary and anim controls into
    self.  If overwrite is true, self's own contents are replaced
    wholesale; otherwise other's copy is parented under self."""
    # act like a copy constructor
    self.gotName = other.gotName

    # copy the scene graph elements of other
    if (overwrite):
        otherCopy = other.copyTo(NodePath())
        otherCopy.detachNode()
        # assign these elements to ourselve (overwrite)
        self.assign(otherCopy)
    else:
        # just copy these to ourselves
        otherCopy = other.copyTo(self)
    # masad: check if otherCopy has a geomNode as its first child
    # if actor is initialized with flattenable, then otherCopy, not
    # its first child, is the geom node; check __init__, for reference
    if other.getGeomNode().getName() == other.getName():
        self.setGeomNode(otherCopy)
    else:
        self.setGeomNode(otherCopy.getChild(0))

    # copy the switches for lods
    self.switches = other.switches
    self.__LODNode = self.find('**/+LODNode')
    self.__hasLOD = 0
    if (not self.__LODNode.isEmpty()):
        self.__hasLOD = 1

    # copy the part dictionary from other
    self.__copyPartBundles(other)
    self.__copySubpartDict(other)
    self.__subpartsComplete = other.__subpartsComplete

    # copy the anim dictionary from other
    self.__copyAnimControls(other)
def __cmp__(self, other):
    """Compare Actors by identity only (Python 2 comparison protocol).

    NodePath's inherited __cmp__ compares via compareTo(), which would
    make a plain NodePath referencing the same node equal to this
    Actor; Actors must instead be distinct unless they are the same
    object."""
    return 0 if self is other else 1
def __str__(self):
    """Return a one-line summary of name, parts, LODs and anims."""
    return "Actor %s, parts = %s, LODs = %s, anims = %s" % (
        self.getName(), self.getPartNames(),
        self.getLODNames(), self.getAnimNames())
def listJoints(self, partName="modelRoot", lodName="lodRoot"):
    """Handy utility: print the joint hierarchy of the named part/LOD."""
    if self.mergeLODBundles:
        # merged mode keeps one shared bundle dict for all LODs
        partBundleDict = self.__commonBundleHandles
    else:
        partBundleDict = self.__partBundleDict.get(lodName)
        if not partBundleDict:
            Actor.notify.error("no lod named: %s" % (lodName))

    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    partDef = partBundleDict.get(subpartDef.truePartName)
    if partDef is None:
        Actor.notify.error("no part named: %s" % (partName))

    self.__doListJoints(0, partDef.getBundle(),
                        subpartDef.subset.isIncludeEmpty(),
                        subpartDef.subset)
def __doListJoints(self, indentLevel, part, isIncluded, subset):
    """Recursive helper for listJoints(): print one node, then recurse
    over its children, tracking whether the subset includes them."""
    jointName = part.getName()
    if subset.matchesInclude(jointName):
        isIncluded = True
    elif subset.matchesExclude(jointName):
        isIncluded = False

    if isIncluded:
        value = ''
        if hasattr(part, 'outputValue'):
            stream = LineStream()
            part.outputValue(stream)
            value = stream.getLine()
        print(' '.join((' ' * indentLevel, part.getName(), value)))

    for child in part.getChildren():
        self.__doListJoints(indentLevel + 2, child, isIncluded, subset)
def getActorInfo(self):
    """Describe the Actor as nested lists, useful for iteration:
    [[lodName, [[partName, PartBundle,
                 [[animName, filename, animControl], ...]], ...]], ...]
    """
    lodInfo = []
    for lodName, partDict in self.__animControlDict.items():
        if self.mergeLODBundles:
            # anims are keyed under 'common'; report the real first LOD
            lodName = self.__sortedLODNames[0]
        partBundleDict = self.__partBundleDict.get(lodName)

        partInfo = []
        for partName in partDict:
            subpartDef = self.__subpartDict.get(
                partName, Actor.SubpartDef(partName))
            partDef = partBundleDict.get(subpartDef.truePartName)
            animDict = partDict[partName]
            animInfo = [[animName, animDef.filename, animDef.animControl]
                        for animName, animDef in animDict.items()]
            partInfo.append([partName, partDef.getBundle(), animInfo])
        lodInfo.append([lodName, partInfo])
    return lodInfo
def getAnimNames(self):
    """Return distinct anim names known to any part, in discovery order."""
    names = []
    for _lodName, lodInfo in self.getActorInfo():
        for _partName, _bundle, animInfo in lodInfo:
            for animName, _file, _control in animInfo:
                if animName not in names:
                    names.append(animName)
    return names
def pprint(self):
    """Pretty print actor's details to stdout."""
    for lodName, lodInfo in self.getActorInfo():
        print('LOD: %s' % lodName)
        for partName, bundle, animInfo in lodInfo:
            print(' Part: %s' % partName)
            print(' Bundle: %r' % bundle)
            for animName, file, animControl in animInfo:
                print(' Anim: %s' % animName)
                print(' File: %s' % file)
                if animControl is None:
                    # not yet bound to an AnimControl
                    print(' (not loaded)')
                else:
                    print(' NumFrames: %d PlayRate: %0.2f' %
                          (animControl.getNumFrames(),
                           animControl.getPlayRate()))
def cleanup(self):
    """Full teardown: stop anims, drop Python data, detach geometry,
    then remove this NodePath itself."""
    self.stop(None)
    self.clearPythonData()
    self.flush()
    geom = self.__geomNode
    if geom:
        geom.removeNode()
        self.__geomNode = None
    if not self.isEmpty():
        self.removeNode()
def removeNode(self):
    """Detach from the graph; warn (debug builds) if geometry is still
    attached, i.e. cleanup() was never called."""
    geom = self.__geomNode
    if geom and geom.getNumChildren() > 0:
        assert self.notify.warning("called actor.removeNode() on %s without calling cleanup()" % self.getName())
    NodePath.removeNode(self)
def clearPythonData(self):
    """Reset every Python-side bookkeeping structure to empty."""
    self.__commonBundleHandles = {}
    self.__partBundleDict = {}
    self.__subpartDict = {}
    self.__animControlDict = {}
    self.__sortedLODNames = []
def flush(self):
    """Release Python data and detach LOD node and geometry children,
    leaving the Actor's own NodePath in place."""
    self.clearPythonData()

    lodNP = self.__LODNode
    if lodNP and not lodNP.isEmpty():
        lodNP.removeNode()
    self.__LODNode = None

    # detach every child of the geom node
    if self.__geomNode:
        self.__geomNode.getChildren().detach()

    self.__hasLOD = 0
# accessing
def getAnimControlDict(self):
    """Return the live anim-control dictionary (a reference, not a copy)."""
    return self.__animControlDict
def removeAnimControlDict(self):
    """Drop every recorded anim control by replacing the dict."""
    self.__animControlDict = {}
def getPartBundleDict(self):
    """Return the live part-bundle dictionary (a reference, not a copy)."""
    return self.__partBundleDict
def getPartBundles(self, partName = None):
    """Return the PartBundles of the whole Actor (partName None) or of
    the single named part, across every LOD."""
    bundles = []
    for lodName, partBundleDict in self.__partBundleDict.items():
        if partName is None:
            bundles.extend(partDef.getBundle()
                           for partDef in partBundleDict.values())
            continue
        subpartDef = self.__subpartDict.get(
            partName, Actor.SubpartDef(partName))
        partDef = partBundleDict.get(subpartDef.truePartName)
        if partDef is not None:
            bundles.append(partDef.getBundle())
        else:
            Actor.notify.warning("Couldn't find part: %s" % (partName))
    return bundles
def __updateSortedLODNames(self):
    """Refresh the cached LOD-name list, sorted highest detail first.

    Numeric names sort by value; otherwise the first letter is ranked
    h > m > l > f (an unknown letter raises KeyError, as before)."""
    def sortKey(x):
        if str(x).isdigit():
            return int(x)
        smap = {'h': 3, 'm': 2, 'l': 1, 'f': 0}
        return smap[x[0]]
    self.__sortedLODNames = sorted(self.__partBundleDict.keys(),
                                   key=sortKey, reverse=True)
def getLODNames(self):
    """Return the Actor's LOD names ('lodRoot' if not an LOD actor).

    Caution: this is the cached list itself, not a copy."""
    return self.__sortedLODNames
def getPartNames(self):
    """Return part names (from an arbitrary LOD) plus subpart names;
    'modelRoot' for a non-multipart actor."""
    names = []
    if self.__partBundleDict:
        names = list(next(iter(self.__partBundleDict.values())).keys())
    return names + list(self.__subpartDict.keys())
def getGeomNode(self):
    """Return the node under which all actor geometry lives."""
    return self.__geomNode
def setGeomNode(self, node):
    """Record the node that contains all actor geometry."""
    self.__geomNode = node
def getLODNode(self):
    """Return the underlying LODNode that switches geometry in and out."""
    return self.__LODNode.node()
def setLODNode(self, node=None):
    """
    Set the node that switches actor geometry in and out.
    If one is not supplied as an argument, make one
    """
    if (node == None):
        node = LODNode.makeDefaultLod("lod")

    if self.__LODNode:
        # NOTE(review): when an LOD node already exists this stores the
        # bare LODNode (not a NodePath) and skips re-initializing
        # __hasLOD/switches — looks suspicious; confirm intent upstream.
        self.__LODNode = node
    else:
        self.__LODNode = self.__geomNode.attachNewNode(node)
        self.__hasLOD = 1
        self.switches = {}
def useLOD(self, lodName):
    """Force the Actor to display ONLY the named LOD."""
    lodNP = self.__LODNode
    child = lodNP.find(str(lodName))
    index = lodNP.node().findChild(child.node())
    lodNP.node().forceSwitch(index)
def printLOD(self):
    """Print the Python-side and C++-side switch distances per LOD."""
    for eachLod in self.__sortedLODNames:
        inDist, outDist = self.switches[eachLod]
        print("python switches for %s: in: %d, out %d" % (eachLod,
                                                          inDist,
                                                          outDist))

    lodNode = self.__LODNode.node()
    for eachSwitch in range(0, lodNode.getNumSwitches()):
        print("c++ switches for %d: in: %d, out: %d" % (eachSwitch,
                                                        lodNode.getIn(eachSwitch),
                                                        lodNode.getOut(eachSwitch)))
def resetLOD(self):
    """Restore normal switch-distance behaviour (undoes useLOD())."""
    self.__LODNode.node().clearForceSwitch()
def addLOD(self, lodName, inDist=0, outDist=0, center=None):
    """Create a named child under the LODNode to parent one LOD's
    geometry, recording its switch distances."""
    self.__LODNode.attachNewNode(str(lodName))
    # keep the Python-side record and the LODNode in sync
    self.switches[lodName] = [inDist, outDist]
    self.__LODNode.node().addSwitch(inDist, outDist)
    if center is not None:
        self.setCenter(center)
def setLOD(self, lodName, inDist=0, outDist=0):
    """Update the in/out switch distances of an existing LOD."""
    # keep the Python-side record and the LODNode in sync
    self.switches[lodName] = [inDist, outDist]
    self.__LODNode.node().setSwitch(self.getLODIndex(lodName),
                                    inDist, outDist)
def getLODIndex(self, lodName):
    """Return the child index of the named LOD (safe but expensive)."""
    children = list(self.__LODNode.getChildren())
    return children.index(self.getLOD(lodName))
def getLOD(self, lodName):
    """Return the named LOD node under the LODNode, or None if absent."""
    if not self.__LODNode:
        return None
    lod = self.__LODNode.find(str(lodName))
    return None if lod.isEmpty() else lod
def hasLOD(self):
    """Return 1 if this Actor has LODs, 0 otherwise."""
    return self.__hasLOD
def setCenter(self, center):
    """Set the LOD center point (origin when None) and re-apply any
    LOD-animation settings, which depend on it."""
    self.__LODCenter = Point3(0, 0, 0) if center is None else center
    if self.__LODNode:
        self.__LODNode.node().setCenter(self.__LODCenter)
    if self.__LODAnimation:
        self.setLODAnimation(*self.__LODAnimation)
def setLODAnimation(self, farDistance, nearDistance, delayFactor):
    """Animate less frequently as the Actor recedes from the camera.

    Closer than nearDistance: every frame.  Exactly farDistance away:
    once per delayFactor seconds (delayFactor > 0).  In between, the
    rate interpolates linearly, and continues past farDistance so the
    Actor is animated ever less frequently as it recedes."""
    self.__LODAnimation = (farDistance, nearDistance, delayFactor)
    for lodData in self.__partBundleDict.values():
        for partData in lodData.values():
            partData.partBundleNP.node().setLodAnimation(
                self.__LODCenter, farDistance, nearDistance, delayFactor)
def clearLODAnimation(self):
    """Undo setLODAnimation(): animate every frame again regardless of
    distance from the camera."""
    self.__LODAnimation = None
    for lodData in self.__partBundleDict.values():
        for partData in lodData.values():
            partData.partBundleNP.node().clearLodAnimation()
def update(self, lod=0, partName=None, lodName=None, force=False):
    """Update the Actor's joints in one LOD (by index into the LOD
    list, or by explicit lodName).  With force, every joint is updated
    even if not believed necessary.  Returns True if any joint changed."""
    lodNames = self.getLODNames() if lodName is None else [lodName]

    anyChanged = False
    if lod >= len(lodNames):
        self.notify.warning('update() - no lod: %d' % lod)
        return anyChanged

    lodName = lodNames[lod]
    if partName is None:
        partNames = list(self.__partBundleDict[lodName].keys())
    else:
        partNames = [partName]

    for partName in partNames:
        partBundle = self.getPartBundle(partName, lodNames[lod])
        changed = partBundle.forceUpdate() if force else partBundle.update()
        if changed:
            anyChanged = True
    return anyChanged
def getFrameRate(self, animName=None, partName=None):
    """Actual frame rate of the given anim/part (currently playing anim
    / first part when unspecified); None if no matching controls.
    Only reports for an arbitrary LOD."""
    # kept for parity: errors out (StopIteration) if no anims loaded
    lodName = next(iter(self.__animControlDict))
    controls = self.getAnimControls(animName, partName)
    if not controls:
        return None
    return controls[0].getFrameRate()
def getBaseFrameRate(self, animName=None, partName=None):
    """Frame rate of the given anim/part as authored, ignoring any play
    rate currently in effect; None if no matching controls."""
    # kept for parity: errors out (StopIteration) if no anims loaded
    lodName = next(iter(self.__animControlDict))
    controls = self.getAnimControls(animName, partName)
    if not controls:
        return None
    return controls[0].getAnim().getBaseFrameRate()
def getPlayRate(self, animName=None, partName=None):
    """Play rate of the given anim/part (first part / current anim when
    unspecified); None when unknown.  Arbitrary LOD only."""
    if not self.__animControlDict:
        return None
    controls = self.getAnimControls(animName, partName)
    if controls:
        return controls[0].getPlayRate()
    return None
def setPlayRate(self, rate, animName, partName=None):
    """Set the play rate of the named anim for the given part (all
    parts when None), across all LODs.  animName is mandatory by design
    — defaulting to the current anim proved error-prone."""
    for control in self.getAnimControls(animName, partName):
        control.setPlayRate(rate)
def getDuration(self, animName=None, partName=None,
                fromFrame=None, toFrame=None):
    """Duration in seconds of the anim (or of the inclusive frame range
    fromFrame..toFrame); None if no matching controls.  Arbitrary LOD."""
    # kept for parity: errors out (StopIteration) if no anims loaded
    lodName = next(iter(self.__animControlDict))
    controls = self.getAnimControls(animName, partName)
    if not controls:
        return None

    animControl = controls[0]
    start = 0 if fromFrame is None else fromFrame
    end = animControl.getNumFrames() - 1 if toFrame is None else toFrame
    return ((end + 1) - start) / animControl.getFrameRate()
def getNumFrames(self, animName=None, partName=None):
    """Frame count of the given anim/part; None if no matching controls."""
    # kept for parity: errors out (StopIteration) if no anims loaded
    lodName = next(iter(self.__animControlDict))
    controls = self.getAnimControls(animName, partName)
    if not controls:
        return None
    return controls[0].getNumFrames()
def getFrameTime(self, anim, frame, partName=None):
    """Seconds from the start of the anim to the given frame number."""
    numFrames = self.getNumFrames(anim, partName)
    animTime = self.getDuration(anim, partName)
    return animTime * float(frame) / numFrames
def getCurrentAnim(self, partName=None):
    """Name of the anim currently playing on the Actor, or None.  With
    partName None, an arbitrary part is inspected.  Arbitrary LOD only."""
    if not self.__animControlDict:
        return None

    lodName, animControlDict = next(iter(self.__animControlDict.items()))
    if partName is None:
        partName, animDict = next(iter(animControlDict.items()))
    else:
        animDict = animControlDict.get(partName)
        if animDict is None:
            # part was not present
            Actor.notify.warning("couldn't find part: %s" % (partName))
            return None

    # return the first anim found playing on the named part
    for animName, anim in animDict.items():
        if anim.animControl and anim.animControl.isPlaying():
            return animName
    return None
def getCurrentFrame(self, animName=None, partName=None):
    """Current frame number of the named anim (or of whichever anim is
    currently playing when animName is None).  With partName None, an
    arbitrary part is inspected.  Arbitrary LOD only."""
    lodName, animControlDict = next(iter(self.__animControlDict.items()))
    if partName is None:
        partName, animDict = next(iter(animControlDict.items()))
    else:
        animDict = animControlDict.get(partName)
        if animDict is None:
            # part was not present
            Actor.notify.warning("couldn't find part: %s" % (partName))
            return None

    if animName:
        anim = animDict.get(animName)
        if not anim:
            Actor.notify.warning("couldn't find anim: %s" % (animName))
        elif anim.animControl:
            return anim.animControl.getFrame()
    else:
        # report the first anim found playing on the named part
        for animName, anim in animDict.items():
            if anim.animControl and anim.animControl.isPlaying():
                return anim.animControl.getFrame()
    return None
# arranging
def getPart(self, partName, lodName="lodRoot"):
    """Return the named part's NodePath in the given LOD, or None."""
    partBundleDict = self.__partBundleDict.get(lodName)
    if not partBundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    partDef = partBundleDict.get(subpartDef.truePartName)
    return partDef.partBundleNP if partDef is not None else None
def getPartBundle(self, partName, lodName="lodRoot"):
    """Return the named part's PartBundle in the given LOD, or None."""
    partBundleDict = self.__partBundleDict.get(lodName)
    if not partBundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    partDef = partBundleDict.get(subpartDef.truePartName)
    return partDef.getBundle() if partDef is not None else None
def removePart(self, partName, lodName="lodRoot"):
    """Remove geometry and animations of the named part in the given
    LOD.  NOTE: child geometry is removed along with it."""
    # geometry side
    partBundleDict = self.__partBundleDict.get(lodName)
    if not partBundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return
    if partName in partBundleDict:
        partBundleDict[partName].partBundleNP.removeNode()
        del partBundleDict[partName]

    # animation side: merged bundles keep anims under 'common'
    if self.mergeLODBundles:
        lodName = 'common'
    partDict = self.__animControlDict.get(lodName)
    if not partDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return
    if partName in partDict:
        del partDict[partName]
def hidePart(self, partName, lodName="lodRoot"):
    """Stop the named part from rendering while leaving it in the tree.
    NOTE: affects child geometry too."""
    partBundleDict = self.__partBundleDict.get(lodName)
    if not partBundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return
    partDef = partBundleDict.get(partName)
    if not partDef:
        Actor.notify.warning("no part named %s!" % (partName))
        return
    partDef.partBundleNP.hide()
def showPart(self, partName, lodName="lodRoot"):
    """Make the given part render while in the tree.
    NOTE: this will affect child geometry
    """
    lodDict = self.__partBundleDict.get(lodName)
    if not lodDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return
    entry = lodDict.get(partName)
    if not entry:
        Actor.notify.warning("no part named %s!" % (partName))
        return
    entry.partBundleNP.show()
def showAllParts(self, partName, lodName="lodRoot"):
    """Make the given part and all its children render while in the tree.
    NOTE: this will affect child geometry
    """
    lodDict = self.__partBundleDict.get(lodName)
    if not lodDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return
    entry = lodDict.get(partName)
    if not entry:
        Actor.notify.warning("no part named %s!" % (partName))
        return
    # Show the part itself, then everything parented under it.
    np = entry.partBundleNP
    np.show()
    np.getChildren().show()
def exposeJoint(self, node, partName, jointName, lodName="lodRoot",
                localTransform = 0):
    """exposeJoint(self, NodePath, string, string, key="lodRoot")
    Starts the joint animating the indicated node. As the joint
    animates, it will transform the node by the corresponding
    amount. This will replace whatever matrix is on the node each
    frame. The default is to expose the net transform from the root,
    but if localTransform is true, only the node's local transform
    from its parent is exposed.

    Returns the node that receives the joint's transform (the one
    passed in, or a newly created one if node was None), or None if
    the lod or part could not be found."""
    partBundleDict = self.__partBundleDict.get(lodName)
    if not partBundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    # A subpart name resolves to the true part it was defined on.
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    partDef = partBundleDict.get(subpartDef.truePartName)
    if partDef:
        bundle = partDef.getBundle()
    else:
        Actor.notify.warning("no part named %s!" % (partName))
        return None
    # Get a handle to the joint.
    joint = bundle.findChild(jointName)
    if node == None:
        # No target node supplied: create one under the actor itself.
        node = self.attachNewNode(jointName)
    if (joint):
        # Attach the node so the joint drives its transform each frame.
        if localTransform:
            joint.addLocalTransform(node.node())
        else:
            joint.addNetTransform(node.node())
    else:
        # Still return the (possibly new) node even when the joint is
        # missing, matching historical behavior.
        Actor.notify.warning("no joint named %s!" % (jointName))
    return node
def stopJoint(self, partName, jointName, lodName="lodRoot"):
    """stopJoint(self, string, string, key="lodRoot")
    Stops the joint from animating external nodes.  If the joint
    is animating a transform on a node, this will permanently stop
    it.  However, this does not affect vertex animations."""
    lodDict = self.__partBundleDict.get(lodName)
    if not lodDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    spd = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    entry = lodDict.get(spd.truePartName)
    if not entry:
        Actor.notify.warning("no part named %s!" % (partName))
        return None
    # Clear both kinds of external transform connections on the joint.
    joint = entry.getBundle().findChild(jointName)
    if not joint:
        Actor.notify.warning("no joint named %s!" % (jointName))
        return
    joint.clearNetTransforms()
    joint.clearLocalTransforms()
def getJoints(self, partName = None, jointName = '*', lodName = None):
    """ Returns the list of all joints, from the named part or
    from all parts, that match the indicated jointName.  The
    jointName may include pattern characters like *. """
    joints=[]
    pattern = GlobPattern(jointName)
    # Decide which per-part bundle dictionaries to search.
    if lodName == None and self.mergeLODBundles:
        # Get the common bundle.
        partBundleDicts = [self.__commonBundleHandles]
    elif lodName == None:
        # Get all LOD's.
        partBundleDicts = self.__partBundleDict.values()
    else:
        # Get one LOD.
        partBundleDict = self.__partBundleDict.get(lodName)
        if not partBundleDict:
            Actor.notify.warning("couldn't find lod: %s" % (lodName))
            return []
        partBundleDicts = [partBundleDict]
    for partBundleDict in partBundleDicts:
        parts = []
        if partName:
            subpartDef = self.__subpartDict.get(partName, None)
            if not subpartDef:
                # Whole part
                subset = None
                partDef = partBundleDict.get(partName)
            else:
                # Sub-part: restrict the search to its joint subset.
                subset = subpartDef.subset
                partDef = partBundleDict.get(subpartDef.truePartName)
            if not partDef:
                # NOTE: returns immediately, discarding any joints
                # already collected from earlier LODs.
                Actor.notify.warning("no part named %s!" % (partName))
                return []
            parts = [partDef]
        else:
            subset = None
            parts = partBundleDict.values()
        for partData in parts:
            partBundle = partData.getBundle()
            if not pattern.hasGlobCharacters() and not subset:
                # The simple case: exact name, no subset filter.
                joint = partBundle.findChild(jointName)
                if joint:
                    joints.append(joint)
            else:
                # The more complex case: recursive walk honoring the
                # glob pattern and the subpart's include/exclude rules.
                isIncluded = True
                if subset:
                    isIncluded = subset.isIncludeEmpty()
                self.__getPartJoints(joints, pattern, partBundle, subset, isIncluded)
    return joints
def getOverlappingJoints(self, partNameA, partNameB, jointName = '*', lodName = None):
    """Return the set of joints, matching jointName, that are
    shared between partNameA and partNameB."""
    def matching(part):
        # Collect the matching joints for one part as a set.
        return set(self.getJoints(partName = part, jointName = jointName, lodName = lodName))
    return matching(partNameA).intersection(matching(partNameB))
def __getPartJoints(self, joints, pattern, partNode, subset, isIncluded):
    """ Recursively walks the joint hierarchy to look for matching
    joint names, implementing getJoints().

    joints     -- output list, appended to in place.
    pattern    -- GlobPattern to match joint names against.
    partNode   -- current node in the joint hierarchy.
    subset     -- optional PartSubset restricting the traversal.
    isIncluded -- inherited inclusion state from the parent node.
    """
    name = partNode.getName()
    if subset:
        # Constrain the traversal just to the named subset.  An
        # include/exclude match here flips the state for this node
        # and all of its descendents (until flipped again deeper).
        if subset.matchesInclude(name):
            isIncluded = True
        elif subset.matchesExclude(name):
            isIncluded = False
    # Only actual moving parts count as joints, not grouping nodes.
    if isIncluded and pattern.matches(name) and isinstance(partNode, MovingPartBase):
        joints.append(partNode)
    for child in partNode.getChildren():
        self.__getPartJoints(joints, pattern, child, subset, isIncluded)
def getJointTransform(self, partName, jointName, lodName='lodRoot'):
    """Return the named joint's default transform value, or None if
    the lod, part or joint cannot be found."""
    lodDict = self.__partBundleDict.get(lodName)
    if not lodDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    spd = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    entry = lodDict.get(spd.truePartName)
    if not entry:
        Actor.notify.warning("no part named %s!" % (partName))
        return None
    joint = entry.getBundle().findChild(jointName)
    if joint is None:
        Actor.notify.warning("no joint named %s!" % (jointName))
        return None
    return joint.getDefaultValue()
def getJointTransformState(self, partName, jointName, lodName='lodRoot'):
    """Return the named joint's current TransformState, or None if
    the lod, part or joint cannot be found."""
    lodDict = self.__partBundleDict.get(lodName)
    if not lodDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    spd = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    entry = lodDict.get(spd.truePartName)
    if not entry:
        Actor.notify.warning("no part named %s!" % (partName))
        return None
    joint = entry.getBundle().findChild(jointName)
    if joint is None:
        Actor.notify.warning("no joint named %s!" % (jointName))
        return None
    return joint.getTransformState()
def controlJoint(self, node, partName, jointName, lodName="lodRoot"):
    """The converse of exposeJoint: this associates the joint with
    the indicated node, so that the joint transform will be copied
    from the node to the joint each frame.  This can be used for
    programmer animation of a particular joint at runtime.

    The parameter node should be the NodePath for the node whose
    transform will animate the joint.  If node is None, a new node
    will automatically be created and loaded with the joint's
    initial transform.  In either case, the node used will be
    returned.

    It used to be necessary to call this before any animations
    have been loaded and bound, but that is no longer so.

    Note: lodName is accepted but unused here; the joint is
    controlled on every LOD's bundle."""
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    trueName = subpartDef.truePartName
    anyGood = False
    # Bind the control node into each LOD's bundle.
    for bundleDict in self.__partBundleDict.values():
        bundle = bundleDict[trueName].getBundle()
        if node == None:
            # Create the control node lazily, once, on the first
            # bundle; subsequent iterations reuse it.
            node = self.attachNewNode(ModelNode(jointName))
            joint = bundle.findChild(jointName)
            if joint and isinstance(joint, MovingPartMatrix):
                # Seed the node with the joint's rest transform.
                node.setMat(joint.getDefaultValue())
        if bundle.controlJoint(jointName, node.node()):
            anyGood = True
    if not anyGood:
        self.notify.warning("Cannot control joint %s" % (jointName))
    return node
def freezeJoint(self, partName, jointName, transform = None,
                pos=Vec3(0,0,0), hpr=Vec3(0,0,0), scale=Vec3(1,1,1)):
    """Similar to controlJoint, but the transform assigned is
    static, and may not be animated at runtime (without another
    subsequent call to freezeJoint).  This is slightly more
    optimal than controlJoint() for cases in which the transform
    is not intended to be animated during the lifetime of the
    Actor."""
    if transform is None:
        # Build the static transform from the component values.
        transform = TransformState.makePosHprScale(pos, hpr, scale)
    trueName = self.__subpartDict.get(partName, Actor.SubpartDef(partName)).truePartName
    # Freeze the joint on every LOD's bundle; succeed if any did.
    succeeded = False
    for bundleDict in self.__partBundleDict.values():
        if bundleDict[trueName].getBundle().freezeJoint(jointName, transform):
            succeeded = True
    if not succeeded:
        self.notify.warning("Cannot freeze joint %s" % (jointName))
def releaseJoint(self, partName, jointName):
    """Undoes a previous call to controlJoint() or freezeJoint()
    and restores the named joint to its normal animation."""
    trueName = self.__subpartDict.get(partName, Actor.SubpartDef(partName)).truePartName
    # Release on every LOD's bundle.
    for lodDict in self.__partBundleDict.values():
        lodDict[trueName].getBundle().releaseJoint(jointName)
def instance(self, path, partName, jointName, lodName="lodRoot"):
    """instance(self, NodePath, string, string, key="lodRoot")
    Instance a nodePath to an actor part at a joint called jointName"""
    lodDict = self.__partBundleDict.get(lodName)
    if not lodDict:
        Actor.notify.warning("no lod named %s!" % (lodName))
        return None
    spd = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    entry = lodDict.get(spd.truePartName)
    if not entry:
        Actor.notify.warning("no part named %s!" % (partName))
        return None
    joint = entry.partBundleNP.find("**/" + jointName)
    if joint.isEmpty():
        Actor.notify.warning("%s not found!" % (jointName))
        return None
    return path.instanceTo(joint)
def attach(self, partName, anotherPartName, jointName, lodName="lodRoot"):
    """attach(self, string, string, string, key="lodRoot")
    Attach one actor part to another at a joint called jointName"""
    lodDict = self.__partBundleDict.get(lodName)
    if not lodDict:
        Actor.notify.warning("no lod named %s!" % (lodName))
        return
    spd = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    entry = lodDict.get(spd.truePartName)
    if not entry:
        Actor.notify.warning("no part named %s!" % (partName))
        return
    otherEntry = lodDict.get(anotherPartName)
    if not otherEntry:
        Actor.notify.warning("no part named %s!" % (anotherPartName))
        return
    # Find the joint node under the other part and hang this part on it.
    joint = otherEntry.partBundleNP.find("**/" + jointName)
    if joint.isEmpty():
        Actor.notify.warning("%s not found!" % (jointName))
        return
    entry.partBundleNP.reparentTo(joint)
def drawInFront(self, frontPartName, backPartName, mode,
                root=None, lodName=None):
    """drawInFront(self, string, int, string=None, key=None)
    Arrange geometry so the frontPart(s) are drawn in front of
    backPart.

    If mode == -1, the geometry is simply arranged to be drawn in
    the correct order, assuming it is already under a
    direct-render scene graph (like the DirectGui system).  That
    is, frontPart is reparented to backPart, and backPart is
    reordered to appear first among its siblings.

    If mode == -2, the geometry is arranged to be drawn in the
    correct order, and depth test/write is turned off for
    frontPart.

    If mode == -3, frontPart is drawn as a decal onto backPart.
    This assumes that frontPart is mostly coplanar with and does
    not extend beyond backPart, and that backPart is mostly flat
    (not self-occluding).

    If mode > 0, the frontPart geometry is placed in the 'fixed'
    bin, with the indicated drawing order.  This will cause it to
    be drawn after almost all other geometry.  In this case, the
    backPartName is actually unused.

    Takes an optional argument root as the start of the search for the
    given parts.  Also takes optional lod name to refine search for the
    named parts.  If root and lod are defined, we search for the given
    root under the given lod.
    """
    # check to see if we are working within an lod
    if lodName != None:
        # find the named lod node
        lodRoot = self.__LODNode.find(str(lodName))
        if root == None:
            # no need to look further
            root = lodRoot
        else:
            # look for root under lod
            root = lodRoot.find("**/" + root)
    else:
        # start search from self if no root and no lod given
        if root == None:
            root = self
    # There may be several matching front parts; collect them all.
    frontParts = root.findAllMatches("**/" + frontPartName)
    if mode > 0:
        # Use the 'fixed' bin instead of reordering the scene
        # graph.
        for part in frontParts:
            part.setBin('fixed', mode)
        return
    if mode == -2:
        # Turn off depth test/write on the frontParts.
        for part in frontParts:
            part.setDepthWrite(0)
            part.setDepthTest(0)
    # Find the back part.
    backPart = root.find("**/" + backPartName)
    if (backPart.isEmpty()):
        Actor.notify.warning("no part named %s!" % (backPartName))
        return
    if mode == -3:
        # Draw as a decal.
        backPart.node().setEffect(DecalEffect.make())
    else:
        # Reorder the backPart to be the first of its siblings.
        backPart.reparentTo(backPart.getParent(), -1)
    #reparent all the front parts to the back part
    frontParts.reparentTo(backPart)
def fixBounds(self, partName = None):
    """Force recomputation of the bounding volumes of the geometry
    belonging to the named part, or to every part of every lod when
    partName is None.
    """
    if partName is None:
        # Iterate through every part of every lod.
        for lodData in self.__partBundleDict.values():
            for partData in lodData.values():
                self.__fixPartBounds(partData)
    else:
        # Iterate through the lods looking for the specific part.
        for lodData in self.__partBundleDict.values():
            partData = lodData.get(partName)
            if partData:
                self.__fixPartBounds(partData)

def __fixPartBounds(self, partData):
    """Helper for fixBounds(): update the part's character and mark
    every Geom and GeomNode underneath it as having stale bounds so
    they will be recomputed."""
    char = partData.partBundleNP
    char.node().update()
    for geomNode in char.findAllMatches("**/+GeomNode"):
        for geom in geomNode.node().getGeoms():
            geom.markBoundsStale()
        geomNode.node().markInternalBoundsStale()
def fixBounds_old(self, part=None):
    """fixBounds(self, nodePath=None)
    Force recomputation of bounding spheres for all geoms
    in a given part.  If no part specified, fix all geoms
    in this actor.

    Deprecated older variant; note that `part` here is a NodePath to
    search under, not a part name as in fixBounds().
    """
    # if no part name specified fix all parts
    if (part==None):
        part = self
    # update all characters first
    charNodes = part.findAllMatches("**/+Character")
    for charNode in charNodes:
        charNode.node().update()
    # for each geomNode, iterate through all geoms and force update
    # of bounding spheres by marking current bounds as stale
    geomNodes = part.findAllMatches("**/+GeomNode")
    for nodeNum, thisGeomNode in enumerate(geomNodes):
        for geomNum, thisGeom in enumerate(thisGeomNode.node().getGeoms()):
            thisGeom.markBoundsStale()
            # enumerate indices are only used for this debug output
            assert Actor.notify.debug("fixing bounds for node %s, geom %s" % \
                                      (nodeNum, geomNum))
        thisGeomNode.node().markInternalBoundsStale()
def showAllBounds(self):
    """Render the bounding volumes of all of the actor's geoms."""
    for geomNode in self.__geomNode.findAllMatches("**/+GeomNode"):
        geomNode.showBounds()
def hideAllBounds(self):
    """Stop rendering the bounding volumes of all of the actor's geoms."""
    for geomNode in self.__geomNode.findAllMatches("**/+GeomNode"):
        geomNode.hideBounds()
# actions
def animPanel(self):
    """Open and return a Tk AnimPanel displaying this actor's anims."""
    # Don't use a regular import, to prevent ModuleFinder from picking
    # it up as a dependency when building a .p3d package.
    import importlib
    AnimPanel = importlib.import_module('direct.tkpanels.AnimPanel')
    return AnimPanel.AnimPanel(self)
def stop(self, animName=None, partName=None):
    """stop(self, string=None, string=None)
    Stop named animation on the given part of the actor.
    If no name specified then stop all animations on the actor.
    NOTE: stops all LODs"""
    # getAnimControls() resolves parts/lods; stop each control.
    for ac in self.getAnimControls(animName, partName):
        ac.stop()
def play(self, animName, partName=None, fromFrame=None, toFrame=None):
    """play(self, string, string=None)
    Play the given animation on the given part of the actor.
    If no part is specified, try to play on all parts.  NOTE:
    plays over ALL LODs"""
    if fromFrame is None:
        # No frame range given: play each control in full.
        for ac in self.getAnimControls(animName, partName):
            ac.play()
    else:
        for ac in self.getAnimControls(animName, partName):
            # Default the end of the range to the control's last frame.
            end = toFrame if toFrame is not None else ac.getNumFrames() - 1
            ac.play(fromFrame, end)
def loop(self, animName, restart=1, partName=None,
         fromFrame=None, toFrame=None):
    """loop(self, string, int=1, string=None)
    Loop the given animation on the given part of the actor,
    restarting at zero frame if requested.  If no part name
    is given then try to loop on all parts.  NOTE: loops on
    all LOD's
    """
    if fromFrame is None:
        # No frame range given: loop the entire animation.
        for ac in self.getAnimControls(animName, partName):
            ac.loop(restart)
    else:
        for ac in self.getAnimControls(animName, partName):
            # Default the end of the range to the control's last frame.
            end = toFrame if toFrame is not None else ac.getNumFrames() - 1
            ac.loop(restart, fromFrame, end)
def pingpong(self, animName, restart=1, partName=None,
             fromFrame=None, toFrame=None):
    """pingpong(self, string, int=1, string=None)
    Loop the given animation back and forth on the given part of
    the actor, restarting at zero frame if requested.  If no part
    name is given then try to loop on all parts.  NOTE: loops on
    all LOD's"""
    # Default the start of the range to frame zero.
    start = 0 if fromFrame is None else fromFrame
    for ac in self.getAnimControls(animName, partName):
        end = toFrame if toFrame is not None else ac.getNumFrames() - 1
        ac.pingpong(restart, start, end)
def pose(self, animName, frame, partName=None, lodName=None):
    """pose(self, string, int, string=None)
    Pose the actor in position found at given frame in the specified
    animation for the specified part.  If no part is specified attempt
    to apply pose to all parts."""
    for ac in self.getAnimControls(animName, partName, lodName):
        ac.pose(frame)
def setBlend(self, animBlend = None, frameBlend = None,
             blendType = None, partName = None):
    """
    Changes the way the Actor handles blending of multiple
    different animations, and/or interpolation between consecutive
    frames.

    The animBlend and frameBlend parameters are boolean flags.
    You may set either or both to True or False.  If you do not
    specify them, they do not change from the previous value.

    When animBlend is True, multiple different animations may
    simultaneously be playing on the Actor.  This means you may
    call play(), loop(), or pose() on multiple animations and have
    all of them contribute to the final pose each frame.

    In this mode (that is, when animBlend is True), starting a
    particular animation with play(), loop(), or pose() does not
    implicitly make the animation visible; you must also call
    setControlEffect() for each animation you wish to use to
    indicate how much each animation contributes to the final
    pose.

    The frameBlend flag is unrelated to playing multiple
    animations.  It controls whether the Actor smoothly
    interpolates between consecutive frames of its animation (when
    the flag is True) or holds each frame until the next one is
    ready (when the flag is False).  The default value of
    frameBlend is controlled by the interpolate-frames Config.prc
    variable.

    In either case, you may also specify blendType, which controls
    the precise algorithm used to blend two or more different
    matrix values into a final result.  Different skeleton
    hierarchies may benefit from different algorithms.  The
    default blendType is controlled by the anim-blend-type
    Config.prc variable.
    """
    # Apply only the flags the caller actually supplied, on every
    # bundle of the named part (or of all parts when partName is None).
    for bundle in self.getPartBundles(partName = partName):
        if blendType != None:
            bundle.setBlendType(blendType)
        if animBlend != None:
            bundle.setAnimBlendFlag(animBlend)
        if frameBlend != None:
            bundle.setFrameBlendFlag(frameBlend)
def enableBlend(self, blendType = PartBundle.BTNormalizedLinear, partName = None):
    """
    Enables blending of multiple animations simultaneously.
    After this is called, you may call play(), loop(), or pose()
    on multiple animations and have all of them contribute to the
    final pose each frame.

    With blending in effect, starting a particular animation with
    play(), loop(), or pose() does not implicitly make the
    animation visible; you must also call setControlEffect() for
    each animation you wish to use to indicate how much each
    animation contributes to the final pose.

    This method is deprecated.  You should use setBlend() instead.
    """
    # Thin deprecated wrapper around setBlend().
    self.setBlend(animBlend=True, blendType=blendType, partName=partName)
def disableBlend(self, partName = None):
    """
    Restores normal one-animation-at-a-time operation after a
    previous call to enableBlend().

    This method is deprecated.  You should use setBlend() instead.
    """
    # Thin deprecated wrapper around setBlend().
    self.setBlend(animBlend=False, partName=partName)
def setControlEffect(self, animName, effect,
                     partName = None, lodName = None):
    """
    Sets the amount by which the named animation contributes to
    the overall pose.  This controls blending of multiple
    animations; it only makes sense to call this after a previous
    call to setBlend(animBlend = True).
    """
    for ac in self.getAnimControls(animName, partName, lodName):
        # Each control's owning part records the blend weight.
        ac.getPart().setControlEffect(ac, effect)
def getAnimFilename(self, animName, partName='modelRoot'):
    """
    getAnimFilename(self, animName)
    return the animFilename given the animName, or None if the
    anim is not known for the given part.
    """
    # Pick the lod key the anims are stored under: merged bundles
    # always use 'common'; otherwise the first LOD switch, or 'lodRoot'.
    if self.mergeLODBundles:
        lodName = 'common'
    elif self.switches:
        lodName = str(next(iter(self.switches)))
    else:
        lodName = 'lodRoot'
    try:
        return self.__animControlDict[lodName][partName][animName].filename
    except KeyError:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only a missing dict key means "not found".
        return None
def getAnimControl(self, animName, partName=None, lodName=None,
                   allowAsyncBind = True):
    """
    getAnimControl(self, string, string, string="lodRoot")
    Search the animControl dictionary indicated by lodName for
    a given anim and part.  If none specified, try the first part and lod.
    Return the animControl if present, or None otherwise.

    If allowAsyncBind is False, waits for any pending asynchronous
    bind of the animation to complete before returning.
    """
    if not partName:
        partName = 'modelRoot'
    # Resolve the lod key: merged bundles always use 'common';
    # otherwise default to the first LOD switch, or 'lodRoot'.
    if self.mergeLODBundles:
        lodName = 'common'
    elif not lodName:
        if self.switches:
            lodName = str(next(iter(self.switches)))
        else:
            lodName = 'lodRoot'
    partDict = self.__animControlDict.get(lodName)
    # if this assertion fails, named lod was not present
    assert partDict != None
    animDict = partDict.get(partName)
    if animDict == None:
        # part was not present
        Actor.notify.warning("couldn't find part: %s" % (partName))
    else:
        anim = animDict.get(animName)
        if anim == None:
            # anim was not present
            assert Actor.notify.debug("couldn't find anim: %s" % (animName))
            pass
        else:
            # bind the animation first if we need to
            if not anim.animControl:
                self.__bindAnimToPart(animName, partName, lodName,
                                      allowAsyncBind = allowAsyncBind)
            elif not allowAsyncBind:
                # Caller wants a fully-bound control: block on any
                # in-flight asynchronous bind.
                anim.animControl.waitPending()
            return anim.animControl
    return None
def getAnimControls(self, animName=None, partName=None, lodName=None,
                    allowAsyncBind = True):
    """getAnimControls(self, string, string=None, string=None)

    Returns a list of the AnimControls that represent the given
    animation for the given part and the given lod.

    If animName is None or omitted, the currently-playing
    animation (or all currently-playing animations) is returned.
    If animName is True, all animations are returned.  If animName
    is a single string name, that particular animation is
    returned.  If animName is a list of string names, all of the
    names animations are returned.

    If partName is None or omitted, all parts are returned (or
    possibly the one overall Actor part, according to the
    subpartsComplete flag).

    If lodName is None or omitted, all LOD's are returned.
    """
    if partName == None and self.__subpartsComplete:
        # If we have the __subpartsComplete flag, and no partName
        # is specified, it really means to play the animation on
        # all subparts, not on the overall Actor.
        partName = list(self.__subpartDict.keys())
    controls = []
    # build list of lodNames and corresponding animControlDicts
    # requested.
    if lodName == None or self.mergeLODBundles:
        # Get all LOD's
        animControlDictItems = self.__animControlDict.items()
    else:
        partDict = self.__animControlDict.get(lodName)
        if partDict == None:
            Actor.notify.warning("couldn't find lod: %s" % (lodName))
            animControlDictItems = []
        else:
            animControlDictItems = [(lodName, partDict)]
    for lodName, partDict in animControlDictItems:
        # Now, build the list of partNames and the corresponding
        # animDicts.
        if partName == None:
            # Get all main parts, but not sub-parts.
            animDictItems = []
            for thisPart, animDict in partDict.items():
                if thisPart not in self.__subpartDict:
                    animDictItems.append((thisPart, animDict))
        else:
            # Get exactly the named part or parts.
            if isinstance(partName, str):
                partNameList = [partName]
            else:
                partNameList = partName
            animDictItems = []
            for pName in partNameList:
                animDict = partDict.get(pName)
                if animDict == None:
                    # Maybe it's a subpart that hasn't been bound yet.
                    subpartDef = self.__subpartDict.get(pName)
                    if subpartDef:
                        # Create an empty anim table for the subpart;
                        # anims are copied in lazily below.
                        animDict = {}
                        partDict[pName] = animDict
                if animDict == None:
                    # part was not present
                    Actor.notify.warning("couldn't find part: %s" % (pName))
                else:
                    animDictItems.append((pName, animDict))
        if animName is None:
            # get all playing animations
            for thisPart, animDict in animDictItems:
                for anim in animDict.values():
                    if anim.animControl and anim.animControl.isPlaying():
                        controls.append(anim.animControl)
        else:
            # get the named animation(s) only.
            if isinstance(animName, str):
                # A single animName
                animNameList = [animName]
            else:
                # A list of animNames, or True to indicate all anims.
                animNameList = animName
            for thisPart, animDict in animDictItems:
                names = animNameList
                if animNameList is True:
                    names = animDict.keys()
                for animName in names:
                    anim = animDict.get(animName)
                    if anim == None and partName != None:
                        for pName in partNameList:
                            # Maybe it's a subpart that hasn't been bound yet.
                            subpartDef = self.__subpartDict.get(pName)
                            if subpartDef:
                                # Copy the anim def from the true part
                                # into the subpart's table.
                                truePartName = subpartDef.truePartName
                                anim = partDict[truePartName].get(animName)
                                if anim:
                                    anim = anim.makeCopy()
                                    animDict[animName] = anim
                    if anim == None:
                        # anim was not present
                        assert Actor.notify.debug("couldn't find anim: %s" % (animName))
                        pass
                    else:
                        # bind the animation first if we need to
                        animControl = anim.animControl
                        if animControl == None:
                            animControl = self.__bindAnimToPart(
                                animName, thisPart, lodName,
                                allowAsyncBind = allowAsyncBind)
                        elif not allowAsyncBind:
                            # Force the animation to load if it's
                            # not already loaded.
                            animControl.waitPending()
                        if animControl:
                            controls.append(animControl)
    return controls
def loadModel(self, modelPath, partName="modelRoot", lodName="lodRoot",
              copy = True, okMissing = None, autoBindAnims = True):
    """Actor model loader.  Takes a model name (ie file path), a part
    name(defaults to "modelRoot") and an lod name(defaults to "lodRoot").

    modelPath may also be a NodePath, in which case it is used
    directly (copied first unless copy is False).  Raises IOError
    if the model cannot be loaded.
    """
    assert partName not in self.__subpartDict
    assert Actor.notify.debug("in loadModel: %s, part: %s, lod: %s, copy: %s" % \
                              (modelPath, partName, lodName, copy))
    if isinstance(modelPath, NodePath):
        # If we got a NodePath instead of a string, use *that* as
        # the model directly.
        if (copy):
            model = modelPath.copyTo(NodePath())
        else:
            model = modelPath
    else:
        # otherwise, we got the name of the model to load.
        loaderOptions = self.modelLoaderOptions
        if not copy:
            # If copy = 0, then we should always hit the disk.
            loaderOptions = LoaderOptions(loaderOptions)
            loaderOptions.setFlags(loaderOptions.getFlags() & ~LoaderOptions.LFNoRamCache)
        if okMissing is not None:
            # Explicit okMissing overrides the error-reporting flag.
            if okMissing:
                loaderOptions.setFlags(loaderOptions.getFlags() & ~LoaderOptions.LFReportErrors)
            else:
                loaderOptions.setFlags(loaderOptions.getFlags() | LoaderOptions.LFReportErrors)
        # Pass loaderOptions to specify that we want to
        # get the skeleton model.  This only matters to model
        # files (like .mb) for which we can choose to extract
        # either the skeleton or animation, or neither.
        model = self.loader.loadSync(Filename(modelPath), loaderOptions)
        if model is not None:
            model = NodePath(model)
    if (model == None):
        raise IOError("Could not load Actor model %s" % (modelPath))
    # Locate the Character node: either the model root itself, or
    # somewhere beneath it.
    if (model.node().isOfType(Character.getClassType())):
        bundleNP = model
    else:
        bundleNP = model.find("**/+Character")
    if (bundleNP.isEmpty()):
        # Not an animated character at all; just parent the static
        # geometry under the actor.
        Actor.notify.warning("%s is not a character!" % (modelPath))
        model.reparentTo(self.__geomNode)
    else:
        # Maybe the model file also included some animations.  If
        # so, try to bind them immediately and put them into the
        # animControlDict.
        if autoBindAnims:
            acc = AnimControlCollection()
            autoBind(model.node(), acc, ~0)
            numAnims = acc.getNumAnims()
        else:
            numAnims = 0
        # Now extract out the Character and integrate it with
        # the Actor.
        if (lodName!="lodRoot"):
            # parent to appropriate node under LOD switch
            bundleNP.reparentTo(self.__LODNode.find(str(lodName)))
        else:
            bundleNP.reparentTo(self.__geomNode)
        self.__prepareBundle(bundleNP, model.node(), partName, lodName)
        # we rename this node to make Actor copying easier
        bundleNP.node().setName("%s%s"%(Actor.partPrefix,partName))
        if numAnims != 0:
            # If the model had some animations, store them in the
            # dict so they can be played.
            Actor.notify.info("model contains %s animations." % (numAnims))
            # make sure this lod is in anim control dict
            if self.mergeLODBundles:
                lodName = 'common'
            self.__animControlDict.setdefault(lodName, {})
            self.__animControlDict[lodName].setdefault(partName, {})
            for i in range(numAnims):
                animControl = acc.getAnim(i)
                animName = acc.getAnimName(i)
                # Store the pre-bound control directly on an AnimDef.
                animDef = Actor.AnimDef()
                animDef.animControl = animControl
                self.__animControlDict[lodName][partName][animName] = animDef
def __prepareBundle(self, bundleNP, partModel,
                    partName="modelRoot", lodName="lodRoot"):
    """Record the freshly-loaded Character's PartBundle in the
    actor's part tables, merging it with a previously-loaded bundle
    for the same part when mergeLODBundles is in effect."""
    assert partName not in self.__subpartDict
    # Rename the node at the top of the hierarchy, if we
    # haven't already, to make it easier to identify this
    # actor in the scene graph.
    if not self.gotName:
        self.node().setName(bundleNP.node().getName())
        self.gotName = 1
    bundleDict = self.__partBundleDict.get(lodName, None)
    if bundleDict == None:
        # make a dictionary to store these parts in
        bundleDict = {}
        self.__partBundleDict[lodName] = bundleDict
        self.__updateSortedLODNames()
    node = bundleNP.node()
    # A model loaded from disk will always have just one bundle.
    assert(node.getNumBundles() == 1)
    bundleHandle = node.getBundleHandle(0)
    if self.mergeLODBundles:
        loadedBundleHandle = self.__commonBundleHandles.get(partName, None)
        if loadedBundleHandle:
            # We've already got a bundle for this part; merge it.
            node.mergeBundles(bundleHandle, loadedBundleHandle)
            bundleHandle = loadedBundleHandle
        else:
            # We haven't already got a bundle for this part; store it.
            self.__commonBundleHandles[partName] = bundleHandle
    bundleDict[partName] = Actor.PartDef(bundleNP, bundleHandle, partModel)
def makeSubpart(self, partName, includeJoints, excludeJoints = [],
                parent="modelRoot", overlapping = False):
    """Defines a new "part" of the Actor that corresponds to the
    same geometry as the named parent part, but animates only a
    certain subset of the joints.  This can be used for
    partial-body animations, for instance to animate a hand waving
    while the rest of the body continues to play its walking
    animation.

    includeJoints is a list of joint names that are to be animated
    by the subpart.  Each name can include globbing characters
    like '?' or '*', which will match one or any number of
    characters, respectively.  Including a joint by naming it in
    includeJoints implicitly includes all of the descendents of
    that joint as well, except for excludeJoints, below.

    excludeJoints is a list of joint names that are *not* to be
    animated by the subpart.  As in includeJoints, each name can
    include globbing characters.  If a joint is named by
    excludeJoints, it will not be included (and neither will any
    of its descendents), even if a parent joint was named by
    includeJoints.

    if overlapping is False, an error is raised (in the dev build)
    if this subpart shares joints with any other subparts.  If
    overlapping is True, no such error is raised.

    parent is the actual partName that this subpart is based
    on."""
    assert partName not in self.__subpartDict
    # Start from the parent's subset (if the parent is itself a
    # subpart), then layer on this subpart's include/exclude rules.
    subpartDef = self.__subpartDict.get(parent, Actor.SubpartDef(''))
    subset = PartSubset(subpartDef.subset)
    for name in includeJoints:
        subset.addIncludeJoint(GlobPattern(name))
    for name in excludeJoints:
        subset.addExcludeJoint(GlobPattern(name))
    self.__subpartDict[partName] = Actor.SubpartDef(parent, subset)
    if __dev__ and not overlapping and self.validateSubparts.getValue():
        # Without the overlapping flag True, we're not allowed to
        # define overlapping sub-parts.  Verify that we haven't.
        for otherPartName, otherPartDef in self.__subpartDict.items():
            if otherPartName != partName and otherPartDef.truePartName == parent:
                joints = self.getOverlappingJoints(partName, otherPartName)
                if joints:
                    raise Exception('Overlapping joints: %s and %s' % (partName, otherPartName))
def setSubpartsComplete(self, flag):
    """Sets the subpartsComplete flag, which controls what play(),
    loop(), stop(), etc. act on when no explicit parts are given.

    False (the default): they act on the overall Actor part, which
    overlaps each subpart; playing a different anim on a subpart may
    then stop or blend with the overall animation.  True: they act on
    each subpart individually, so a later per-subpart play replaces
    only that subpart's animation.  Set True only when the union of
    your subparts covers the entire Actor.
    """
    self.__subpartsComplete = flag
    # Dev-build sanity check: once any subparts exist, make sure they
    # really cover every joint.
    if __dev__ and self.__subpartsComplete and self.validateSubparts.getValue() and self.__subpartDict:
        self.verifySubpartsComplete()
def getSubpartsComplete(self):
    """Returns the flag set by setSubpartsComplete()."""
    return self.__subpartsComplete
def verifySubpartsComplete(self, partName = None, lodName = None):
    """Checks that every joint of each true part is covered by at
    least one subpart; logs a warning listing any uncovered joints."""
    if partName:
        # Must be a true part, not a subpart alias.
        assert partName not in self.__subpartDict
        partNames = [partName]
    elif lodName:
        partNames = self.__partBundleDict[lodName].keys()
    else:
        partNames = next(iter(self.__partBundleDict.values())).keys()

    for partName in partNames:
        covered = set()
        for subPartName, subPartDef in self.__subpartDict.items():
            if subPartName != partName and subPartDef.truePartName == partName:
                covered |= set(self.getJoints(partName = subPartName, lodName = lodName))
        uncovered = set(self.getJoints(partName = partName, lodName = lodName)) - covered
        if uncovered:
            self.notify.warning('Uncovered joints: %s' % (list(uncovered)))
def loadAnims(self, anims, partName="modelRoot", lodName="lodRoot"):
    """Loads a set of animations for the given part and LOD.

    anims is a dict of animName:animPath (or animName:NodePath for a
    pre-loaded anim bundle).  partName defaults to 'modelRoot' and
    lodName to 'lodRoot'; lodName may be 'all' to apply to every LOD.
    Filename entries are only recorded here and are loaded/bound
    lazily when first played.
    """
    reload = True
    if self.mergeLODBundles:
        # All LODs share one skeleton, so one 'common' entry suffices.
        lodNames = ['common']
    elif lodName == 'all':
        reload = False
        lodNames = list(self.switches.keys())
        lodNames.sort()
        for i in range(0, len(lodNames)):
            lodNames[i] = str(lodNames[i])
    else:
        lodNames = [lodName]

    assert Actor.notify.debug("in loadAnims: %s, part: %s, lod: %s" %
                              (anims, partName, lodNames[0]))

    firstLoad = True
    if not reload:
        # Bug fix: catch only the KeyError raised by a missing dict
        # entry; the original bare "except:" would also hide unrelated
        # errors (even KeyboardInterrupt).
        try:
            self.__animControlDict[lodNames[0]][partName]
            firstLoad = False
        except KeyError:
            pass
    for lName in lodNames:
        if firstLoad:
            self.__animControlDict.setdefault(lName, {})
            self.__animControlDict[lName].setdefault(partName, {})

    for animName, filename in anims.items():
        # make sure this lod is in anim control dict
        for lName in lodNames:
            if firstLoad:
                self.__animControlDict[lName][partName][animName] = Actor.AnimDef()
            if isinstance(filename, NodePath):
                # We were given a pre-loaded anim bundle, not a filename.
                assert not filename.isEmpty()
                if filename.node().isOfType(AnimBundleNode.getClassType()):
                    animBundleNP = filename
                else:
                    animBundleNP = filename.find('**/+AnimBundleNode')
                assert not animBundleNP.isEmpty()
                self.__animControlDict[lName][partName][animName].animBundle = animBundleNP.node().getBundle()
            else:
                # Store the filename only; it is loaded and bound (and
                # an AnimControl produced) when first played.
                self.__animControlDict[lName][partName][animName].filename = filename
def initAnimsOnAllLODs(self, partNames):
    """Ensures an (empty) anim-control dict entry exists for each of
    the named parts on every LOD (or on the single merged bundle when
    mergeLODBundles is set)."""
    if self.mergeLODBundles:
        lodNames = ['common']
    else:
        lodNames = self.__partBundleDict.keys()
    for lod in lodNames:
        for part in partNames:
            self.__animControlDict.setdefault(lod, {})
            self.__animControlDict[lod].setdefault(part, {})
def loadAnimsOnAllLODs(self, anims, partName="modelRoot"):
    """Records the given animName:animPath dict for partName on every
    LOD (or on the merged 'common' bundle).  Files are loaded and
    bound lazily, when the animation is first played."""
    if self.mergeLODBundles:
        lodNames = ['common']
    else:
        lodNames = self.__partBundleDict.keys()

    for animName, filename in anims.items():
        for lod in lodNames:
            # store the file path only; we bind it (and produce an
            # AnimControl) when it is played
            self.__animControlDict[lod][partName][animName] = Actor.AnimDef(filename)
def postFlatten(self):
    """Restores Actor invariants after an aggressive flatten such as
    flattenStrong().  Especially necessary when mergeLODBundles is
    true, since that kind of actor may be broken by a flatten."""
    if self.mergeLODBundles:
        # Re-merge all bundles, and rebuild the common bundle map.
        self.__commonBundleHandles = {}
        for lodName, bundleDict in self.__partBundleDict.items():
            for partName, partDef in bundleDict.items():
                mergedHandle = self.__commonBundleHandles.get(partName, None)
                node = partDef.partBundleNP.node()
                if mergedHandle:
                    node.mergeBundles(partDef.partBundleHandle, mergedHandle)
                    partDef.partBundleHandle = mergedHandle
                else:
                    self.__commonBundleHandles[partName] = partDef.partBundleHandle

    # Since bundles may have been merged, all existing anim bindings
    # are now suspect; force them to reload.
    self.unloadAnims()
def unloadAnims(self, anims=None, partName=None, lodName=None):
    """Deletes the AnimControls for the given animation name(s) and
    part(s)/LOD(s).  Any argument left as None means "all of them".

    Fixes: replaced `!= None` equality comparisons with `is not None`
    (PEP 8 / E711) and consolidated the duplicated clear logic into a
    single local helper.
    """
    assert Actor.notify.debug("in unloadAnims: %s, part: %s, lod: %s" %
                              (anims, partName, lodName))
    if lodName is None or self.mergeLODBundles:
        lodNames = self.__animControlDict.keys()
    else:
        lodNames = [lodName]

    if partName is None:
        if len(lodNames) > 0:
            partNames = self.__animControlDict[next(iter(lodNames))].keys()
        else:
            partNames = []
    else:
        partNames = [partName]

    def clearControl(animDef):
        # Clear any lingering control effects before dropping our
        # handle on the AnimControl -- especially important if it was
        # blending animations.
        if animDef is not None and animDef.animControl is not None:
            animDef.animControl.getPart().clearControlEffects()
            animDef.animControl = None

    for lodName in lodNames:
        for partName in partNames:
            animDict = self.__animControlDict[lodName][partName]
            if anims is None:
                for animDef in animDict.values():
                    clearControl(animDef)
            else:
                for anim in anims:
                    clearControl(animDict.get(anim))
def bindAnim(self, animName, partName = None, lodName = None,
             allowAsyncBind = False):
    """Binds the named animation to the named part and/or lod.

    With allowAsyncBind False (the default) the bind is guaranteed to
    happen synchronously in the main thread, so the animation is
    available when this method returns -- at the cost of possibly
    holding up the render for a frame or two.  This is a thin wrapper
    around getAnimControls(); see that method for parameter details.
    """
    self.getAnimControls(animName = animName, partName = partName,
                         lodName = lodName,
                         allowAsyncBind = allowAsyncBind)
def bindAllAnims(self, allowAsyncBind = False):
    """Loads and binds every animation defined for the Actor."""
    self.getAnimControls(animName = True, allowAsyncBind = allowAsyncBind)
def waitPending(self, partName = None):
    """Blocks until all asynchronously pending (currently playing)
    animations have been loaded and bound to the Actor.  Call this
    after play() when async binds are enabled but this particular
    animation is needed immediately."""
    for bundle in self.getPartBundles(partName = partName):
        bundle.waitPending()
def __bindAnimToPart(self, animName, partName, lodName,
                     allowAsyncBind = True):
    """Binds animName to the named part/lod and returns the resulting
    AnimControl, or None if binding failed.

    The load/bind may happen asynchronously in a sub-thread when
    allowAsyncBind and self.allowAsyncBind are both True, threading is
    enabled, and the animation has a preload table (egg-optchar
    -preload); a usable AnimControl is returned either way.
    """
    # make sure this anim is in the dict
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))

    partDict = self.__animControlDict[lodName]
    animDict = partDict.get(partName)
    if animDict is None:
        # It must be a subpart that hasn't been bound yet.
        animDict = {}
        partDict[partName] = animDict

    anim = animDict.get(animName)
    if anim is None:
        # It must be a subpart that hasn't been bound yet.
        # Bug fix: report the missing animation *before* calling
        # makeCopy(); the original called makeCopy() on a possible
        # None, raising AttributeError instead of the intended error.
        anim = partDict[subpartDef.truePartName].get(animName)
        if anim is None:
            Actor.notify.error("actor has no animation %s", animName)
        anim = anim.makeCopy()
        animDict[animName] = anim

    # only bind if not already bound!
    if anim.animControl:
        return anim.animControl

    if self.mergeLODBundles:
        bundle = self.__commonBundleHandles[subpartDef.truePartName].getBundle()
    else:
        bundle = self.__partBundleDict[lodName][subpartDef.truePartName].getBundle()

    if anim.animBundle:
        # We already have a bundle; just bind it.
        animControl = bundle.bindAnim(anim.animBundle, -1, subpartDef.subset)
    else:
        # Load and bind the anim.  This might be an asynchronous
        # operation that completes in the background, but it still
        # returns a usable AnimControl.
        animControl = bundle.loadBindAnim(
            self.loader, Filename(anim.filename), -1,
            subpartDef.subset, allowAsyncBind and self.allowAsyncBind)

    if not animControl:
        # Couldn't bind.  (This implies the binding operation was not
        # attempted asynchronously.)
        return None

    # store the animControl
    anim.animControl = animControl
    assert Actor.notify.debug("binding anim: %s to part: %s, lod: %s" %
                              (animName, partName, lodName))
    return animControl
def __copyPartBundles(self, other):
    """Copies the part bundle dictionary from another actor as this
    instance's own.  NOTE: this does not actually copy geometry.

    Bug fix: NodePath.find() returns an *empty* NodePath on failure,
    never None, so the original "bundleNP != None" test was always
    true and the missing-part error branch was unreachable; test
    isEmpty() instead.
    """
    for lodName in other.__partBundleDict:
        # find the node for this lod
        if lodName == 'lodRoot':
            partLod = self
        else:
            partLod = self.__LODNode.find(str(lodName))
        if partLod.isEmpty():
            Actor.notify.warning("no lod named: %s" % (lodName))
            return None
        for partName, partDef in other.__partBundleDict[lodName].items():
            # We can really only copy from a non-flattened avatar.
            assert partDef.partBundleNP.node().getNumBundles() == 1

            # find the part in our tree
            bundleNP = partLod.find("**/%s%s" % (Actor.partPrefix, partName))
            if not bundleNP.isEmpty():
                # store the part bundle
                self.__prepareBundle(bundleNP, partDef.partModel,
                                     partName, lodName)
            else:
                Actor.notify.error("lod: %s has no matching part: %s" %
                                   (lodName, partName))
def __copySubpartDict(self, other):
    """Copies the subpartDict from another actor as this instance's
    own, deep-copying each name and PartSubset.  copy.deepcopy() can't
    be used because of the embedded C++ PartSubset objects.

    Fix: removed the dead local `subpartDefCopy`, which aliased the
    original value and was never used.
    """
    self.__subpartDict = {}
    for partName, subpartDef in other.__subpartDict.items():
        if subpartDef:
            subpartDef = subpartDef.makeCopy()
        self.__subpartDict[partName] = subpartDef
def __copyAnimControls(self, other):
    """Copies the other actor's anim-control dictionary into our own,
    making a fresh AnimDef copy of every entry; the copies are
    re-bound to our own matching part bundles on demand."""
    assert(other.mergeLODBundles == self.mergeLODBundles)
    for lodName in other.__animControlDict:
        self.__animControlDict[lodName] = {}
        for partName in other.__animControlDict[lodName]:
            self.__animControlDict[lodName][partName] = {}
            for animName in other.__animControlDict[lodName][partName]:
                original = other.__animControlDict[lodName][partName][animName]
                self.__animControlDict[lodName][partName][animName] = original.makeCopy()
def actorInterval(self, *args, **kw):
    """Returns an ActorInterval wrapping this Actor; all arguments
    are forwarded to the ActorInterval constructor."""
    from direct.interval import ActorInterval
    return ActorInterval.ActorInterval(self, *args, **kw)
def getAnimBlends(self, animName=None, partName=None, lodName=None):
    """Reports the non-zero control effects per part within each
    animation and LOD, as:

    [(lodName, [(animName, [(partName, effect), ...]), ...]), ...]

    Each argument may be None to mean "all of them".
    """
    animNames = [animName] if animName is not None else self.getAnimNames()

    if lodName is None:
        lodNames = self.getLODNames()
        if self.mergeLODBundles:
            lodNames = lodNames[:1]
    else:
        lodNames = [lodName]

    if partName == None and self.__subpartsComplete:
        partNames = self.__subpartDict.keys()
    else:
        partNames = [partName]

    result = []
    for lodName in lodNames:
        animList = []
        for animName in animNames:
            blendList = []
            for partName in partNames:
                control = self.getAnimControl(animName, partName, lodName)
                if control:
                    part = control.getPart()
                    effect = part.getControlEffect(control)
                    if effect > 0.:
                        blendList.append((partName, effect))
            if blendList:
                animList.append((animName, blendList))
        if animList:
            result.append((lodName, animList))
    return result
def printAnimBlends(self, animName=None, partName=None, lodName=None):
    """Prints the getAnimBlends() report, one line per LOD and one per
    animation.  Fix: renamed the accumulator from `list`, which
    shadowed the builtin of the same name."""
    for lodName, animList in self.getAnimBlends(animName, partName, lodName):
        print('LOD %s:' % (lodName))
        for animName, blendList in animList:
            effects = []
            for partName, effect in blendList:
                effects.append('%s:%.3f' % (partName, effect))
            print(' %s: %s' % (animName, ', '.join(effects)))
def osdAnimBlends(self, animName=None, partName=None, lodName=None):
    """Puts per-animation blend info into the on-screen debug panel.

    Bug fix: the original tested `animName is 'nothing'` -- identity
    comparison against a string literal, which is implementation-
    dependent (and a SyntaxWarning on CPython >= 3.8); use equality.
    """
    if not onScreenDebug.enabled:
        return
    if animName is None:
        animNames = self.getAnimNames()
    else:
        animNames = [animName]
    for animName in animNames:
        if animName == 'nothing':
            continue
        thisAnim = ''
        totalEffect = 0.
        controls = self.getAnimControls(animName, partName, lodName)
        for control in controls:
            part = control.getPart()
            name = part.getName()
            effect = part.getControlEffect(control)
            if effect > 0.:
                totalEffect += effect
                thisAnim += ('%s:%.3f, ' % (name, effect))
        thisAnim += "\n"
        for control in controls:
            part = control.getPart()
            name = part.getName()
            rate = control.getPlayRate()
            thisAnim += ('%s:%.1f, ' % (name, rate))
        # don't display anything if this animation is not being played
        itemName = 'anim %s' % animName
        if totalEffect > 0.:
            onScreenDebug.add(itemName, thisAnim)
        elif onScreenDebug.has(itemName):
            onScreenDebug.remove(itemName)
# faceAwayFromViewer/faceTowardsViewer compensate for actors modeled
# facing the viewer but needing to face away from the camera in-game.
def faceAwayFromViewer(self):
    """Rotates the geometry 180 degrees about H."""
    self.getGeomNode().setH(180)
def faceTowardsViewer(self):
    """Resets the geometry heading to 0 (facing the viewer)."""
    self.getGeomNode().setH(0)
def renamePartBundles(self, partName, newBundleName):
    """Renames the underlying PartBundle of the named part (resolving
    a subpart to its true part) to newBundleName on every LOD."""
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    for partBundleDict in self.__partBundleDict.values():
        # NOTE(review): assumes the part exists on every LOD; a
        # missing part would raise AttributeError here -- confirm.
        partDef = partBundleDict.get(subpartDef.truePartName)
        partDef.getBundle().setName(newBundleName)
| {
"repo_name": "tobspr/panda3d",
"path": "direct/src/actor/Actor.py",
"copies": "3",
"size": "105172",
"license": "bsd-3-clause",
"hash": -581293755250376700,
"line_mean": 40.227753822,
"line_max": 116,
"alpha_frac": 0.5728520899,
"autogenerated": false,
"ratio": 4.389299277993406,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0026742961775317827,
"num_lines": 2551
} |
"""Actor module: contains the Actor class"""
__all__ = ['Actor']
from pandac.PandaModules import *
from direct.showbase.DirectObject import DirectObject
from direct.directnotify import DirectNotifyGlobal
from pandac.PandaModules import LODNode
import types, copy
class Actor(DirectObject, NodePath):
"""
Actor class: Contains methods for creating, manipulating
and playing animations on characters
"""
# Notify category used for all Actor debug/warning/error output.
notify = DirectNotifyGlobal.directNotify.newCategory("Actor")
# Prefix prepended to each part's root node name in the scene graph.
partPrefix = "__Actor_"

# Loader options for model files: search model path, report errors,
# and convert the skeleton on load.
modelLoaderOptions = LoaderOptions(LoaderOptions.LFSearch |
                                   LoaderOptions.LFReportErrors |
                                   LoaderOptions.LFConvertSkeleton)
# Loader options for animation files (converts anim tables instead).
animLoaderOptions = LoaderOptions(LoaderOptions.LFSearch |
                                  LoaderOptions.LFReportErrors |
                                  LoaderOptions.LFConvertAnim)

# Config switch controlling the dev-build subpart coverage checks.
validateSubparts = ConfigVariableBool('validate-subparts', True)
class PartDef:
    """One entry of the PartBundleDict.

    Tracks a single loaded PartBundle together with its root NodePath
    and its ModelRoot node (saved so the ModelPool reference count
    stays accurate).  Each separately loaded model file -- every LOD
    and every piece of a multipart Actor -- gets its own PartDef.
    """

    def __init__(self, partBundleNP, partBundleHandle, partModel):
        self.partBundleNP = partBundleNP
        self.partBundleHandle = partBundleHandle
        self.partModel = partModel

    def getBundle(self):
        return self.partBundleHandle.getBundle()

    def __repr__(self):
        return 'Actor.PartDef(%s, %s)' % (repr(self.partBundleNP), repr(self.partModel))
class AnimDef:
    """One entry of the AnimControlDict.

    Tracks a single animation of a part (or sub-part): either already
    bound (animControl is valid) or not yet bound (animControl is
    None, and filename/animBundle says how to bind it later).

    Bug fix: __init__ previously discarded its animBundle parameter
    (`self.animBundle = None`), so makeCopy() silently dropped any
    pre-loaded bundle when an Actor was copied.
    """

    def __init__(self, filename = None, animBundle = None):
        self.filename = filename
        self.animBundle = animBundle
        self.animControl = None

    def makeCopy(self):
        return Actor.AnimDef(self.filename, self.animBundle)

    def __repr__(self):
        return 'Actor.AnimDef(%s)' % (repr(self.filename))
class SubpartDef:
    """One entry of the SubpartDict.

    Makes an arbitrary subset of an existing ("true") part's joints
    look to the user like a real part of a multipart Actor.

    Bug fix: the default `subset = PartSubset()` was evaluated once at
    class-definition time, so every SubpartDef created with the
    default shared a single mutable PartSubset instance (the classic
    mutable-default pitfall); a fresh one is now made per instance.
    """

    def __init__(self, truePartName, subset = None):
        self.truePartName = truePartName
        if subset is None:
            subset = PartSubset()
        self.subset = subset

    def makeCopy(self):
        return Actor.SubpartDef(self.truePartName, PartSubset(self.subset))

    def __repr__(self):
        return 'Actor.SubpartDef(%s, %s)' % (repr(self.truePartName), repr(self.subset))
def __init__(self, models=None, anims=None, other=None, copy=True,
             lodNode = None, flattenable = True, setFinal = False,
             mergeLODBundles = None, allowAsyncBind = None,
             okMissing = None):
    """Actor constructor.

    Creates a single- or multi-part Actor, optionally with LODs, or
    acts as a copy constructor when another Actor is given as
    ``other``.

    Single part:   Actor("panda.egg", {"walk": "panda-walk.egg"})
    Multipart:     Actor({part: model}, {part: {anim: path}})
    LOD variants use model dicts keyed by LOD name.  Multipart actors
    are then connected with attach().  Useful extras: drawInFront()
    to fix eye rendering, fixBounds() after a few rendered frames.
    """
    # Guard against repeated initialization of the same instance.
    try:
        self.Actor_initialized
        return
    except:
        self.Actor_initialized = 1

    # initialize our NodePath essence
    NodePath.__init__(self)

    # mergeLODBundles: when true, every LOD shares one merged joint
    # hierarchy (single 'common' key in __animControlDict); when
    # false, each LOD keeps its own isolated skeleton (one key per
    # LOD name).
    if mergeLODBundles == None:
        # Default comes from the Config.prc file.
        self.mergeLODBundles = base.config.GetBool('merge-lod-bundles', True)
    else:
        self.mergeLODBundles = mergeLODBundles

    # allowAsyncBind enables asynchronous animation binding; this
    # requires assets processed with "egg-optchar -preload".
    if allowAsyncBind == None:
        self.allowAsyncBind = base.config.GetBool('allow-async-bind', True)
    else:
        self.allowAsyncBind = allowAsyncBind

    # create data structures
    self.__commonBundleHandles = {}
    self.__partBundleDict = {}
    self.__subpartDict = {}
    self.__sortedLODNames = []
    self.__animControlDict = {}

    self.__subpartsComplete = False

    self.__LODNode = None
    self.__LODAnimation = None
    self.__LODCenter = Point3(0, 0, 0)
    self.switches = None

    if (other == None):
        # act like a normal constructor

        # create base hierarchy
        self.gotName = 0

        if flattenable:
            # A flattenable Actor skips the protective ModelNodes;
            # the GeomNode is the root itself.
            root = PandaNode('actor')
            self.assign(NodePath(root))
            self.setGeomNode(NodePath(self))
        else:
            # A standard Actor has a ModelNode at the root, and
            # another ModelNode to protect the GeomNode.
            root = ModelNode('actor')
            root.setPreserveTransform(1)
            self.assign(NodePath(root))
            self.setGeomNode(self.attachNewNode(ModelNode('actorGeom')))

        self.__hasLOD = 0

        # load models -- four cases:
        #   models, anims{}       = single part actor
        #   models{}, anims{}     = single part actor w/ LOD
        #   models{}, anims{}{}   = multi-part actor
        #   models{}{}, anims{}{} = multi-part actor w/ LOD
        if (models):
            # do we have a dictionary of models?
            if (type(models) == type({})):
                # if this is a dictionary of dictionaries
                if (type(models[models.keys()[0]]) == type({})):
                    # then it must be a multipart actor w/LOD
                    self.setLODNode(node = lodNode)
                    # preserve numerical order for lod's; this makes
                    # it easier to set ranges
                    sortedKeys = models.keys()
                    sortedKeys.sort()
                    for lodName in sortedKeys:
                        # make a node under the LOD switch for each lod
                        self.addLOD(str(lodName))
                        # iterate over both dicts
                        for modelName in models[lodName].keys():
                            self.loadModel(models[lodName][modelName],
                                           modelName, lodName, copy = copy,
                                           okMissing = okMissing)
                # then if there is a dictionary of dictionaries of anims
                elif (type(anims[anims.keys()[0]]) == type({})):
                    # then this is a multipart actor w/o LOD
                    for partName in models.keys():
                        # pass in each part
                        self.loadModel(models[partName], partName,
                                       copy = copy, okMissing = okMissing)
                else:
                    # it is a single part actor w/LOD
                    self.setLODNode(node = lodNode)
                    # preserve order of LOD's
                    sortedKeys = models.keys()
                    sortedKeys.sort()
                    for lodName in sortedKeys:
                        self.addLOD(str(lodName))
                        # pass in dictionary of parts
                        self.loadModel(models[lodName], lodName = lodName,
                                       copy = copy, okMissing = okMissing)
            else:
                # else it is a single part actor
                self.loadModel(models, copy = copy, okMissing = okMissing)

        # load anims -- make sure the actor has animations
        if (anims):
            if (len(anims) >= 1):
                # if so, does it have a dictionary of dictionaries?
                if (type(anims[anims.keys()[0]]) == type({})):
                    # are the models a dict of dicts too?
                    if (type(models) == type({})):
                        if (type(models[models.keys()[0]]) == type({})):
                            # then we have a multi-part w/ LOD
                            sortedKeys = models.keys()
                            sortedKeys.sort()
                            for lodName in sortedKeys:
                                # iterate over both dicts
                                for partName in anims.keys():
                                    self.loadAnims(
                                        anims[partName], partName, lodName)
                        else:
                            # then it must be multi-part w/o LOD
                            for partName in anims.keys():
                                self.loadAnims(anims[partName], partName)
                elif (type(models) == type({})):
                    # then we have single-part w/ LOD
                    sortedKeys = models.keys()
                    sortedKeys.sort()
                    for lodName in sortedKeys:
                        self.loadAnims(anims, lodName = lodName)
                else:
                    # else it is single-part w/o LOD
                    self.loadAnims(anims)
    else:
        self.copyActor(other, True)  # overwrite everything

    if setFinal:
        # Mark the top bounding volume "final" so child volumes are
        # never tested: an Actor is usually small and all-or-nothing
        # on screen, and Character animation doesn't update child
        # volumes for joints that move far from their rest position
        # (hands disappearing, etc.), so culling by the top volume is
        # both faster and safer.
        self.__geomNode.node().setFinal(1)
def delete(self):
    """Idempotent destructor: runs cleanup() exactly once per Actor."""
    if not hasattr(self, 'Actor_deleted'):
        self.Actor_deleted = 1
        self.cleanup()
def copyActor(self, other, overwrite=False):
    """Copy-constructor body: clones the scene graph, LOD switches,
    part bundles, subparts and anim controls from another Actor.
    With overwrite True the copy replaces this NodePath's contents."""
    self.gotName = other.gotName

    # copy the scene graph elements of other
    if (overwrite):
        otherCopy = other.copyTo(NodePath())
        otherCopy.detachNode()
        # assign these elements to ourselves (overwrite)
        self.assign(otherCopy)
    else:
        # just copy these to ourselves
        otherCopy = other.copyTo(self)

    # masad: if the actor was built flattenable, otherCopy itself (not
    # its first child) is the geom node; see __init__ for reference
    if other.getGeomNode().getName() == other.getName():
        self.setGeomNode(otherCopy)
    else:
        self.setGeomNode(otherCopy.getChild(0))

    # copy the switches for lods
    self.switches = other.switches
    self.__LODNode = self.find('**/+LODNode')
    self.__hasLOD = 0
    if (not self.__LODNode.isEmpty()):
        self.__hasLOD = 1

    # copy the part dictionary from other
    self.__copyPartBundles(other)
    self.__copySubpartDict(other)
    self.__subpartsComplete = other.__subpartsComplete

    # copy the anim dictionary from other
    self.__copyAnimControls(other)
def __cmp__(self, other):
    """Compares Actors by identity only (0 when it is the same
    object, 1 otherwise).  This overrides the NodePath comparison
    inherited via FFIExternalObject, which uses compareTo(): a
    NodePath that happens to wrap the same node is still a different
    object from this Actor."""
    return 0 if self is other else 1
def __str__(self):
    """Summarizes the Actor: name, part names, LOD names and anims."""
    return "Actor %s, parts = %s, LODs = %s, anims = %s" % \
           (self.getName(), self.getPartNames(), self.getLODNames(), self.getAnimNames())
def listJoints(self, partName="modelRoot", lodName="lodRoot"):
    """Prints the joint hierarchy of the given part/LOD (resolving a
    subpart to its true part and honoring its joint subset)."""
    if self.mergeLODBundles:
        partBundleDict = self.__commonBundleHandles
    else:
        partBundleDict = self.__partBundleDict.get(lodName)
        if not partBundleDict:
            Actor.notify.error("no lod named: %s" % (lodName))

    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    partDef = partBundleDict.get(subpartDef.truePartName)
    if partDef == None:
        Actor.notify.error("no part named: %s" % (partName))

    self.__doListJoints(0, partDef.getBundle(),
                        subpartDef.subset.isIncludeEmpty(), subpartDef.subset)
def __doListJoints(self, indentLevel, part, isIncluded, subset):
name = part.getName()
if subset.matchesInclude(name):
isIncluded = True
elif subset.matchesExclude(name):
isIncluded = False
if isIncluded:
value = ''
if hasattr(part, 'outputValue'):
lineStream = LineStream()
part.outputValue(lineStream)
value = lineStream.getLine()
print ' ' * indentLevel, part.getName(), value
for i in range(part.getNumChildren()):
self.__doListJoints(indentLevel + 2, part.getChild(i),
isIncluded, subset)
def getActorInfo(self):
    """Returns a nested description of the Actor, handy for iterating
    over its details:

    [[lodName, [[partName, partBundle,
                 [[animName, filename, animControl], ...]], ...]], ...]
    """
    lodInfo = []
    for lodName, partDict in self.__animControlDict.items():
        if self.mergeLODBundles:
            lodName = self.__sortedLODNames[0]

        partInfo = []
        for partName in partDict.keys():
            subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
            partBundleDict = self.__partBundleDict.get(lodName)
            partDef = partBundleDict.get(subpartDef.truePartName)
            animDict = partDict[partName]

            animInfo = []
            for animName in animDict.keys():
                animDef = animDict[animName]
                animInfo.append([animName, animDef.filename, animDef.animControl])
            partInfo.append([partName, partDef.getBundle(), animInfo])
        lodInfo.append([lodName, partInfo])
    return lodInfo
def getAnimNames(self):
    """Returns the distinct animation names known to the Actor across
    all LODs and parts, in first-seen order."""
    names = []
    for _lodName, lodInfo in self.getActorInfo():
        for _partName, _bundle, animInfo in lodInfo:
            for animName, _file, _control in animInfo:
                if animName not in names:
                    names.append(animName)
    return names
def pprint(self):
"""
Pretty print actor's details
"""
for lodName, lodInfo in self.getActorInfo():
print 'LOD:', lodName
for partName, bundle, animInfo in lodInfo:
print ' Part:', partName
print ' Bundle:', repr(bundle)
for animName, file, animControl in animInfo:
print ' Anim:', animName
print ' File:', file
if animControl == None:
print ' (not loaded)'
else:
print (' NumFrames: %d PlayRate: %0.2f' %
(animControl.getNumFrames(),
animControl.getPlayRate()))
def cleanup(self):
    """Full teardown: stops all anims, clears Python-side state,
    removes the geometry node and finally this node itself."""
    self.stop(None)
    self.clearPythonData()
    self.flush()
    if (self.__geomNode):
        self.__geomNode.removeNode()
        self.__geomNode = None
    if not self.isEmpty():
        self.removeNode()
def removeNode(self):
    """Removes this node; warns (dev builds) when geometry is still
    attached, i.e. cleanup() was not called first."""
    if self.__geomNode and (self.__geomNode.getNumChildren() > 0):
        assert self.notify.warning("called actor.removeNode() on %s without calling cleanup()" % self.getName())
    NodePath.removeNode(self)
def clearPythonData(self):
    """Resets every Python-side bookkeeping structure to empty."""
    self.__commonBundleHandles = {}
    self.__partBundleDict = {}
    self.__subpartDict = {}
    self.__sortedLODNames = []
    self.__animControlDict = {}
def flush(self):
    """
    Actor flush function: clear Python-side tables, remove the LOD node
    (if any), strip all children of the geom node, and mark the actor
    as having no LODs.
    """
    self.clearPythonData()
    if self.__LODNode and (not self.__LODNode.isEmpty()):
        self.__LODNode.removeNode()
        self.__LODNode = None
    # remove all its children
    if(self.__geomNode):
        self.__geomNode.removeChildren()
    self.__hasLOD = 0
# accessing
def getAnimControlDict(self):
    """Return the internal per-LOD anim control table (a reference, not a copy)."""
    return self.__animControlDict
def removeAnimControlDict(self):
    """Discard all stored anim controls by rebinding an empty table."""
    self.__animControlDict = {}
def getPartBundleDict(self):
    """Return the internal {lodName: {partName: partDef}} table (a reference)."""
    return self.__partBundleDict
def getPartBundles(self, partName = None):
    """Return a list of PartBundle objects for the entire Actor, or for
    the indicated part only.

    partName -- name of a (sub)part to restrict to; None means every
    part of every LOD.  Unknown part names produce a warning per LOD.
    """
    bundles = []
    for lodName, partBundleDict in self.__partBundleDict.items():
        if partName is None:
            bundles.extend(partDef.getBundle()
                           for partDef in partBundleDict.values())
            continue
        subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
        partDef = partBundleDict.get(subpartDef.truePartName)
        if partDef is not None:
            bundles.append(partDef.getBundle())
        else:
            Actor.notify.warning("Couldn't find part: %s" % (partName))
    return bundles
def __updateSortedLODNames(self):
    """Rebuild the cached, sorted LOD name list from the part-bundle table.

    Cached so getLODNames() does not have to re-sort on every call.
    Highest detail sorts first: purely numeric names compare by integer
    value; otherwise the first letter must be one of h/m/l/f
    (high, medium, low, fade).  A non-numeric name outside that set
    raises KeyError, matching the previous behavior.
    """
    detailRank = {'h': 3,
                  'm': 2,
                  'l': 1,
                  'f': 0}

    def sortKey(x):
        if not str(x).isdigit():
            # Conventional letter-named LODs rank by their first letter.
            return detailRank[x[0]]
        # Numeric LOD names (e.g. "1000") compare by magnitude.
        return int(x)

    # list() so the result is a real, sortable list on both Py2 and Py3.
    self.__sortedLODNames = list(self.__partBundleDict.keys())
    self.__sortedLODNames.sort(key=sortKey, reverse=True)
def getLODNames(self):
    """
    Return list of Actor LOD names, highest detail first.  If not an
    LOD actor, returns 'lodRoot'.
    Caution - this returns a reference to the list - not your own copy
    """
    return self.__sortedLODNames
def getPartNames(self):
    """
    Return list of Actor part names plus any defined subpart names.
    If not a multipart actor, the part list is ['modelRoot'].
    NOTE: part names are taken from an arbitrary LOD, so the result is
    only LOD-independent if every LOD carries the same parts.
    """
    partNames = []
    if self.__partBundleDict:
        # Any LOD will do; avoid indexing dict views so this is
        # safe on both Python 2 and Python 3.
        anyLodParts = next(iter(self.__partBundleDict.values()))
        partNames = list(anyLodParts.keys())
    return partNames + list(self.__subpartDict.keys())
def getGeomNode(self):
    """Return the NodePath under which all actor geometry lives."""
    return self.__geomNode
def setGeomNode(self, node):
    """Replace the NodePath under which all actor geometry lives."""
    self.__geomNode = node
def getLODNode(self):
    """
    Return the node that switches actor geometry in and out"""
    # Unwrap the stored NodePath to its underlying node.
    return self.__LODNode.node()
def setLODNode(self, node=None):
    """
    Set the node that switches actor geometry in and out.
    If one is not supplied as an argument, make one
    """
    if (node == None):
        node = LODNode.makeDefaultLod("lod")
    # NOTE(review): when an LOD node already exists the raw node is stored
    # directly, while the first call stores the NodePath returned by
    # attachNewNode (and initializes __hasLOD / switches).  Confirm this
    # asymmetry is intentional.
    if self.__LODNode:
        self.__LODNode = node
    else:
        self.__LODNode = self.__geomNode.attachNewNode(node)
        self.__hasLOD = 1
        self.switches = {}
def useLOD(self, lodName):
    """
    Make the Actor ONLY display the given LOD
    """
    # make sure we don't call this twice in a row
    # and pollute the the switches dictionary
    ## sortedKeys = self.switches.keys()
    ## sortedKeys.sort()
    # Find the named LOD child and force the C++ switch to it.
    child = self.__LODNode.find(str(lodName))
    index = self.__LODNode.node().findChild(child.node())
    self.__LODNode.node().forceSwitch(index)
def printLOD(self):
    """Dump the Python-side and C++-side LOD switch distances to stdout."""
    ## sortedKeys = self.switches.keys()
    ## sortedKeys.sort()
    sortedKeys = self.__sortedLODNames
    for eachLod in sortedKeys:
        print "python switches for %s: in: %d, out %d" % (eachLod,
                                                          self.switches[eachLod][0],
                                                          self.switches[eachLod][1])
    switchNum = self.__LODNode.node().getNumSwitches()
    for eachSwitch in range(0, switchNum):
        print "c++ switches for %d: in: %d, out: %d" % (eachSwitch,
                                                        self.__LODNode.node().getIn(eachSwitch),
                                                        self.__LODNode.node().getOut(eachSwitch))
def resetLOD(self):
    """
    Restore all switch distance info (usually after a useLOD call)"""
    self.__LODNode.node().clearForceSwitch()
    ## sortedKeys = self.switches.keys()
    ## sortedKeys.sort()
    ## for eachLod in sortedKeys:
    ##     index = sortedKeys.index(eachLod)
    ##     self.__LODNode.node().setSwitch(index, self.switches[eachLod][0],
    ##                                     self.switches[eachLod][1])
def addLOD(self, lodName, inDist=0, outDist=0, center=None):
    """addLOD(self, string)
    Add a named node under the LODNode to parent all geometry
    of a specific LOD under.

    inDist/outDist -- switch-in and switch-out distances for this LOD.
    center -- optional LOD center point; forwarded to setCenter().
    """
    self.__LODNode.attachNewNode(str(lodName))
    # save the switch distance info
    self.switches[lodName] = [inDist, outDist]
    # add the switch distance info
    self.__LODNode.node().addSwitch(inDist, outDist)
    if center != None:
        self.setCenter(center)
def setLOD(self, lodName, inDist=0, outDist=0):
    """setLOD(self, string)
    Set the switch distance for given LOD
    """
    # save the switch distance info
    self.switches[lodName] = [inDist, outDist]
    # add the switch distance info
    ## sortedKeys = self.switches.keys()
    ## sortedKeys.sort()
    # Update the C++ switch entry at the child's index.
    self.__LODNode.node().setSwitch(self.getLODIndex(lodName), inDist, outDist)
def getLODIndex(self, lodName):
    """getLODIndex(self)
    safe method (but expensive) for retrieving the child index
    """
    # Linear scan of the LOD node's children for the named LOD's NodePath.
    return list(self.__LODNode.getChildren()).index(self.getLOD(lodName))
def getLOD(self, lodName):
    """getLOD(self, string)
    Get the named node under the LOD to which we parent all LOD
    specific geometry to.  Returns None when the actor has no LOD node
    or when no child matches lodName.
    """
    if not self.__LODNode:
        return None
    lod = self.__LODNode.find(str(lodName))
    return None if lod.isEmpty() else lod
def hasLOD(self):
    """
    Return 1 if the actor has LODs, 0 otherwise
    """
    return self.__hasLOD
def setCenter(self, center):
    """Set the LOD center point (defaults to the origin when None).

    Propagates the new center to the LOD node and, when LOD-based
    animation throttling is active, re-applies it with the new center.
    """
    if center == None:
        center = Point3(0, 0, 0)
    self.__LODCenter = center
    if self.__LODNode:
        self.__LODNode.node().setCenter(self.__LODCenter)
    if self.__LODAnimation:
        self.setLODAnimation(*self.__LODAnimation)
def setLODAnimation(self, farDistance, nearDistance, delayFactor):
    """ Activates a special mode in which the Actor animates less
    frequently as it gets further from the camera.  This is
    intended as a simple optimization to minimize the effort of
    computing animation for lots of characters that may not
    necessarily be very important to animate every frame.

    If the character is closer to the camera than near_distance,
    then it is animated its normal rate, every frame.  If the
    character is exactly far_distance away, it is animated only
    every delay_factor seconds (which should be a number greater
    than 0).  If the character is between near_distance and
    far_distance, its animation rate is linearly interpolated
    according to its distance between the two.  The interpolation
    function continues beyond far_distance, so that the character
    is animated increasingly less frequently as it gets farther
    away. """
    # Remember the parameters so setCenter() can re-apply them.
    self.__LODAnimation = (farDistance, nearDistance, delayFactor)
    # Forward to every part's character node in every LOD.
    for lodData in self.__partBundleDict.values():
        for partData in lodData.values():
            char = partData.partBundleNP
            char.node().setLodAnimation(self.__LODCenter, farDistance, nearDistance, delayFactor)
def clearLODAnimation(self):
    """ Description: Undoes the effect of a recent call to
    set_lod_animation().  Henceforth, the character will animate
    every frame, regardless of its distance from the camera.
    """
    self.__LODAnimation = None
    # Clear the throttling on every part's character node in every LOD.
    for lodData in self.__partBundleDict.values():
        for partData in lodData.values():
            char = partData.partBundleNP
            char.node().clearLodAnimation()
def update(self, lod=0, partName=None, lodName=None, force=False):
    """ Updates all of the Actor's joints in the indicated LOD.
    The LOD may be specified by name, or by number, where 0 is the
    highest level of detail, 1 is the next highest, and so on.

    If force is True, this will update every joint, even if we
    don't believe it's necessary.

    Returns True if any joint has changed as a result of this,
    False otherwise. """
    if lodName == None:
        lodNames = self.getLODNames()
    else:
        lodNames = [lodName]
    anyChanged = False
    if lod < len(lodNames):
        lodName = lodNames[lod]
        if partName == None:
            partBundleDict = self.__partBundleDict[lodName]
            partNames = partBundleDict.keys()
        else:
            partNames = [partName]
        # Update each requested part's bundle; forceUpdate() bypasses the
        # "needs update" check, update() does not.
        for partName in partNames:
            partBundle = self.getPartBundle(partName, lodNames[lod])
            if force:
                if partBundle.forceUpdate():
                    anyChanged = True
            else:
                if partBundle.update():
                    anyChanged = True
    else:
        self.notify.warning('update() - no lod: %d' % lod)
    return anyChanged
def getFrameRate(self, animName=None, partName=None):
    """getFrameRate(self, string, string=None)
    Return actual frame rate (after play-rate scaling) of the given
    anim name and given part.
    If no anim specified, use the currently playing anim.
    If no part specified, return anim durations of first part.
    Returns None if no matching anim control exists.
    NOTE: returns info only for an arbitrary LOD
    """
    # (Removed an unused "first LOD name" lookup that raised IndexError
    # when the anim-control table was empty.)
    controls = self.getAnimControls(animName, partName)
    if not controls:
        return None
    return controls[0].getFrameRate()
def getBaseFrameRate(self, animName=None, partName=None):
    """getBaseFrameRate(self, string, string=None)
    Return frame rate of given anim name and given part, unmodified
    by any play rate in effect.  Returns None if no matching anim
    control exists.
    """
    # (Removed an unused "first LOD name" lookup that raised IndexError
    # when the anim-control table was empty.)
    controls = self.getAnimControls(animName, partName)
    if not controls:
        return None
    return controls[0].getAnim().getBaseFrameRate()
def getPlayRate(self, animName=None, partName=None):
    """
    Return the play rate of given anim for a given part.
    If no part is given, assume first part in dictionary.
    If no anim is given, find the current anim for the part.
    Returns None when no anims are loaded or nothing matches.
    NOTE: Returns info only for an arbitrary LOD
    """
    # (Removed an unused "first LOD name" lookup; the guard on the
    # anim-control table is kept so an empty actor returns None.)
    if self.__animControlDict:
        controls = self.getAnimControls(animName, partName)
        if controls:
            return controls[0].getPlayRate()
    return None
def setPlayRate(self, rate, animName, partName=None):
    """setPlayRate(self, float, string, string=None)
    Set the play rate of the named anim for a given part (all parts
    when partName is None).

    animName is deliberately mandatory: defaulting to the currently
    playing anim proved confusing and could set the wrong anim's rate.
    NOTE: sets play rate on all LODs"""
    controls = self.getAnimControls(animName, partName)
    for ctrl in controls:
        ctrl.setPlayRate(rate)
def getDuration(self, animName=None, partName=None,
                fromFrame=None, toFrame=None):
    """
    Return duration in seconds of (a slice of) the given anim on the
    given part.
    If no anim specified, use the currently playing anim.
    If no part specified, return anim duration of first part.
    fromFrame/toFrame select an inclusive frame range; they default to
    the whole anim.  Returns None when nothing matches.
    NOTE: returns info for arbitrary LOD
    """
    # (Removed an unused "first LOD name" lookup that raised IndexError
    # when the anim-control table was empty.)
    controls = self.getAnimControls(animName, partName)
    if not controls:
        return None
    animControl = controls[0]
    if fromFrame is None:
        fromFrame = 0
    if toFrame is None:
        toFrame = animControl.getNumFrames() - 1
    # The range fromFrame..toFrame is inclusive, hence the +1.
    return ((toFrame + 1) - fromFrame) / animControl.getFrameRate()
def getNumFrames(self, animName=None, partName=None):
    """Return the frame count of the given anim on the given part.

    Falls back to the currently playing anim / first part when the
    arguments are None; returns None when nothing matches.
    NOTE: queries an arbitrary LOD only.
    """
    # (Removed an unused "first LOD name" lookup that raised IndexError
    # when the anim-control table was empty.)
    controls = self.getAnimControls(animName, partName)
    if not controls:
        return None
    return controls[0].getNumFrames()
def getFrameTime(self, anim, frame, partName=None):
    """Convert a frame index of the named anim into a time in seconds."""
    totalFrames = self.getNumFrames(anim, partName)
    duration = self.getDuration(anim, partName)
    return duration * float(frame) / totalFrames
def getCurrentAnim(self, partName=None):
    """
    Return the anim currently playing on the actor.  If part not
    specified return current anim of an arbitrary part in dictionary.
    NOTE: only returns info for an arbitrary LOD
    """
    if len(self.__animControlDict.items()) == 0:
        return
    # Peek at an arbitrary ("first") LOD's part->anim table.
    lodName, animControlDict = self.__animControlDict.items()[0]
    if partName == None:
        partName, animDict = animControlDict.items()[0]
    else:
        animDict = animControlDict.get(partName)
        if animDict == None:
            # part was not present
            Actor.notify.warning("couldn't find part: %s" % (partName))
            return None
    # loop through all anims for named part and find if any are playing
    for animName, anim in animDict.items():
        if anim.animControl and anim.animControl.isPlaying():
            return animName
    # we must have found none, or gotten an error
    return None
def getCurrentFrame(self, animName=None, partName=None):
    """
    Return the current frame number of the named anim, or if no
    anim is specified, then the anim current playing on the
    actor.  If part not specified return current anim of first part
    in dictionary.  NOTE: only returns info for an arbitrary LOD
    """
    # NOTE(review): unlike getCurrentAnim(), this does not guard against
    # an empty anim-control table before indexing -- confirm callers
    # never reach here on an empty actor.
    lodName, animControlDict = self.__animControlDict.items()[0]
    if partName == None:
        partName, animDict = animControlDict.items()[0]
    else:
        animDict = animControlDict.get(partName)
        if animDict == None:
            # part was not present
            Actor.notify.warning("couldn't find part: %s" % (partName))
            return None
    if animName:
        anim = animDict.get(animName)
        if not anim:
            Actor.notify.warning("couldn't find anim: %s" % (animName))
        elif anim.animControl:
            # Known anim that is bound: report its frame even if stopped.
            return anim.animControl.getFrame()
    else:
        # loop through all anims for named part and find if any are playing
        for animName, anim in animDict.items():
            if anim.animControl and anim.animControl.isPlaying():
                return anim.animControl.getFrame()
    # we must have found none, or gotten an error
    return None
# arranging
def getPart(self, partName, lodName="lodRoot"):
    """
    Find the named part in the optional named lod and return it, or
    return None if not present
    """
    partBundleDict = self.__partBundleDict.get(lodName)
    if not partBundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    # Subparts resolve through their true (parent) part's bundle.
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    partDef = partBundleDict.get(subpartDef.truePartName)
    if partDef != None:
        return partDef.partBundleNP
    return None
def getPartBundle(self, partName, lodName="lodRoot"):
    """
    Find the named part in the optional named lod and return its
    associated PartBundle, or return None if not present
    """
    partBundleDict = self.__partBundleDict.get(lodName)
    if not partBundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    # Subparts resolve through their true (parent) part's bundle.
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    partDef = partBundleDict.get(subpartDef.truePartName)
    if partDef != None:
        return partDef.getBundle()
    return None
def removePart(self, partName, lodName="lodRoot"):
    """
    Remove the geometry and animations of the named part of the
    optional named lod if present.
    NOTE: this will remove child geometry also!
    """
    # find the corresponding part bundle dict
    partBundleDict = self.__partBundleDict.get(lodName)
    if not partBundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return
    # remove the part
    # NOTE(review): partName is looked up directly, not routed through
    # __subpartDict like getPart() -- confirm subparts are intentionally
    # not removable here.
    if (partName in partBundleDict):
        partBundleDict[partName].partBundleNP.removeNode()
        del(partBundleDict[partName])
    # find the corresponding anim control dict
    # With merged LOD bundles, anims live under the single 'common' key.
    if self.mergeLODBundles:
        lodName = 'common'
    partDict = self.__animControlDict.get(lodName)
    if not partDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return
    # remove the animations
    if (partName in partDict):
        del(partDict[partName])
def hidePart(self, partName, lodName="lodRoot"):
    """
    Make the given part of the optionally given lod not render,
    even though still in the tree.
    NOTE: this will affect child geometry
    """
    partBundleDict = self.__partBundleDict.get(lodName)
    if not partBundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return
    partDef = partBundleDict.get(partName)
    if not partDef:
        Actor.notify.warning("no part named %s!" % (partName))
        return
    partDef.partBundleNP.hide()
def showPart(self, partName, lodName="lodRoot"):
    """
    Make the given part render while in the tree.
    NOTE: this will affect child geometry
    """
    partBundleDict = self.__partBundleDict.get(lodName)
    if not partBundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return
    partDef = partBundleDict.get(partName)
    if not partDef:
        Actor.notify.warning("no part named %s!" % (partName))
        return
    partDef.partBundleNP.show()
def showAllParts(self, partName, lodName="lodRoot"):
    """
    Make the given part and all its children render while in the tree.
    NOTE: this will affect child geometry
    """
    partBundleDict = self.__partBundleDict.get(lodName)
    if not partBundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return
    partDef = partBundleDict.get(partName)
    if not partDef:
        Actor.notify.warning("no part named %s!" % (partName))
        return
    np = partDef.partBundleNP
    np.show()
    np.getChildren().show()
def exposeJoint(self, node, partName, jointName, lodName="lodRoot",
                localTransform = 0):
    """exposeJoint(self, NodePath, string, string, key="lodRoot")
    Starts the joint animating the indicated node.  As the joint
    animates, it will transform the node by the corresponding
    amount.  This will replace whatever matrix is on the node each
    frame.  The default is to expose the net transform from the root,
    but if localTransform is true, only the node's local transform
    from its parent is exposed.

    Returns the node (a new one is created under the actor when the
    node argument is None), or None when the lod/part is not found.
    """
    partBundleDict = self.__partBundleDict.get(lodName)
    if not partBundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    partDef = partBundleDict.get(subpartDef.truePartName)
    if partDef:
        bundle = partDef.getBundle()
    else:
        Actor.notify.warning("no part named %s!" % (partName))
        return None
    # Get a handle to the joint.
    joint = bundle.findChild(jointName)
    if node == None:
        node = self.attachNewNode(jointName)
    if (joint):
        if localTransform:
            joint.addLocalTransform(node.node())
        else:
            joint.addNetTransform(node.node())
    else:
        Actor.notify.warning("no joint named %s!" % (jointName))
    return node
def stopJoint(self, partName, jointName, lodName="lodRoot"):
    """stopJoint(self, string, string, key="lodRoot")
    Stops the joint from animating external nodes.  If the joint
    is animating a transform on a node, this will permanently stop
    it.  However, this does not affect vertex animations."""
    partBundleDict = self.__partBundleDict.get(lodName)
    if not partBundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    partDef = partBundleDict.get(subpartDef.truePartName)
    if partDef:
        bundle = partDef.getBundle()
    else:
        Actor.notify.warning("no part named %s!" % (partName))
        return None
    # Get a handle to the joint.
    joint = bundle.findChild(jointName)
    if (joint):
        # Drop both net and local transform hookups added by exposeJoint().
        joint.clearNetTransforms()
        joint.clearLocalTransforms()
    else:
        Actor.notify.warning("no joint named %s!" % (jointName))
def getJoints(self, partName = None, jointName = '*', lodName = None):
    """ Returns the list of all joints, from the named part or
    from all parts, that match the indicated jointName.  The
    jointName may include pattern characters like *. """
    joints=[]
    pattern = GlobPattern(jointName)
    if lodName == None and self.mergeLODBundles:
        # Get the common bundle.
        partBundleDicts = [self.__commonBundleHandles]
    elif lodName == None:
        # Get all LOD's.
        partBundleDicts = self.__partBundleDict.values()
    else:
        # Get one LOD.
        partBundleDict = self.__partBundleDict.get(lodName)
        if not partBundleDict:
            Actor.notify.warning("couldn't find lod: %s" % (lodName))
            return []
        partBundleDicts = [partBundleDict]
    for partBundleDict in partBundleDicts:
        parts = []
        if partName:
            subpartDef = self.__subpartDict.get(partName, None)
            if not subpartDef:
                # Whole part
                subset = None
                partDef = partBundleDict.get(partName)
            else:
                # Sub-part: constrain traversal to the subpart's subset.
                subset = subpartDef.subset
                partDef = partBundleDict.get(subpartDef.truePartName)
            if not partDef:
                Actor.notify.warning("no part named %s!" % (partName))
                return []
            parts = [partDef]
        else:
            subset = None
            parts = partBundleDict.values()
        for partData in parts:
            partBundle = partData.getBundle()
            if not pattern.hasGlobCharacters() and not subset:
                # The simple case: exact name, no subset filter.
                joint = partBundle.findChild(jointName)
                if joint:
                    joints.append(joint)
            else:
                # The more complex case: recursive pattern/subset walk.
                isIncluded = True
                if subset:
                    # Start included only if the subset has no include rules.
                    isIncluded = subset.isIncludeEmpty()
                self.__getPartJoints(joints, pattern, partBundle, subset, isIncluded)
    return joints
def getOverlappingJoints(self, partNameA, partNameB, jointName = '*', lodName = None):
    """ Returns the set of joints, matching jointName, that are
    shared between partNameA and partNameB. """
    jointsA = self.getJoints(partName = partNameA, jointName = jointName, lodName = lodName)
    jointsB = self.getJoints(partName = partNameB, jointName = jointName, lodName = lodName)
    return set(jointsA).intersection(jointsB)
def __getPartJoints(self, joints, pattern, partNode, subset, isIncluded):
    """ Recursively walks the joint hierarchy to look for matching
    joint names, implementing getJoints().

    joints -- output list, appended to in place.
    pattern -- GlobPattern to match joint names against.
    subset -- optional subpart subset; include/exclude rules toggle
    isIncluded as the walk descends.
    """
    name = partNode.getName()
    if subset:
        # Constrain the traversal just to the named subset.
        if subset.matchesInclude(name):
            isIncluded = True
        elif subset.matchesExclude(name):
            isIncluded = False
    # Only actual moving parts count as joints.
    if isIncluded and pattern.matches(name) and isinstance(partNode, MovingPartBase):
        joints.append(partNode)
    for child in partNode.getChildren():
        self.__getPartJoints(joints, pattern, child, subset, isIncluded)
def getJointTransform(self, partName, jointName, lodName='lodRoot'):
    """Return the named joint's default transform value, or None when
    the lod, part, or joint cannot be found (a warning is issued).
    """
    partBundleDict=self.__partBundleDict.get(lodName)
    if not partBundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    partDef = partBundleDict.get(subpartDef.truePartName)
    if partDef:
        bundle = partDef.getBundle()
    else:
        Actor.notify.warning("no part named %s!" % (partName))
        return None
    joint = bundle.findChild(jointName)
    if joint == None:
        Actor.notify.warning("no joint named %s!" % (jointName))
        return None
    return joint.getDefaultValue()
def getJointTransformState(self, partName, jointName, lodName='lodRoot'):
    """Return the named joint's current TransformState, or None when
    the lod, part, or joint cannot be found (a warning is issued).
    """
    partBundleDict=self.__partBundleDict.get(lodName)
    if not partBundleDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    partDef = partBundleDict.get(subpartDef.truePartName)
    if partDef:
        bundle = partDef.getBundle()
    else:
        Actor.notify.warning("no part named %s!" % (partName))
        return None
    joint = bundle.findChild(jointName)
    if joint == None:
        Actor.notify.warning("no joint named %s!" % (jointName))
        return None
    return joint.getTransformState()
def controlJoint(self, node, partName, jointName, lodName="lodRoot"):
    """The converse of exposeJoint: this associates the joint with
    the indicated node, so that the joint transform will be copied
    from the node to the joint each frame.  This can be used for
    programmer animation of a particular joint at runtime.

    The parameter node should be the NodePath for the node whose
    transform will animate the joint.  If node is None, a new node
    will automatically be created and loaded with the joint's
    initial transform.  In either case, the node used will be
    returned.

    It used to be necessary to call this before any animations
    have been loaded and bound, but that is no longer so.
    """
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    trueName = subpartDef.truePartName
    anyGood = False
    # Hook the node up in every LOD's bundle for the true part.
    for bundleDict in self.__partBundleDict.values():
        bundle = bundleDict[trueName].getBundle()
        if node == None:
            # First bundle seen: create the control node and seed it
            # with the joint's default transform when available.
            node = self.attachNewNode(ModelNode(jointName))
            joint = bundle.findChild(jointName)
            if joint and isinstance(joint, MovingPartMatrix):
                node.setMat(joint.getDefaultValue())
        if bundle.controlJoint(jointName, node.node()):
            anyGood = True
    if not anyGood:
        self.notify.warning("Cannot control joint %s" % (jointName))
    return node
def freezeJoint(self, partName, jointName, transform = None,
                pos=Vec3(0,0,0), hpr=Vec3(0,0,0), scale=Vec3(1,1,1)):
    """Similar to controlJoint, but the transform assigned is
    static, and may not be animated at runtime (without another
    subsequent call to freezeJoint).  This is slightly more
    optimal than controlJoint() for cases in which the transform
    is not intended to be animated during the lifetime of the
    Actor.

    NOTE(review): the Vec3 defaults are evaluated once at definition
    time (shared across calls); they are only read here, but confirm
    no caller mutates them.
    """
    if transform == None:
        transform = TransformState.makePosHprScale(pos, hpr, scale)
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    trueName = subpartDef.truePartName
    anyGood = False
    # Apply the frozen transform to every LOD's bundle for the part.
    for bundleDict in self.__partBundleDict.values():
        if bundleDict[trueName].getBundle().freezeJoint(jointName, transform):
            anyGood = True
    if not anyGood:
        self.notify.warning("Cannot freeze joint %s" % (jointName))
def releaseJoint(self, partName, jointName):
    """Undoes a previous call to controlJoint() or freezeJoint()
    and restores the named joint to its normal animation. """
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    trueName = subpartDef.truePartName
    # Release in every LOD's bundle for the true part.
    for bundleDict in self.__partBundleDict.values():
        bundleDict[trueName].getBundle().releaseJoint(jointName)
def instance(self, path, partName, jointName, lodName="lodRoot"):
    """instance(self, NodePath, string, string, key="lodRoot")
    Instance a nodePath to an actor part at a joint called jointName.

    Returns the new instance NodePath on success; returns None (after
    a warning) when the lod, part, or joint is not found.
    """
    partBundleDict = self.__partBundleDict.get(lodName)
    if partBundleDict:
        subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
        partDef = partBundleDict.get(subpartDef.truePartName)
        if partDef:
            # The joint must have been exposed as a node under the part.
            joint = partDef.partBundleNP.find("**/" + jointName)
            if (joint.isEmpty()):
                Actor.notify.warning("%s not found!" % (jointName))
            else:
                return path.instanceTo(joint)
        else:
            Actor.notify.warning("no part named %s!" % (partName))
    else:
        Actor.notify.warning("no lod named %s!" % (lodName))
def attach(self, partName, anotherPartName, jointName, lodName="lodRoot"):
    """attach(self, string, string, string, key="lodRoot")
    Attach one actor part to another at a joint called jointName"""
    partBundleDict = self.__partBundleDict.get(lodName)
    if partBundleDict:
        subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
        partDef = partBundleDict.get(subpartDef.truePartName)
        if partDef:
            anotherPartDef = partBundleDict.get(anotherPartName)
            if anotherPartDef:
                # The joint must exist as an exposed node under the
                # target part; reparent the source part beneath it.
                joint = anotherPartDef.partBundleNP.find("**/" + jointName)
                if (joint.isEmpty()):
                    Actor.notify.warning("%s not found!" % (jointName))
                else:
                    partDef.partBundleNP.reparentTo(joint)
            else:
                Actor.notify.warning("no part named %s!" % (anotherPartName))
        else:
            Actor.notify.warning("no part named %s!" % (partName))
    else:
        Actor.notify.warning("no lod named %s!" % (lodName))
def drawInFront(self, frontPartName, backPartName, mode,
                root=None, lodName=None):
    """drawInFront(self, string, int, string=None, key=None)
    Arrange geometry so the frontPart(s) are drawn in front of
    backPart.

    If mode == -1, the geometry is simply arranged to be drawn in
    the correct order, assuming it is already under a
    direct-render scene graph (like the DirectGui system).  That
    is, frontPart is reparented to backPart, and backPart is
    reordered to appear first among its siblings.

    If mode == -2, the geometry is arranged to be drawn in the
    correct order, and depth test/write is turned off for
    frontPart.

    If mode == -3, frontPart is drawn as a decal onto backPart.
    This assumes that frontPart is mostly coplanar with and does
    not extend beyond backPart, and that backPart is mostly flat
    (not self-occluding).

    If mode > 0, the frontPart geometry is placed in the 'fixed'
    bin, with the indicated drawing order.  This will cause it to
    be drawn after almost all other geometry.  In this case, the
    backPartName is actually unused.

    Takes an optional argument root as the start of the search for the
    given parts.  Also takes optional lod name to refine search for the
    named parts.  If root and lod are defined, we search for the given
    root under the given lod.
    """
    # check to see if we are working within an lod
    if lodName != None:
        # find the named lod node
        lodRoot = self.__LODNode.find(str(lodName))
        if root == None:
            # no need to look further
            root = lodRoot
        else:
            # look for root under lod
            root = lodRoot.find("**/" + root)
    else:
        # start search from self if no root and no lod given
        if root == None:
            root = self
    frontParts = root.findAllMatches("**/" + frontPartName)
    if mode > 0:
        # Use the 'fixed' bin instead of reordering the scene
        # graph.
        numFrontParts = frontParts.getNumPaths()
        for partNum in range(0, numFrontParts):
            frontParts[partNum].setBin('fixed', mode)
        return
    if mode == -2:
        # Turn off depth test/write on the frontParts.
        numFrontParts = frontParts.getNumPaths()
        for partNum in range(0, numFrontParts):
            frontParts[partNum].setDepthWrite(0)
            frontParts[partNum].setDepthTest(0)
    # Find the back part.
    backPart = root.find("**/" + backPartName)
    if (backPart.isEmpty()):
        Actor.notify.warning("no part named %s!" % (backPartName))
        return
    if mode == -3:
        # Draw as a decal.
        backPart.node().setEffect(DecalEffect.make())
    else:
        # Reorder the backPart to be the first of its siblings.
        backPart.reparentTo(backPart.getParent(), -1)
    #reparent all the front parts to the back part
    frontParts.reparentTo(backPart)
def fixBounds(self, partName = None):
    """Force recomputation of bounding volumes for a part's geometry.

    partName -- name of the single part to fix; None fixes every part
    of every LOD.

    (Refactored: the identical per-character body that was duplicated
    in both branches now lives in one local helper.)
    """
    def fixChar(char):
        # Bring the character up to date, then mark every Geom and each
        # GeomNode's internal bound stale so bounds get recomputed.
        char.node().update()
        geomNodes = char.findAllMatches("**/+GeomNode")
        for nodeNum in range(geomNodes.getNumPaths()):
            thisGeomNode = geomNodes.getPath(nodeNum)
            for geomNum in range(thisGeomNode.node().getNumGeoms()):
                thisGeomNode.node().getGeom(geomNum).markBoundsStale()
            thisGeomNode.node().markInternalBoundsStale()

    for lodData in self.__partBundleDict.values():
        if partName is None:
            # iterate through everything
            for partData in lodData.values():
                fixChar(partData.partBundleNP)
        else:
            # iterate through for a specific part
            partData = lodData.get(partName)
            if partData:
                fixChar(partData.partBundleNP)
def fixBounds_old(self, part=None):
    """fixBounds(self, nodePath=None)
    Force recomputation of bounding spheres for all geoms
    in a given part.  If no part specified, fix all geoms
    in this actor
    """
    # if no part name specified fix all parts
    if (part==None):
        part = self
    # update all characters first
    charNodes = part.findAllMatches("**/+Character")
    numCharNodes = charNodes.getNumPaths()
    for charNum in range(0, numCharNodes):
        (charNodes.getPath(charNum)).node().update()
    # for each geomNode, iterate through all geoms and force update
    # of bounding spheres by marking current bounds as stale
    geomNodes = part.findAllMatches("**/+GeomNode")
    numGeomNodes = geomNodes.getNumPaths()
    for nodeNum in range(0, numGeomNodes):
        thisGeomNode = geomNodes.getPath(nodeNum)
        numGeoms = thisGeomNode.node().getNumGeoms()
        for geomNum in range(0, numGeoms):
            thisGeom = thisGeomNode.node().getGeom(geomNum)
            thisGeom.markBoundsStale()
            # Debug trace only; assert compiles this out under -O.
            assert Actor.notify.debug("fixing bounds for node %s, geom %s" % \
                                      (nodeNum, geomNum))
        thisGeomNode.node().markInternalBoundsStale()
def showAllBounds(self):
    """
    Show the bounds of all actor geoms
    """
    geomNodes = self.__geomNode.findAllMatches("**/+GeomNode")
    for nodeNum in range(geomNodes.getNumPaths()):
        geomNodes.getPath(nodeNum).showBounds()
def hideAllBounds(self):
    """
    Hide the bounds of all actor geoms
    """
    geomNodes = self.__geomNode.findAllMatches("**/+GeomNode")
    for nodeNum in range(geomNodes.getNumPaths()):
        geomNodes.getPath(nodeNum).hideBounds()
# actions
def animPanel(self):
    """Create and return a Tk AnimPanel bound to this actor.

    TkGlobal is imported for its side effects (Tk setup); the name
    itself is unused here.
    """
    from direct.showbase import TkGlobal
    from direct.tkpanels import AnimPanel
    return AnimPanel.AnimPanel(self)
def stop(self, animName=None, partName=None):
    """stop(self, string=None, string=None)
    Stop named animation on the given part of the actor.
    If no name specified then stop all animations on the actor.
    NOTE: stops all LODs"""
    controls = self.getAnimControls(animName, partName)
    for ctrl in controls:
        ctrl.stop()
def play(self, animName, partName=None, fromFrame=None, toFrame=None):
    """play(self, string, string=None)
    Play the given animation on the given part of the actor.
    If no part is specified, try to play on all parts.
    fromFrame/toFrame select an inclusive frame range; a missing
    toFrame means the anim's final frame.  NOTE: plays over ALL LODs"""
    for ctrl in self.getAnimControls(animName, partName):
        if fromFrame is None:
            ctrl.play()
        elif toFrame is None:
            ctrl.play(fromFrame, ctrl.getNumFrames() - 1)
        else:
            ctrl.play(fromFrame, toFrame)
def loop(self, animName, restart=1, partName=None,
         fromFrame=None, toFrame=None):
    """loop(self, string, int=1, string=None)
    Loop the given animation on the given part of the actor,
    restarting at zero frame if requested.  If no part name
    is given then try to loop on all parts.
    fromFrame/toFrame select an inclusive frame range; a missing
    toFrame means the anim's final frame.  NOTE: loops on all LOD's
    """
    for ctrl in self.getAnimControls(animName, partName):
        if fromFrame is None:
            ctrl.loop(restart)
        elif toFrame is None:
            ctrl.loop(restart, fromFrame, ctrl.getNumFrames() - 1)
        else:
            ctrl.loop(restart, fromFrame, toFrame)
def pingpong(self, animName, restart=1, partName=None,
             fromFrame=None, toFrame=None):
    """Play the given animation back and forth on the given part of
    the actor, restarting at the starting frame if restart is true.
    If no part name is given, try all parts.  fromFrame defaults to
    0, toFrame to the last frame.  NOTE: affects all LODs."""
    if fromFrame is None:
        fromFrame = 0
    for ctrl in self.getAnimControls(animName, partName):
        if toFrame is None:
            ctrl.pingpong(restart, fromFrame, ctrl.getNumFrames() - 1)
        else:
            ctrl.pingpong(restart, fromFrame, toFrame)
def pose(self, animName, frame, partName=None, lodName=None):
    """Pose the actor at the given frame of the named animation on
    the specified part.  If no part is specified, attempt to apply
    the pose to all parts."""
    controls = self.getAnimControls(animName, partName, lodName)
    for ctrl in controls:
        ctrl.pose(frame)
def setBlend(self, animBlend = None, frameBlend = None,
             blendType = None, partName = None):
    """
    Changes the way the Actor handles blending of multiple
    different animations, and/or interpolation between consecutive
    frames.

    The animBlend and frameBlend parameters are boolean flags.
    You may set either or both to True or False.  If you do not
    specify them, they do not change from the previous value.

    When animBlend is True, multiple different animations may
    simultaneously be playing on the Actor.  This means you may
    call play(), loop(), or pose() on multiple animations and have
    all of them contribute to the final pose each frame.

    In this mode (that is, when animBlend is True), starting a
    particular animation with play(), loop(), or pose() does not
    implicitly make the animation visible; you must also call
    setControlEffect() for each animation you wish to use to
    indicate how much each animation contributes to the final
    pose.

    The frameBlend flag is unrelated to playing multiple
    animations.  It controls whether the Actor smoothly
    interpolates between consecutive frames of its animation (when
    the flag is True) or holds each frame until the next one is
    ready (when the flag is False).  The default value of
    frameBlend is controlled by the interpolate-frames Config.prc
    variable.

    In either case, you may also specify blendType, which controls
    the precise algorithm used to blend two or more different
    matrix values into a final result.  Different skeleton
    hierarchies may benefit from different algorithms.  The
    default blendType is controlled by the anim-blend-type
    Config.prc variable.
    """
    # Only the explicitly supplied settings are applied, so
    # unspecified parameters keep their previous values on each
    # bundle.
    for bundle in self.getPartBundles(partName = partName):
        if blendType != None:
            bundle.setBlendType(blendType)
        if animBlend != None:
            bundle.setAnimBlendFlag(animBlend)
        if frameBlend != None:
            bundle.setFrameBlendFlag(frameBlend)
def enableBlend(self, blendType = PartBundle.BTNormalizedLinear, partName = None):
    """Deprecated: use setBlend(animBlend=True) instead.

    Turns on simultaneous multi-animation blending for the actor.
    Once enabled, play()/loop()/pose() no longer implicitly make an
    animation visible; call setControlEffect() for each animation to
    set how much it contributes to the final pose."""
    self.setBlend(animBlend = True, blendType = blendType,
                  partName = partName)
def disableBlend(self, partName = None):
    """Deprecated: use setBlend(animBlend=False) instead.

    Restores normal one-animation-at-a-time operation after a
    previous call to enableBlend()."""
    self.setBlend(partName = partName, animBlend = False)
def setControlEffect(self, animName, effect,
                     partName = None, lodName = None):
    """Set how much the named animation contributes to the overall
    pose.  This governs blending of multiple animations, so it is
    only meaningful after a previous setBlend(animBlend = True)."""
    controls = self.getAnimControls(animName, partName, lodName)
    for ctrl in controls:
        ctrl.getPart().setControlEffect(ctrl, effect)
def getAnimFilename(self, animName, partName='modelRoot'):
    """Return the animation filename registered for animName on the
    given part, or None if the animation is not known.

    The lod bucket consulted is 'common' when LOD bundles are merged,
    otherwise the first LOD switch (or 'lodRoot' for non-LOD actors).
    """
    if self.mergeLODBundles:
        lodName = 'common'
    elif self.switches:
        lodName = str(self.switches.keys()[0])
    else:
        lodName = 'lodRoot'

    try:
        return self.__animControlDict[lodName][partName][animName].filename
    except KeyError:
        # Unknown lod/part/anim means "no filename".  Previously a
        # bare except here would also have hidden genuine errors
        # (e.g. AttributeError from a corrupt dict).
        return None
def getAnimControl(self, animName, partName=None, lodName=None,
                   allowAsyncBind = True):
    """
    getAnimControl(self, string, string, string="lodRoot")

    Search the animControl dictionary indicated by lodName for
    a given anim and part.  If none specified, try the first part
    and lod.  Return the AnimControl if present, or None otherwise.

    If the animation is known but not yet bound to the part, it is
    bound here; when allowAsyncBind is False, this call also blocks
    until any pending asynchronous bind has completed.
    """
    if not partName:
        partName = 'modelRoot'

    # Choose the lod bucket: merged actors keep everything under
    # 'common'; otherwise default to the first LOD switch, or
    # 'lodRoot' for non-LOD actors.
    if self.mergeLODBundles:
        lodName = 'common'

    elif not lodName:
        if self.switches:
            lodName = str(self.switches.keys()[0])
        else:
            lodName = 'lodRoot'

    partDict = self.__animControlDict.get(lodName)
    # if this assertion fails, named lod was not present
    assert partDict != None

    animDict = partDict.get(partName)
    if animDict == None:
        # part was not present
        Actor.notify.warning("couldn't find part: %s" % (partName))
    else:
        anim = animDict.get(animName)
        if anim == None:
            # anim was not present
            assert Actor.notify.debug("couldn't find anim: %s" % (animName))
            pass
        else:
            # bind the animation first if we need to
            if not anim.animControl:
                self.__bindAnimToPart(animName, partName, lodName,
                                      allowAsyncBind = allowAsyncBind)
            elif not allowAsyncBind:
                # Already bound, but possibly still loading in a
                # sub-thread; block until it is ready.
                anim.animControl.waitPending()
            return anim.animControl

    return None
def getAnimControls(self, animName=None, partName=None, lodName=None,
                    allowAsyncBind = True):
    """getAnimControls(self, string, string=None, string=None)

    Returns a list of the AnimControls that represent the given
    animation for the given part and the given lod.

    If animName is None or omitted, the currently-playing
    animation (or all currently-playing animations) is returned.
    If animName is True, all animations are returned.  If animName
    is a single string name, that particular animation is
    returned.  If animName is a list of string names, all of the
    named animations are returned.

    If partName is None or omitted, all parts are returned (or
    possibly the one overall Actor part, according to the
    subpartsComplete flag).

    If lodName is None or omitted, all LOD's are returned.
    """

    if partName == None and self.__subpartsComplete:
        # If we have the __subpartsComplete flag, and no partName
        # is specified, it really means to play the animation on
        # all subparts, not on the overall Actor.
        partName = self.__subpartDict.keys()

    controls = []

    # build list of lodNames and corresponding animControlDicts
    # requested.
    if lodName == None or self.mergeLODBundles:
        # Get all LOD's
        animControlDictItems = self.__animControlDict.items()
    else:
        partDict = self.__animControlDict.get(lodName)
        if partDict == None:
            Actor.notify.warning("couldn't find lod: %s" % (lodName))
            animControlDictItems = []
        else:
            animControlDictItems = [(lodName, partDict)]

    for lodName, partDict in animControlDictItems:
        # Now, build the list of partNames and the corresponding
        # animDicts.
        if partName == None:
            # Get all main parts, but not sub-parts.
            animDictItems = []
            for thisPart, animDict in partDict.items():
                if thisPart not in self.__subpartDict:
                    animDictItems.append((thisPart, animDict))

        else:
            # Get exactly the named part or parts.
            if isinstance(partName, types.StringTypes):
                partNameList = [partName]
            else:
                partNameList = partName

            animDictItems = []

            for pName in partNameList:
                animDict = partDict.get(pName)
                if animDict == None:
                    # Maybe it's a subpart that hasn't been bound yet.
                    subpartDef = self.__subpartDict.get(pName)
                    if subpartDef:
                        # Create an empty anim dict for the subpart;
                        # anims are copied in from the true part on
                        # demand, further below.
                        animDict = {}
                        partDict[pName] = animDict

                if animDict == None:
                    # part was not present
                    Actor.notify.warning("couldn't find part: %s" % (pName))
                else:
                    animDictItems.append((pName, animDict))

        if animName is None:
            # get all playing animations
            for thisPart, animDict in animDictItems:
                for anim in animDict.values():
                    if anim.animControl and anim.animControl.isPlaying():
                        controls.append(anim.animControl)
        else:
            # get the named animation(s) only.
            if isinstance(animName, types.StringTypes):
                # A single animName
                animNameList = [animName]
            else:
                # A list of animNames, or True to indicate all anims.
                animNameList = animName
            for thisPart, animDict in animDictItems:
                names = animNameList
                if animNameList is True:
                    names = animDict.keys()
                for animName in names:
                    anim = animDict.get(animName)
                    if anim == None and partName != None:
                        for pName in partNameList:
                            # Maybe it's a subpart that hasn't been bound yet.
                            subpartDef = self.__subpartDict.get(pName)
                            if subpartDef:
                                truePartName = subpartDef.truePartName
                                anim = partDict[truePartName].get(animName)
                                if anim:
                                    # Copy the anim definition from the
                                    # true part into the subpart's dict.
                                    anim = anim.makeCopy()
                                    animDict[animName] = anim

                    if anim == None:
                        # anim was not present
                        assert Actor.notify.debug("couldn't find anim: %s" % (animName))
                        pass
                    else:
                        # bind the animation first if we need to
                        animControl = anim.animControl
                        if animControl == None:
                            animControl = self.__bindAnimToPart(
                                animName, thisPart, lodName,
                                allowAsyncBind = allowAsyncBind)
                        elif not allowAsyncBind:
                            # Force the animation to load if it's
                            # not already loaded.
                            animControl.waitPending()
                        if animControl:
                            controls.append(animControl)

    return controls
def loadModel(self, modelPath, partName="modelRoot", lodName="lodRoot",
              copy = True, okMissing = None, autoBindAnims = True):
    """Actor model loader.  Takes a model name (ie file path) or a
    pre-loaded NodePath, a part name (defaults to "modelRoot") and an
    lod name (defaults to "lodRoot").  okMissing is forwarded to the
    loader when loading from disk; autoBindAnims controls whether
    animations embedded in the model file are bound immediately.
    """
    assert partName not in self.__subpartDict
    assert Actor.notify.debug("in loadModel: %s, part: %s, lod: %s, copy: %s" % \
                              (modelPath, partName, lodName, copy))

    if isinstance(modelPath, NodePath):
        # If we got a NodePath instead of a string, use *that* as
        # the model directly.
        if (copy):
            model = modelPath.copyTo(NodePath())
        else:
            model = modelPath
    else:
        # otherwise, we got the name of the model to load.
        loaderOptions = self.modelLoaderOptions
        if not copy:
            # If copy = 0, then we should always hit the disk.
            loaderOptions = LoaderOptions(loaderOptions)
            loaderOptions.setFlags(loaderOptions.getFlags() & ~LoaderOptions.LFNoRamCache)

        # Pass loaderOptions to specify that we want to
        # get the skeleton model.  This only matters to model
        # files (like .mb) for which we can choose to extract
        # either the skeleton or animation, or neither.
        model = loader.loadModel(modelPath, loaderOptions = loaderOptions, okMissing = okMissing)

    if (model == None):
        raise StandardError, "Could not load Actor model %s" % (modelPath)

    # Find the Character node: either the model root itself, or a
    # descendant.
    if (model.node().isOfType(Character.getClassType())):
        bundleNP = model
    else:
        bundleNP = model.find("**/+Character")

    if (bundleNP.isEmpty()):
        # Not an animated character at all; just parent the static
        # geometry under the actor.
        Actor.notify.warning("%s is not a character!" % (modelPath))
        model.reparentTo(self.__geomNode)
    else:
        # Maybe the model file also included some animations.  If
        # so, try to bind them immediately and put them into the
        # animControlDict.
        if autoBindAnims:
            acc = AnimControlCollection()
            autoBind(model.node(), acc, ~0)
            numAnims = acc.getNumAnims()
        else:
            numAnims = 0

        # Now extract out the Character and integrate it with
        # the Actor.

        if (lodName!="lodRoot"):
            # parent to appropriate node under LOD switch
            bundleNP.reparentTo(self.__LODNode.find(str(lodName)))
        else:
            bundleNP.reparentTo(self.__geomNode)
        self.__prepareBundle(bundleNP, model.node(), partName, lodName)

        # we rename this node to make Actor copying easier
        bundleNP.node().setName("%s%s"%(Actor.partPrefix,partName))

        if numAnims != 0:
            # If the model had some animations, store them in the
            # dict so they can be played.
            Actor.notify.info("model contains %s animations." % (numAnims))

            # make sure this lod is in anim control dict
            if self.mergeLODBundles:
                lodName = 'common'
            self.__animControlDict.setdefault(lodName, {})
            self.__animControlDict[lodName].setdefault(partName, {})

            for i in range(numAnims):
                animControl = acc.getAnim(i)
                animName = acc.getAnimName(i)

                # Store the already-bound control in an AnimDef so it
                # can be played like an externally-loaded anim.
                animDef = Actor.AnimDef()
                animDef.animControl = animControl
                self.__animControlDict[lodName][partName][animName] = animDef
def __prepareBundle(self, bundleNP, partModel,
                    partName="modelRoot", lodName="lodRoot"):
    """Record the Character found at bundleNP as the named part/lod
    in the part-bundle dictionary, merging its bundle with a
    previously loaded one for the same part when mergeLODBundles is
    in effect."""
    assert partName not in self.__subpartDict

    # Rename the node at the top of the hierarchy, if we
    # haven't already, to make it easier to identify this
    # actor in the scene graph.
    if not self.gotName:
        self.node().setName(bundleNP.node().getName())
        self.gotName = 1

    bundleDict = self.__partBundleDict.get(lodName, None)
    if bundleDict == None:
        # make a dictionary to store these parts in
        bundleDict = {}
        self.__partBundleDict[lodName] = bundleDict
        self.__updateSortedLODNames()

    node = bundleNP.node()
    # A model loaded from disk will always have just one bundle.
    assert(node.getNumBundles() == 1)
    bundleHandle = node.getBundleHandle(0)

    if self.mergeLODBundles:
        loadedBundleHandle = self.__commonBundleHandles.get(partName, None)
        if loadedBundleHandle:
            # We've already got a bundle for this part; merge it.
            node.mergeBundles(bundleHandle, loadedBundleHandle)
            bundleHandle = loadedBundleHandle
        else:
            # We haven't already got a bundle for this part; store it.
            self.__commonBundleHandles[partName] = bundleHandle

    bundleDict[partName] = Actor.PartDef(bundleNP, bundleHandle, partModel)
def makeSubpart(self, partName, includeJoints, excludeJoints = [],
                parent="modelRoot", overlapping = False):
    """Defines a new "part" of the Actor that corresponds to the
    same geometry as the named parent part, but animates only a
    certain subset of the joints.  This can be used for
    partial-body animations, for instance to animate a hand waving
    while the rest of the body continues to play its walking
    animation.

    includeJoints is a list of joint names that are to be animated
    by the subpart.  Each name can include globbing characters
    like '?' or '*', which will match one or any number of
    characters, respectively.  Including a joint by naming it in
    includeJoints implicitly includes all of the descendents of
    that joint as well, except for excludeJoints, below.

    excludeJoints is a list of joint names that are *not* to be
    animated by the subpart.  As in includeJoints, each name can
    include globbing characters.  If a joint is named by
    excludeJoints, it will not be included (and neither will any
    of its descendents), even if a parent joint was named by
    includeJoints.

    if overlapping is False, an error is raised (in the dev build)
    if this subpart shares joints with any other subparts.  If
    overlapping is True, no such error is raised.

    parent is the actual partName that this subpart is based on.

    (The mutable default for excludeJoints is harmless here: it is
    only iterated, never mutated.)
    """
    assert partName not in self.__subpartDict

    # Start from the parent's subset (the parent may itself be a
    # subpart) so that restrictions accumulate down the chain.
    subpartDef = self.__subpartDict.get(parent, Actor.SubpartDef(''))

    subset = PartSubset(subpartDef.subset)
    for name in includeJoints:
        subset.addIncludeJoint(GlobPattern(name))
    for name in excludeJoints:
        subset.addExcludeJoint(GlobPattern(name))

    self.__subpartDict[partName] = Actor.SubpartDef(parent, subset)

    if __dev__ and not overlapping and self.validateSubparts.getValue():
        # Without the overlapping flag True, we're not allowed to
        # define overlapping sub-parts.  Verify that we haven't.
        for otherPartName, otherPartDef in self.__subpartDict.items():
            if otherPartName != partName and otherPartDef.truePartName == parent:
                joints = self.getOverlappingJoints(partName, otherPartName)
                if joints:
                    raise StandardError, 'Overlapping joints: %s and %s' % (partName, otherPartName)
def setSubpartsComplete(self, flag):
    """Sets the subpartsComplete flag, which affects the behavior of
    play(), loop(), stop(), etc. when no explicit parts are given.

    When False (the default), a whole-actor play() acts on the
    overall Actor part, a separate part that overlaps each subpart;
    playing a different animation on a subpart afterwards may stop
    the overall animation (non-blend mode) or blend with it (blend
    mode).

    When True, a whole-actor play() instead acts on each subpart
    individually, so a later animation on one subpart replaces only
    that subpart's animation.  Set this True only when the union of
    all your subparts covers the entire Actor.
    """
    self.__subpartsComplete = flag

    if __dev__ and flag and self.validateSubparts.getValue():
        # If any subparts have been declared so far, they must
        # jointly cover every joint.
        if self.__subpartDict:
            self.verifySubpartsComplete()
def getSubpartsComplete(self):
    """Return the subpartsComplete flag; see setSubpartsComplete()."""
    return self.__subpartsComplete
def verifySubpartsComplete(self, partName = None, lodName = None):
    """ Ensures that each joint is defined by at least one
    subPart.  Prints a warning if this is not the case.  partName,
    if given, must be a true part (not a subpart); with no partName
    all parts are checked. """
    if partName:
        assert partName not in self.__subpartDict
        partNames = [partName]
    else:
        if lodName:
            partNames = self.__partBundleDict[lodName].keys()
        else:
            # No lod given: sample the parts of an arbitrary lod
            # (every lod carries the same part names).
            partNames = self.__partBundleDict.values()[0].keys()

    for partName in partNames:
        # Union of the joints covered by every subpart of this part.
        subJoints = set()
        for subPartName, subPartDef in self.__subpartDict.items():
            if subPartName != partName and subPartDef.truePartName == partName:
                subJoints |= set(self.getJoints(partName = subPartName, lodName = lodName))
        allJoints = set(self.getJoints(partName = partName, lodName = lodName))
        diff = allJoints.difference(subJoints)
        if diff:
            self.notify.warning('Uncovered joints: %s' % (list(diff)))
def loadAnims(self, anims, partName="modelRoot", lodName="lodRoot"):
    """Actor anim loader.

    anims is a dict of animName:animPath entries (an entry's value
    may also be a pre-loaded NodePath containing an AnimBundleNode).
    partName defaults to 'modelRoot' for non-multipart actors;
    lodName defaults to 'lodRoot' for non-LOD actors, and may be
    'all' to register the anims on every LOD.

    Only the filename (or bundle) is stored here; each animation is
    loaded and bound the first time it is played.
    """
    reload = True
    if self.mergeLODBundles:
        # All LODs share one bundle; everything lives under 'common'.
        lodNames = ['common']
    elif lodName == 'all':
        reload = False
        lodNames = self.switches.keys()
        lodNames.sort()
        for i in range(0, len(lodNames)):
            lodNames[i] = str(lodNames[i])
    else:
        lodNames = [lodName]

    assert Actor.notify.debug("in loadAnims: %s, part: %s, lod: %s" %
                              (anims, partName, lodNames[0]))

    firstLoad = True
    if not reload:
        # Narrowed from a bare except: only a missing lod/part key
        # means "first load"; anything else is a real error and
        # should propagate.
        try:
            self.__animControlDict[lodNames[0]][partName]
            firstLoad = False
        except KeyError:
            pass
    for lName in lodNames:
        if firstLoad:
            self.__animControlDict.setdefault(lName, {})
            self.__animControlDict[lName].setdefault(partName, {})

    for animName, filename in anims.items():
        # make sure this lod is in anim control dict
        for lName in lodNames:
            if firstLoad:
                self.__animControlDict[lName][partName][animName] = Actor.AnimDef()

            if isinstance(filename, NodePath):
                # We were given a pre-loaded anim bundle, not a filename.
                assert not filename.isEmpty()
                if filename.node().isOfType(AnimBundleNode.getClassType()):
                    animBundleNP = filename
                else:
                    animBundleNP = filename.find('**/+AnimBundleNode')
                assert not animBundleNP.isEmpty()
                self.__animControlDict[lName][partName][animName].animBundle = animBundleNP.node().getBundle()
            else:
                # We were given a filename that must be loaded.
                # Store the filename only; we will load and bind
                # it (and produce an AnimControl) when it is
                # played.
                self.__animControlDict[lName][partName][animName].filename = filename
def initAnimsOnAllLODs(self, partNames):
    """Ensure an (empty) anim dictionary exists for each of the
    named parts on every LOD (or on the single merged 'common'
    bucket when LOD bundles are merged)."""
    if self.mergeLODBundles:
        lodNames = ['common']
    else:
        lodNames = self.__partBundleDict.keys()

    for lod in lodNames:
        lodEntry = self.__animControlDict.setdefault(lod, {})
        for part in partNames:
            lodEntry.setdefault(part, {})
def loadAnimsOnAllLODs(self, anims, partName="modelRoot"):
    """Register the given anims (a dict of animName:animPath) for
    the named part on every LOD (or on the merged 'common' bucket).

    Only the file path is stored; each anim is loaded and bound (an
    AnimControl produced) when it is first played.  The per-lod/part
    dictionaries must already exist -- see initAnimsOnAllLODs().
    """
    if self.mergeLODBundles:
        lodNames = ['common']
    else:
        lodNames = self.__partBundleDict.keys()

    for animName, filename in anims.items():
        for lod in lodNames:
            self.__animControlDict[lod][partName][animName] = Actor.AnimDef(filename)
def postFlatten(self):
    """Call this after performing an aggressive flatten operation,
    such as flattenStrong(), that involves the Actor.  This is
    especially necessary when mergeLODBundles is true, since this
    kind of actor may be broken after a flatten operation; this
    method should restore proper Actor functionality. """

    if self.mergeLODBundles:
        # Re-merge all bundles, and restore the common bundle map.
        self.__commonBundleHandles = {}
        for lodName, bundleDict in self.__partBundleDict.items():
            for partName, partDef in bundleDict.items():
                loadedBundleHandle = self.__commonBundleHandles.get(partName, None)
                node = partDef.partBundleNP.node()
                if loadedBundleHandle:
                    # This part was already seen on another lod: fold
                    # this lod's bundle into the shared handle.
                    node.mergeBundles(partDef.partBundleHandle, loadedBundleHandle)
                    partDef.partBundleHandle = loadedBundleHandle
                else:
                    # First sighting of this part: its handle becomes
                    # the shared one for the remaining lods.
                    self.__commonBundleHandles[partName] = partDef.partBundleHandle

    # Since we may have merged together some bundles, all of
    # our anims are now suspect.  Force them to reload.
    self.unloadAnims()
def unloadAnims(self, anims=None, partName=None, lodName=None):
    """Actor anim unloader.

    Drops the AnimControl for each named animation on the given
    part(s) and lod(s), clearing any control effects first so a
    blending anim control does not leave stale weights behind.
    anims is a list of animation names; partName and lodName are
    single names.  If any parameter is None or omitted, it means
    all of them.
    """
    assert Actor.notify.debug("in unloadAnims: %s, part: %s, lod: %s" %
                              (anims, partName, lodName))

    if lodName == None or self.mergeLODBundles:
        lodNames = self.__animControlDict.keys()
    else:
        lodNames = [lodName]

    if (partName == None):
        if len(lodNames) > 0:
            partNames = self.__animControlDict[lodNames[0]].keys()
        else:
            partNames = []
    else:
        partNames = [partName]

    # The "all anims" and "named anims" cases used to be two
    # near-identical loops; they differ only in which AnimDefs are
    # selected, so select first and clean up once.
    for lodName in lodNames:
        for partName in partNames:
            animDict = self.__animControlDict[lodName][partName]
            if anims == None:
                animDefs = animDict.values()
            else:
                animDefs = [animDict.get(anim) for anim in anims]
            for animDef in animDefs:
                if animDef and animDef.animControl != None:
                    # Try to clear any control effects before we let
                    # our handle on them go.  This is especially
                    # important if the anim control was blending
                    # animations.
                    animDef.animControl.getPart().clearControlEffects()
                    animDef.animControl = None
def bindAnim(self, animName, partName = None, lodName = None,
             allowAsyncBind = False):
    """Bind the named animation to the named part and/or lod.

    With allowAsyncBind False (the default) the bind is guaranteed
    to happen synchronously in the main thread, so the animation is
    loaded and bound by the time this returns -- at the cost of
    possibly holding up the render for a frame or two.  The
    parameters mirror getAnimControls(), of which this is a thin
    wrapper; use it when an animation must be available before you
    start playing it."""
    self.getAnimControls(animName = animName, partName = partName,
                         lodName = lodName,
                         allowAsyncBind = allowAsyncBind)
def bindAllAnims(self, allowAsyncBind = False):
    """Load and bind every animation that has been defined for the
    Actor."""
    self.getAnimControls(animName = True, allowAsyncBind = allowAsyncBind)
def waitPending(self, partName = None):
    """Block until every asynchronously pending animation that is
    currently playing has been loaded and bound to the Actor.  Call
    this after play() when using asynchronous binds but a particular
    animation is needed immediately."""
    bundles = self.getPartBundles(partName = partName)
    for bundle in bundles:
        bundle.waitPending()
def __bindAnimToPart(self, animName, partName, lodName,
                     allowAsyncBind = True):
    """
    Binds the named animation to the named part/lod and returns
    the associated animControl.  The animation is loaded and bound
    in a sub-thread, if allowAsyncBind is True,
    self.allowAsyncBind is True, threading is enabled, and the
    animation has a preload table generated for it (e.g. via
    "egg-optchar -preload").  Even though the animation may or may
    not be yet bound at the time this function returns, a usable
    animControl is returned, or None if the animation could not be
    bound.
    """
    # make sure this anim is in the dict
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))

    partDict = self.__animControlDict[lodName]
    animDict = partDict.get(partName)
    if animDict == None:
        # It must be a subpart that hasn't been bound yet.
        animDict = {}
        partDict[partName] = animDict

    anim = animDict.get(animName)
    if anim == None:
        # It must be a subpart that hasn't been bound yet.
        # NOTE(review): if the true part doesn't know this anim
        # either, get() returns None and the makeCopy() below raises
        # AttributeError before the error() call further down ever
        # fires.
        anim = partDict[subpartDef.truePartName].get(animName)
        anim = anim.makeCopy()
        animDict[animName] = anim

    if anim == None:
        # NOTE(review): unreachable after the branch above (anim is
        # always a fresh copy there); also animName is passed as a
        # stray extra argument instead of being %-formatted into the
        # message -- confirm against the Notifier.error signature.
        Actor.notify.error("actor has no animation %s", animName)

    # only bind if not already bound!
    if anim.animControl:
        return anim.animControl

    # Pick the bundle to bind against: the shared handle when LOD
    # bundles are merged, otherwise this lod's own bundle.
    if self.mergeLODBundles:
        bundle = self.__commonBundleHandles[subpartDef.truePartName].getBundle()
    else:
        bundle = self.__partBundleDict[lodName][subpartDef.truePartName].getBundle()

    if anim.animBundle:
        # We already have a bundle; just bind it.
        animControl = bundle.bindAnim(anim.animBundle, -1, subpartDef.subset)
    else:
        # Load and bind the anim.  This might be an asynchronous
        # operation that will complete in the background, but if so it
        # will still return a usable AnimControl.
        animControl = bundle.loadBindAnim(
            loader.loader, Filename(anim.filename), -1,
            subpartDef.subset, allowAsyncBind and self.allowAsyncBind)

    if not animControl:
        # Couldn't bind.  (This implies the binding operation was
        # not attempted asynchronously.)
        return None

    # store the animControl
    anim.animControl = animControl
    assert Actor.notify.debug("binding anim: %s to part: %s, lod: %s" %
                              (animName, partName, lodName))
    return animControl
def __copyPartBundles(self, other):
    """__copyPartBundles(self, Actor)
    Copy the part bundle dictionary from another actor as this
    instance's own.  NOTE: this method does not actually copy
    geometry -- the matching part nodes must already exist in this
    actor's scene graph."""
    for lodName in other.__partBundleDict.keys():
        # find the corresponding lod in our own tree
        if lodName == 'lodRoot':
            partLod = self
        else:
            partLod = self.__LODNode.find(str(lodName))
        if partLod.isEmpty():
            Actor.notify.warning("no lod named: %s" % (lodName))
            return None
        for partName, partDef in other.__partBundleDict[lodName].items():
            # We can really only copy from a non-flattened avatar.
            assert partDef.partBundleNP.node().getNumBundles() == 1

            # find the part in our tree
            bundleNP = partLod.find("**/%s%s" % (Actor.partPrefix, partName))
            # find() returns an empty NodePath (never None) when
            # nothing matches, so test isEmpty() -- the old
            # "!= None" check made the error branch unreachable.
            if not bundleNP.isEmpty():
                # store the part bundle
                self.__prepareBundle(bundleNP, partDef.partModel,
                                     partName, lodName)
            else:
                Actor.notify.error("lod: %s has no matching part: %s" %
                                   (lodName, partName))
def __copySubpartDict(self, other):
    """Copies the subpartDict from another actor as this instance's
    own.  This makes a deep copy of the map and all of the names and
    PartSubset objects within it.  We can't use copy.deepcopy()
    because of the included C++ PartSubset objects, so each
    SubpartDef supplies its own makeCopy()."""
    self.__subpartDict = {}
    for partName, subpartDef in other.__subpartDict.items():
        # (A dead local alias of subpartDef was removed here.)
        if subpartDef:
            subpartDef = subpartDef.makeCopy()
        self.__subpartDict[partName] = subpartDef
def __copyAnimControls(self, other):
    """Copy every entry of another actor's anim-control dictionary
    into our own (via AnimDef.makeCopy), keyed by the same lod, part
    and anim names, so the anims can later be bound to our own part
    bundles."""
    assert(other.mergeLODBundles == self.mergeLODBundles)

    for lodName, partDict in other.__animControlDict.items():
        lodEntry = self.__animControlDict[lodName] = {}
        for partName, animDict in partDict.items():
            partEntry = lodEntry[partName] = {}
            for animName, anim in animDict.items():
                partEntry[animName] = anim.makeCopy()
def actorInterval(self, *args, **kw):
    """Return an ActorInterval wrapping this actor; all arguments
    are passed through to the ActorInterval constructor.  The
    interval module is imported lazily to avoid a hard dependency."""
    from direct.interval import ActorInterval
    return ActorInterval.ActorInterval(self, *args, **kw)
def getAnimBlends(self, animName=None, partName=None, lodName=None):
    """ Returns a list of the form:

    [ (lodName, [(animName, [(partName, effect), (partName, effect), ...]),
                 (animName, [(partName, effect), (partName, effect), ...]),
                 ...]),
      (lodName, [(animName, [(partName, effect), (partName, effect), ...]),
                 (animName, [(partName, effect), (partName, effect), ...]),
                 ...]),
      ... ]

    This list reports the non-zero control effects for each
    partName within a particular animation and LOD.  Any of the
    three filter parameters may be None to mean "all". """

    result = []

    if animName is None:
        animNames = self.getAnimNames()
    else:
        animNames = [animName]

    if lodName is None:
        lodNames = self.getLODNames()
        if self.mergeLODBundles:
            # All lods share one bundle; reporting the first is
            # enough.
            lodNames = lodNames[:1]
    else:
        lodNames = [lodName]

    if partName == None and self.__subpartsComplete:
        # Complete subparts: enumerate the subparts rather than the
        # overall Actor part.
        partNames = self.__subpartDict.keys()
    else:
        partNames = [partName]

    for lodName in lodNames:
        animList = []
        for animName in animNames:
            blendList = []
            for partName in partNames:
                control = self.getAnimControl(animName, partName, lodName)
                if control:
                    part = control.getPart()
                    effect = part.getControlEffect(control)
                    if effect > 0.:
                        blendList.append((partName, effect))
            if blendList:
                animList.append((animName, blendList))
        if animList:
            result.append((lodName, animList))

    return result
def printAnimBlends(self, animName=None, partName=None, lodName=None):
for lodName, animList in self.getAnimBlends(animName, partName, lodName):
print 'LOD %s:' % (lodName)
for animName, blendList in animList:
list = []
for partName, effect in blendList:
list.append('%s:%.3f' % (partName, effect))
print ' %s: %s' % (animName, ', '.join(list))
def osdAnimBlends(self, animName=None, partName=None, lodName=None):
    """Put per-animation blend weights and play rates for the named
    (or all) animations into the on-screen debug panel.  Does
    nothing when onScreenDebug is disabled."""
    if not onScreenDebug.enabled:
        return
    # puts anim blending info into the on-screen debug panel
    if animName is None:
        animNames = self.getAnimNames()
    else:
        animNames = [animName]
    for animName in animNames:
        # Compare by value; 'is' on a string literal relies on
        # interpreter-specific interning and is not guaranteed.
        if animName == 'nothing':
            continue
        thisAnim = ''
        totalEffect = 0.
        controls = self.getAnimControls(animName, partName, lodName)
        for control in controls:
            part = control.getPart()
            name = part.getName()
            effect = part.getControlEffect(control)
            if effect > 0.:
                totalEffect += effect
                thisAnim += ('%s:%.3f, ' % (name, effect))
        thisAnim += "\n"
        for control in controls:
            part = control.getPart()
            name = part.getName()
            rate = control.getPlayRate()
            thisAnim += ('%s:%.1f, ' % (name, rate))
        # don't display anything if this animation is not being played
        itemName = 'anim %s' % animName
        if totalEffect > 0.:
            onScreenDebug.add(itemName, thisAnim)
        else:
            if onScreenDebug.has(itemName):
                onScreenDebug.remove(itemName)
# these functions compensate for actors that are modeled facing the viewer but need
# to face away from the camera in the game
def faceAwayFromViewer(self):
    """Rotate the actor's geometry 180 degrees in heading, for
    models authored facing the viewer that must face away from the
    camera in game."""
    geom = self.getGeomNode()
    geom.setH(180)
def faceTowardsViewer(self):
    """Reset the actor's geometry heading to 0, undoing
    faceAwayFromViewer()."""
    geom = self.getGeomNode()
    geom.setH(0)
def renamePartBundles(self, partName, newBundleName):
subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
for partBundleDict in self.__partBundleDict.values():
partDef=partBundleDict.get(subpartDef.truePartName)
partDef.getBundle().setName(newBundleName)
| {
"repo_name": "jjkoletar/panda3d",
"path": "direct/src/actor/Actor.py",
"copies": "1",
"size": "106198",
"license": "bsd-3-clause",
"hash": 315747505434363000,
"line_mean": 40.548513302,
"line_max": 116,
"alpha_frac": 0.5695116669,
"autogenerated": false,
"ratio": 4.405824759376038,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5475336426276037,
"avg_score": null,
"num_lines": null
} |
"""Actor module: contains the Actor class"""
__all__ = ['Actor']
from pandac.PandaModules import *
from direct.showbase.DirectObject import DirectObject
from pandac.PandaModules import LODNode
import types, copy
class Actor(DirectObject, NodePath):
"""
Actor class: Contains methods for creating, manipulating
and playing animations on characters
"""
notify = directNotify.newCategory("Actor")
partPrefix = "__Actor_"
modelLoaderOptions = LoaderOptions(LoaderOptions.LFSearch |
LoaderOptions.LFReportErrors |
LoaderOptions.LFConvertSkeleton)
animLoaderOptions = LoaderOptions(LoaderOptions.LFSearch |
LoaderOptions.LFReportErrors |
LoaderOptions.LFConvertAnim)
validateSubparts = ConfigVariableBool('validate-subparts', True)
class PartDef:
"""Instances of this class are stored within the
PartBundleDict to track all of the individual PartBundles
associated with the Actor. In general, each separately loaded
model file is a different PartBundle. This can include the
multiple different LOD's, as well as the multiple different
pieces of a multipart Actor. """
def __init__(self, partBundleNP, partBundleHandle, partModel):
# We also save the ModelRoot node along with the
# PartBundle, so that the reference count in the ModelPool
# will be accurate.
self.partBundleNP = partBundleNP
self.partBundleHandle = partBundleHandle
self.partModel = partModel
def getBundle(self):
return self.partBundleHandle.getBundle()
def __repr__(self):
return 'Actor.PartDef(%s, %s)' % (repr(self.partBundleNP), repr(self.partModel))
class AnimDef:
"""Instances of this class are stored within the
AnimControlDict to track all of the animations associated with
the Actor. This includes animations that have already been
bound (these have a valid AnimControl) as well as those that
have not yet been bound (for these, self.animControl is None).
There is a different AnimDef for each different part or
sub-part, times each different animation in the AnimDict. """
def __init__(self, filename = None, animBundle = None):
self.filename = filename
self.animBundle = None
self.animControl = None
def makeCopy(self):
return Actor.AnimDef(self.filename, self.animBundle)
def __repr__(self):
return 'Actor.AnimDef(%s)' % (repr(self.filename))
class SubpartDef:
"""Instances of this class are stored within the SubpartDict
to track the existance of arbitrary sub-parts. These are
designed to appear to the user to be identical to true "part"
of a multi-part Actor, but in fact each subpart represents a
subset of the joints of an existing part (which is accessible
via a different name). """
def __init__(self, truePartName, subset = PartSubset()):
self.truePartName = truePartName
self.subset = subset
def makeCopy(self):
return Actor.SubpartDef(self.truePartName, PartSubset(self.subset))
def __repr__(self):
return 'Actor.SubpartDef(%s, %s)' % (repr(self.truePartName), repr(self.subset))
    def __init__(self, models=None, anims=None, other=None, copy=True,
                 lodNode = None, flattenable = True, setFinal = False,
                 mergeLODBundles = None, allowAsyncBind = None,
                 okMissing = None):
        """__init__(self, string | string:string{}, string:string{} |
        string:(string:string{}){}, Actor=None)
        Actor constructor: can be used to create single or multipart
        actors. If another Actor is supplied as an argument this
        method acts like a copy constructor. Single part actors are
        created by calling with a model and animation dictionary
        (animName:animPath{}) as follows:
        a = Actor("panda-3k.egg", {"walk":"panda-walk.egg" \
        "run":"panda-run.egg"})
        This could be displayed and animated as such:
        a.reparentTo(render)
        a.loop("walk")
        a.stop()
        Multipart actors expect a dictionary of parts and a dictionary
        of animation dictionaries (partName:(animName:animPath{}){}) as
        below:
        a = Actor(
        # part dictionary
        {"head":"char/dogMM/dogMM_Shorts-head-mod", \
        "torso":"char/dogMM/dogMM_Shorts-torso-mod", \
        "legs":"char/dogMM/dogMM_Shorts-legs-mod"}, \
        # dictionary of anim dictionaries
        {"head":{"walk":"char/dogMM/dogMM_Shorts-head-walk", \
        "run":"char/dogMM/dogMM_Shorts-head-run"}, \
        "torso":{"walk":"char/dogMM/dogMM_Shorts-torso-walk", \
        "run":"char/dogMM/dogMM_Shorts-torso-run"}, \
        "legs":{"walk":"char/dogMM/dogMM_Shorts-legs-walk", \
        "run":"char/dogMM/dogMM_Shorts-legs-run"} \
        })
        In addition multipart actor parts need to be connected together
        in a meaningful fashion:
        a.attach("head", "torso", "joint-head")
        a.attach("torso", "legs", "joint-hips")
        #
        # ADD LOD COMMENT HERE!
        #
        Other useful Actor class functions:
        #fix actor eye rendering
        a.drawInFront("joint-pupil?", "eyes*")
        #fix bounding volumes - this must be done after drawing
        #the actor for a few frames, otherwise it has no effect
        a.fixBounds()
        Additional keyword parameters:
          copy -- load models as copies from the ModelPool (default True)
          lodNode -- an existing LODNode to use for an LOD actor
          flattenable -- use plain PandaNodes so the actor may be
            flattened later (default True)
          setFinal -- mark the geom node's bounding volume "final" so
            the whole Actor culls as one unit
          mergeLODBundles -- share one skeleton across all LODs; None
            means read the 'merge-lod-bundles' config variable
          allowAsyncBind -- enable asynchronous anim binding; None
            means read the 'allow-async-bind' config variable
          okMissing -- passed through to loadModel()
        """
        # Guard against double-initialization (e.g. diamond inheritance).
        try:
            self.Actor_initialized
            return
        except:
            self.Actor_initialized = 1
        # initialize our NodePath essence
        NodePath.__init__(self)
        # Set the mergeLODBundles flag. If this is true, all
        # different LOD's will be merged into a single common bundle
        # (joint hierarchy). All LOD's will thereafter share the same
        # skeleton, even though they may have been loaded from
        # different egg files. If this is false, LOD's will be kept
        # completely isolated, and each LOD will have its own
        # skeleton.
        # When this flag is true, __animControlDict has only one key,
        # ['common']; when it is false, __animControlDict has one key
        # per each LOD name.
        if mergeLODBundles == None:
            # If this isn't specified, it comes from the Config.prc
            # file.
            self.mergeLODBundles = base.config.GetBool('merge-lod-bundles', True)
        else:
            self.mergeLODBundles = mergeLODBundles
        # Set the allowAsyncBind flag. If this is true, it enables
        # asynchronous animation binding. This requires that you have
        # run "egg-optchar -preload" on your animation and models to
        # generate the appropriate AnimPreloadTable.
        if allowAsyncBind == None:
            self.allowAsyncBind = base.config.GetBool('allow-async-bind', True)
        else:
            self.allowAsyncBind = allowAsyncBind
        # create data structures
        self.__commonBundleHandles = {}
        self.__partBundleDict = {}
        self.__subpartDict = {}
        self.__sortedLODNames = []
        self.__animControlDict = {}
        self.__subpartsComplete = False
        self.__LODNode = None
        self.__LODAnimation = None
        self.__LODCenter = Point3(0, 0, 0)
        self.switches = None
        if (other == None):
            # act like a normal constructor
            # create base hierarchy
            self.gotName = 0
            if flattenable:
                # If we want a flattenable Actor, don't create all
                # those ModelNodes, and the GeomNode is the same as
                # the root.
                root = PandaNode('actor')
                self.assign(NodePath(root))
                self.setGeomNode(NodePath(self))
            else:
                # A standard Actor has a ModelNode at the root, and
                # another ModelNode to protect the GeomNode.
                root = ModelNode('actor')
                root.setPreserveTransform(1)
                self.assign(NodePath(root))
                self.setGeomNode(self.attachNewNode(ModelNode('actorGeom')))
            self.__hasLOD = 0
            # load models
            #
            # four cases:
            #
            #   models, anims{} = single part actor
            #   models{}, anims{} = single part actor w/ LOD
            #   models{}, anims{}{} = multi-part actor
            #   models{}{}, anims{}{} = multi-part actor w/ LOD
            #
            # make sure we have models
            if (models):
                # do we have a dictionary of models?
                if (type(models)==type({})):
                    # if this is a dictionary of dictionaries
                    if (type(models[models.keys()[0]]) == type({})):
                        # then it must be a multipart actor w/LOD
                        self.setLODNode(node = lodNode)
                        # preserve numerical order for lod's
                        # this will make it easier to set ranges
                        sortedKeys = models.keys()
                        sortedKeys.sort()
                        for lodName in sortedKeys:
                            # make a node under the LOD switch
                            # for each lod (just because!)
                            self.addLOD(str(lodName))
                            # iterate over both dicts
                            for modelName in models[lodName].keys():
                                self.loadModel(models[lodName][modelName],
                                               modelName, lodName, copy = copy,
                                               okMissing = okMissing)
                    # then if there is a dictionary of dictionaries of anims
                    elif (type(anims[anims.keys()[0]])==type({})):
                        # then this is a multipart actor w/o LOD
                        for partName in models.keys():
                            # pass in each part
                            self.loadModel(models[partName], partName,
                                           copy = copy, okMissing = okMissing)
                    else:
                        # it is a single part actor w/LOD
                        self.setLODNode(node = lodNode)
                        # preserve order of LOD's
                        sortedKeys = models.keys()
                        sortedKeys.sort()
                        for lodName in sortedKeys:
                            self.addLOD(str(lodName))
                            # pass in dictionary of parts
                            self.loadModel(models[lodName], lodName=lodName,
                                           copy = copy, okMissing = okMissing)
                else:
                    # else it is a single part actor
                    self.loadModel(models, copy = copy, okMissing = okMissing)
            # load anims
            # make sure the actor has animations
            if (anims):
                if (len(anims) >= 1):
                    # if so, does it have a dictionary of dictionaries?
                    if (type(anims[anims.keys()[0]])==type({})):
                        # are the models a dict of dicts too?
                        if (type(models)==type({})):
                            if (type(models[models.keys()[0]]) == type({})):
                                # then we have a multi-part w/ LOD
                                sortedKeys = models.keys()
                                sortedKeys.sort()
                                for lodName in sortedKeys:
                                    # iterate over both dicts
                                    for partName in anims.keys():
                                        self.loadAnims(
                                            anims[partName], partName, lodName)
                            else:
                                # then it must be multi-part w/o LOD
                                for partName in anims.keys():
                                    self.loadAnims(anims[partName], partName)
                    elif (type(models)==type({})):
                        # then we have single-part w/ LOD
                        sortedKeys = models.keys()
                        sortedKeys.sort()
                        for lodName in sortedKeys:
                            self.loadAnims(anims, lodName=lodName)
                    else:
                        # else it is single-part w/o LOD
                        self.loadAnims(anims)
        else:
            self.copyActor(other, True) # overwrite everything
        if setFinal:
            # If setFinal is true, the Actor will set its top bounding
            # volume to be the "final" bounding volume: the bounding
            # volumes below the top volume will not be tested. If a
            # cull test passes the top bounding volume, the whole
            # Actor is rendered.
            # We do this partly because an Actor is likely to be a
            # fairly small object relative to the scene, and is pretty
            # much going to be all onscreen or all offscreen anyway;
            # and partly because of the Character bug that doesn't
            # update the bounding volume for pieces that animate away
            # from their original position. It's disturbing to see
            # someone's hands disappear; better to cull the whole
            # object or none of it.
            self.__geomNode.node().setFinal(1)
def delete(self):
try:
self.Actor_deleted
return
except:
self.Actor_deleted = 1
self.cleanup()
    def copyActor(self, other, overwrite=False):
        """Copy-constructor helper: duplicate other's scene graph,
        geom node, LOD bookkeeping, part bundles, subparts, and anim
        controls into this Actor.  If overwrite is true, this Actor's
        contents are replaced wholesale; otherwise other's copy is
        parented under this Actor."""
        # act like a copy constructor
        self.gotName = other.gotName
        # copy the scene graph elements of other
        if (overwrite):
            otherCopy = other.copyTo(NodePath())
            otherCopy.detachNode()
            # assign these elements to ourselve (overwrite)
            self.assign(otherCopy)
        else:
            # just copy these to ourselves
            otherCopy = other.copyTo(self)
        # masad: check if otherCopy has a geomNode as its first child
        # if actor is initialized with flattenable, then otherCopy, not
        # its first child, is the geom node; check __init__, for reference
        if other.getGeomNode().getName() == other.getName():
            self.setGeomNode(otherCopy)
        else:
            self.setGeomNode(otherCopy.getChild(0))
        # copy the switches for lods
        self.switches = other.switches
        self.__LODNode = self.find('**/+LODNode')
        self.__hasLOD = 0
        if (not self.__LODNode.isEmpty()):
            self.__hasLOD = 1
        # copy the part dictionary from other
        self.__copyPartBundles(other)
        self.__copySubpartDict(other)
        self.__subpartsComplete = other.__subpartsComplete
        # copy the anim dictionary from other
        self.__copyAnimControls(other)
def __cmp__(self, other):
# Actor inherits from NodePath, which inherits a definition of
# __cmp__ from FFIExternalObject that uses the NodePath's
# compareTo() method to compare different NodePaths. But we
# don't want this behavior for Actors; Actors should only be
# compared pointerwise. A NodePath that happens to reference
# the same node is still different from the Actor.
if self is other:
return 0
else:
return 1
def __str__(self):
"""
Actor print function
"""
return "Actor %s, parts = %s, LODs = %s, anims = %s" % \
(self.getName(), self.getPartNames(), self.getLODNames(), self.getAnimNames())
    def listJoints(self, partName="modelRoot", lodName="lodRoot"):
        """Handy utility function to list the joint hierarchy of the
        actor.  lodName is ignored when LOD bundles are merged, since
        all LODs then share one skeleton."""
        if self.mergeLODBundles:
            partBundleDict = self.__commonBundleHandles
        else:
            partBundleDict = self.__partBundleDict.get(lodName)
        if not partBundleDict:
            Actor.notify.error("no lod named: %s" % (lodName))
        subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
        partDef = partBundleDict.get(subpartDef.truePartName)
        if partDef == None:
            Actor.notify.error("no part named: %s" % (partName))
        # Recurse over the joint hierarchy, honoring the subpart subset.
        self.__doListJoints(0, partDef.getBundle(),
                            subpartDef.subset.isIncludeEmpty(), subpartDef.subset)
    def __doListJoints(self, indentLevel, part, isIncluded, subset):
        """Recursive helper for listJoints(): print 'part' and its
        children at increasing indent, honoring the subset's
        include/exclude lists."""
        name = part.getName()
        if subset.matchesInclude(name):
            isIncluded = True
        elif subset.matchesExclude(name):
            isIncluded = False
        if isIncluded:
            value = ''
            # Parts that expose outputValue() can report their current
            # value; presumably animated joints -- others print blank.
            if hasattr(part, 'outputValue'):
                lineStream = LineStream()
                part.outputValue(lineStream)
                value = lineStream.getLine()
            print ' ' * indentLevel, part.getName(), value
        for i in range(part.getNumChildren()):
            self.__doListJoints(indentLevel + 2, part.getChild(i),
                                isIncluded, subset)
    def getActorInfo(self):
        """
        Utility function to create a list of information about an actor.
        Useful for iterating over details of an actor.
        Returns a nested list:
        [[lodName, [[partName, PartBundle,
                     [[animName, filename, AnimControl-or-None], ...]],
                    ...]],
         ...]
        """
        lodInfo = []
        for lodName, partDict in self.__animControlDict.items():
            if self.mergeLODBundles:
                # Merged bundles are keyed 'common'; report a real LOD name.
                lodName = self.__sortedLODNames[0]
            partInfo = []
            for partName in partDict.keys():
                subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
                partBundleDict = self.__partBundleDict.get(lodName)
                partDef = partBundleDict.get(subpartDef.truePartName)
                partBundle = partDef.getBundle()
                animDict = partDict[partName]
                animInfo = []
                for animName in animDict.keys():
                    file = animDict[animName].filename
                    animControl = animDict[animName].animControl
                    animInfo.append([animName, file, animControl])
                partInfo.append([partName, partBundle, animInfo])
            lodInfo.append([lodName, partInfo])
        return lodInfo
def getAnimNames(self):
animNames = []
for lodName, lodInfo in self.getActorInfo():
for partName, bundle, animInfo in lodInfo:
for animName, file, animControl in animInfo:
if animName not in animNames:
animNames.append(animName)
return animNames
    def pprint(self):
        """
        Pretty print actor's details: each LOD, its parts and bundles,
        and each anim's file plus load state.
        """
        for lodName, lodInfo in self.getActorInfo():
            print 'LOD:', lodName
            for partName, bundle, animInfo in lodInfo:
                print ' Part:', partName
                print ' Bundle:', repr(bundle)
                for animName, file, animControl in animInfo:
                    print ' Anim:', animName
                    print ' File:', file
                    if animControl == None:
                        # anim not yet bound to a control
                        print ' (not loaded)'
                    else:
                        print (' NumFrames: %d PlayRate: %0.2f' %
                               (animControl.getNumFrames(),
                                animControl.getPlayRate()))
    def cleanup(self):
        """
        Actor cleanup function: stop all anims, clear Python-side
        bookkeeping, flush geometry, then remove the node itself.
        """
        self.stop(None)
        self.clearPythonData()
        self.flush()
        if(self.__geomNode):
            self.__geomNode.removeNode()
            self.__geomNode = None
        if not self.isEmpty():
            self.removeNode()
    def removeNode(self):
        # Warn if geometry is still attached: cleanup() should have
        # been called first.  The warning is wrapped in an assert so it
        # is stripped under python -O; NOTE(review): this presumes
        # notify.warning() returns a true value -- confirm.
        if self.__geomNode and (self.__geomNode.getNumChildren() > 0):
            assert self.notify.warning("called actor.removeNode() on %s without calling cleanup()" % self.getName())
        NodePath.removeNode(self)
    def clearPythonData(self):
        # Reset all Python-side bookkeeping dictionaries; the C++
        # scene graph structures are not touched here.
        self.__commonBundleHandles = {}
        self.__partBundleDict = {}
        self.__subpartDict = {}
        self.__sortedLODNames = []
        self.__animControlDict = {}
    def flush(self):
        """
        Actor flush function: clear Python-side bookkeeping and remove
        the LOD node and all geometry children, leaving an empty shell.
        """
        self.clearPythonData()
        if self.__LODNode and (not self.__LODNode.isEmpty()):
            self.__LODNode.removeNode()
            self.__LODNode = None
        # remove all its children
        if(self.__geomNode):
            self.__geomNode.removeChildren()
        self.__hasLOD = 0
    # accessing
    def getAnimControlDict(self):
        # Direct reference to the internal anim-control dictionary.
        return self.__animControlDict
    def removeAnimControlDict(self):
        # Discard all anim-control bookkeeping.
        self.__animControlDict = {}
    def getPartBundleDict(self):
        # Direct reference to the internal part-bundle dictionary.
        return self.__partBundleDict
def getPartBundles(self, partName = None):
""" Returns a list of PartBundle objects for the entire Actor,
or for the indicated part only. """
bundles = []
for lodName, partBundleDict in self.__partBundleDict.items():
if partName == None:
for partDef in partBundleDict.values():
bundles.append(partDef.getBundle())
else:
subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
partDef = partBundleDict.get(subpartDef.truePartName)
if partDef != None:
bundles.append(partDef.getBundle())
else:
Actor.notify.warning("Couldn't find part: %s" % (partName))
return bundles
def __updateSortedLODNames(self):
# Cache the sorted LOD names so we don't have to grab them
# and sort them every time somebody asks for the list
self.__sortedLODNames = self.__partBundleDict.keys()
# Reverse sort the doing a string->int
def sortKey(x):
if not str(x).isdigit():
smap = {'h':3,
'm':2,
'l':1,
'f':0}
"""
sx = smap.get(x[0], None)
if sx is None:
self.notify.error('Invalid lodName: %s' % x)
"""
return smap[x[0]]
else:
return int(x)
self.__sortedLODNames.sort(key=sortKey, reverse=True)
    def getLODNames(self):
        """
        Return list of Actor LOD names. If not an LOD actor,
        returns 'lodRoot'
        Caution - this returns a reference to the internal cached
        list, not your own copy; do not mutate it.
        """
        return self.__sortedLODNames
    def getPartNames(self):
        """
        Return list of Actor part names (plus subpart names). If not
        a multipart actor, returns 'modelRoot'.
        NOTE: returns parts of arbitrary LOD
        """
        partNames = []
        if self.__partBundleDict:
            # Python 2: values() is a list, so [0] picks an arbitrary LOD.
            partNames = self.__partBundleDict.values()[0].keys()
        return partNames + self.__subpartDict.keys()
    def getGeomNode(self):
        """
        Return the NodePath that contains all actor geometry
        """
        return self.__geomNode
    def setGeomNode(self, node):
        """
        Set the NodePath that contains all actor geometry
        """
        self.__geomNode = node
    def getLODNode(self):
        """
        Return the node that switches actor geometry in and out"""
        return self.__LODNode.node()
    def setLODNode(self, node=None):
        """
        Set the node that switches actor geometry in and out.
        If one is not supplied as an argument, make one
        """
        if (node == None):
            node = LODNode.makeDefaultLod("lod")
        # NOTE(review): when an LOD node already exists, the raw
        # PandaNode is assigned directly instead of being attached
        # under the geom node and wrapped in a NodePath; a later
        # self.__LODNode.node() call would then fail.  Confirm this
        # branch is intended / ever exercised.
        if self.__LODNode:
            self.__LODNode = node
        else:
            self.__LODNode = self.__geomNode.attachNewNode(node)
        self.__hasLOD = 1
        self.switches = {}
    def useLOD(self, lodName):
        """
        Make the Actor ONLY display the given LOD
        """
        # make sure we don't call this twice in a row
        # and pollute the the switches dictionary
        ## sortedKeys = self.switches.keys()
        ## sortedKeys.sort()
        # forceSwitch pins the LODNode to the named child's index.
        child = self.__LODNode.find(str(lodName))
        index = self.__LODNode.node().findChild(child.node())
        self.__LODNode.node().forceSwitch(index)
    def printLOD(self):
        """Print the Python-side and C++-side switch distances for
        every LOD, for debugging mismatches between the two."""
        ## sortedKeys = self.switches.keys()
        ## sortedKeys.sort()
        sortedKeys = self.__sortedLODNames
        for eachLod in sortedKeys:
            print "python switches for %s: in: %d, out %d" % (eachLod,
                                                              self.switches[eachLod][0],
                                                              self.switches[eachLod][1])
        switchNum = self.__LODNode.node().getNumSwitches()
        for eachSwitch in range(0, switchNum):
            print "c++ switches for %d: in: %d, out: %d" % (eachSwitch,
                                                            self.__LODNode.node().getIn(eachSwitch),
                                                            self.__LODNode.node().getOut(eachSwitch))
    def resetLOD(self):
        """
        Restore all switch distance info (usually after a useLOD call)"""
        # Clearing the force switch lets the LODNode pick LODs by
        # distance again.
        self.__LODNode.node().clearForceSwitch()
        ## sortedKeys = self.switches.keys()
        ## sortedKeys.sort()
        ## for eachLod in sortedKeys:
        ##     index = sortedKeys.index(eachLod)
        ##     self.__LODNode.node().setSwitch(index, self.switches[eachLod][0],
        ##                                     self.switches[eachLod][1])
def addLOD(self, lodName, inDist=0, outDist=0, center=None):
"""addLOD(self, string)
Add a named node under the LODNode to parent all geometry
of a specific LOD under.
"""
self.__LODNode.attachNewNode(str(lodName))
# save the switch distance info
self.switches[lodName] = [inDist, outDist]
# add the switch distance info
self.__LODNode.node().addSwitch(inDist, outDist)
if center != None:
self.setCenter(center)
def setLOD(self, lodName, inDist=0, outDist=0):
"""setLOD(self, string)
Set the switch distance for given LOD
"""
# save the switch distance info
self.switches[lodName] = [inDist, outDist]
# add the switch distance info
## sortedKeys = self.switches.keys()
## sortedKeys.sort()
self.__LODNode.node().setSwitch(self.getLODIndex(lodName), inDist, outDist)
def getLODIndex(self, lodName):
"""getLODIndex(self)
safe method (but expensive) for retrieving the child index
"""
return list(self.__LODNode.getChildren()).index(self.getLOD(lodName))
def getLOD(self, lodName):
"""getLOD(self, string)
Get the named node under the LOD to which we parent all LOD
specific geometry to. Returns 'None' if not found
"""
if self.__LODNode:
lod = self.__LODNode.find(str(lodName))
if lod.isEmpty():
return None
else:
return lod
else:
return None
    def hasLOD(self):
        """
        Return 1 if the actor has LODs, 0 otherwise
        """
        return self.__hasLOD
    def setCenter(self, center):
        """Set the center point used for LOD switching (None means the
        origin), and re-apply any active LOD animation so that it uses
        the new center."""
        if center == None:
            center = Point3(0, 0, 0)
        self.__LODCenter = center
        if self.__LODNode:
            self.__LODNode.node().setCenter(self.__LODCenter)
        if self.__LODAnimation:
            # re-issue with the cached (far, near, delay) parameters
            self.setLODAnimation(*self.__LODAnimation)
    def setLODAnimation(self, farDistance, nearDistance, delayFactor):
        """ Activates a special mode in which the Actor animates less
        frequently as it gets further from the camera. This is
        intended as a simple optimization to minimize the effort of
        computing animation for lots of characters that may not
        necessarily be very important to animate every frame.
        If the character is closer to the camera than near_distance,
        then it is animated its normal rate, every frame. If the
        character is exactly far_distance away, it is animated only
        every delay_factor seconds (which should be a number greater
        than 0). If the character is between near_distance and
        far_distance, its animation rate is linearly interpolated
        according to its distance between the two. The interpolation
        function continues beyond far_distance, so that the character
        is animated increasingly less frequently as it gets farther
        away. """
        # Cache the parameters so setCenter() can re-apply them.
        self.__LODAnimation = (farDistance, nearDistance, delayFactor)
        for lodData in self.__partBundleDict.values():
            for partData in lodData.values():
                char = partData.partBundleNP
                char.node().setLodAnimation(self.__LODCenter, farDistance, nearDistance, delayFactor)
def clearLODAnimation(self):
""" Description: Undoes the effect of a recent call to
set_lod_animation(). Henceforth, the character will animate
every frame, regardless of its distance from the camera.
"""
self.__LODAnimation = None
for lodData in self.__partBundleDict.values():
for partData in lodData.values():
char = partData.partBundleNP
char.node().clearLodAnimation()
def update(self, lod=0, partName=None, lodName=None, force=False):
""" Updates all of the Actor's joints in the indicated LOD.
The LOD may be specified by name, or by number, where 0 is the
highest level of detail, 1 is the next highest, and so on.
If force is True, this will update every joint, even if we
don't believe it's necessary.
Returns True if any joint has changed as a result of this,
False otherwise. """
if lodName == None:
lodNames = self.getLODNames()
else:
lodNames = [lodName]
anyChanged = False
if lod < len(lodNames):
lodName = lodNames[lod]
if partName == None:
partBundleDict = self.__partBundleDict[lodName]
partNames = partBundleDict.keys()
else:
partNames = [partName]
for partName in partNames:
partBundle = self.getPartBundle(partName, lodNames[lod])
if force:
if partBundle.forceUpdate():
anyChanged = True
else:
if partBundle.update():
anyChanged = True
else:
self.notify.warning('update() - no lod: %d' % lod)
return anyChanged
def getFrameRate(self, animName=None, partName=None):
"""getFrameRate(self, string, string=None)
Return actual frame rate of given anim name and given part.
If no anim specified, use the currently playing anim.
If no part specified, return anim durations of first part.
NOTE: returns info only for an arbitrary LOD
"""
lodName = self.__animControlDict.keys()[0]
controls = self.getAnimControls(animName, partName)
if len(controls) == 0:
return None
return controls[0].getFrameRate()
def getBaseFrameRate(self, animName=None, partName=None):
"""getBaseFrameRate(self, string, string=None)
Return frame rate of given anim name and given part, unmodified
by any play rate in effect.
"""
lodName = self.__animControlDict.keys()[0]
controls = self.getAnimControls(animName, partName)
if len(controls) == 0:
return None
return controls[0].getAnim().getBaseFrameRate()
def getPlayRate(self, animName=None, partName=None):
"""
Return the play rate of given anim for a given part.
If no part is given, assume first part in dictionary.
If no anim is given, find the current anim for the part.
NOTE: Returns info only for an arbitrary LOD
"""
if self.__animControlDict:
# use the first lod
lodName = self.__animControlDict.keys()[0]
controls = self.getAnimControls(animName, partName)
if controls:
return controls[0].getPlayRate()
return None
def setPlayRate(self, rate, animName, partName=None):
"""setPlayRate(self, float, string, string=None)
Set the play rate of given anim for a given part.
If no part is given, set for all parts in dictionary.
It used to be legal to let the animName default to the
currently-playing anim, but this was confusing and could lead
to the wrong anim's play rate getting set. Better to insist
on this parameter.
NOTE: sets play rate on all LODs"""
for control in self.getAnimControls(animName, partName):
control.setPlayRate(rate)
def getDuration(self, animName=None, partName=None,
fromFrame=None, toFrame=None):
"""
Return duration of given anim name and given part.
If no anim specified, use the currently playing anim.
If no part specified, return anim duration of first part.
NOTE: returns info for arbitrary LOD
"""
lodName = self.__animControlDict.keys()[0]
controls = self.getAnimControls(animName, partName)
if len(controls) == 0:
return None
animControl = controls[0]
if fromFrame is None:
fromFrame = 0
if toFrame is None:
toFrame = animControl.getNumFrames()-1
return ((toFrame+1)-fromFrame) / animControl.getFrameRate()
def getNumFrames(self, animName=None, partName=None):
lodName = self.__animControlDict.keys()[0]
controls = self.getAnimControls(animName, partName)
if len(controls) == 0:
return None
return controls[0].getNumFrames()
def getFrameTime(self, anim, frame, partName=None):
numFrames = self.getNumFrames(anim,partName)
animTime = self.getDuration(anim,partName)
frameTime = animTime * float(frame) / numFrames
return frameTime
    def getCurrentAnim(self, partName=None):
        """
        Return the anim currently playing on the actor. If part not
        specified return current anim of an arbitrary part in dictionary.
        Returns None if nothing is playing or the part is unknown.
        NOTE: only returns info for an arbitrary LOD
        """
        if len(self.__animControlDict.items()) == 0:
            return
        lodName, animControlDict = self.__animControlDict.items()[0]
        if partName == None:
            partName, animDict = animControlDict.items()[0]
        else:
            animDict = animControlDict.get(partName)
            if animDict == None:
                # part was not present
                Actor.notify.warning("couldn't find part: %s" % (partName))
                return None
        # loop through all anims for named part and find if any are playing
        for animName, anim in animDict.items():
            if anim.animControl and anim.animControl.isPlaying():
                return animName
        # we must have found none, or gotten an error
        return None
def getCurrentFrame(self, animName=None, partName=None):
"""
Return the current frame number of the named anim, or if no
anim is specified, then the anim current playing on the
actor. If part not specified return current anim of first part
in dictionary. NOTE: only returns info for an arbitrary LOD
"""
lodName, animControlDict = self.__animControlDict.items()[0]
if partName == None:
partName, animDict = animControlDict.items()[0]
else:
animDict = animControlDict.get(partName)
if animDict == None:
# part was not present
Actor.notify.warning("couldn't find part: %s" % (partName))
return None
if animName:
anim = animDict.get(animName)
if not anim:
Actor.notify.warning("couldn't find anim: %s" % (animName))
elif anim.animControl:
return anim.animControl.getFrame()
else:
# loop through all anims for named part and find if any are playing
for animName, anim in animDict.items():
if anim.animControl and anim.animControl.isPlaying():
return anim.animControl.getFrame()
# we must have found none, or gotten an error
return None
# arranging
def getPart(self, partName, lodName="lodRoot"):
"""
Find the named part in the optional named lod and return it, or
return None if not present
"""
partBundleDict = self.__partBundleDict.get(lodName)
if not partBundleDict:
Actor.notify.warning("no lod named: %s" % (lodName))
return None
subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
partDef = partBundleDict.get(subpartDef.truePartName)
if partDef != None:
return partDef.partBundleNP
return None
def getPartBundle(self, partName, lodName="lodRoot"):
"""
Find the named part in the optional named lod and return its
associated PartBundle, or return None if not present
"""
partBundleDict = self.__partBundleDict.get(lodName)
if not partBundleDict:
Actor.notify.warning("no lod named: %s" % (lodName))
return None
subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
partDef = partBundleDict.get(subpartDef.truePartName)
if partDef != None:
return partDef.getBundle()
return None
    def removePart(self, partName, lodName="lodRoot"):
        """
        Remove the geometry and animations of the named part of the
        optional named lod if present.
        NOTE: this will remove child geometry also!
        """
        # find the corresponding part bundle dict
        partBundleDict = self.__partBundleDict.get(lodName)
        if not partBundleDict:
            Actor.notify.warning("no lod named: %s" % (lodName))
            return
        # remove the part
        if (partName in partBundleDict):
            partBundleDict[partName].partBundleNP.removeNode()
            del(partBundleDict[partName])
        # find the corresponding anim control dict
        if self.mergeLODBundles:
            # merged bundles all live under the single 'common' key
            lodName = 'common'
        partDict = self.__animControlDict.get(lodName)
        if not partDict:
            Actor.notify.warning("no lod named: %s" % (lodName))
            return
        # remove the animations
        if (partName in partDict):
            del(partDict[partName])
        # remove the bundle handle, in case this part is ever
        # loaded again in the future
        if partName in self.__commonBundleHandles:
            del self.__commonBundleHandles[partName]
def hidePart(self, partName, lodName="lodRoot"):
"""
Make the given part of the optionally given lod not render,
even though still in the tree.
NOTE: this will affect child geometry
"""
partBundleDict = self.__partBundleDict.get(lodName)
if not partBundleDict:
Actor.notify.warning("no lod named: %s" % (lodName))
return
partDef = partBundleDict.get(partName)
if partDef:
partDef.partBundleNP.hide()
else:
Actor.notify.warning("no part named %s!" % (partName))
def showPart(self, partName, lodName="lodRoot"):
"""
Make the given part render while in the tree.
NOTE: this will affect child geometry
"""
partBundleDict = self.__partBundleDict.get(lodName)
if not partBundleDict:
Actor.notify.warning("no lod named: %s" % (lodName))
return
partDef = partBundleDict.get(partName)
if partDef:
partDef.partBundleNP.show()
else:
Actor.notify.warning("no part named %s!" % (partName))
def showAllParts(self, partName, lodName="lodRoot"):
"""
Make the given part and all its children render while in the tree.
NOTE: this will affect child geometry
"""
partBundleDict = self.__partBundleDict.get(lodName)
if not partBundleDict:
Actor.notify.warning("no lod named: %s" % (lodName))
return
partDef = partBundleDict.get(partName)
if partDef:
partDef.partBundleNP.show()
partDef.partBundleNP.getChildren().show()
else:
Actor.notify.warning("no part named %s!" % (partName))
    def exposeJoint(self, node, partName, jointName, lodName="lodRoot",
                    localTransform = 0):
        """exposeJoint(self, NodePath, string, string, key="lodRoot")
        Starts the joint animating the indicated node. As the joint
        animates, it will transform the node by the corresponding
        amount. This will replace whatever matrix is on the node each
        frame. The default is to expose the net transform from the root,
        but if localTransform is true, only the node's local transform
        from its parent is exposed.
        Returns the (possibly newly attached) node, or None when the
        lod or part cannot be found."""
        partBundleDict = self.__partBundleDict.get(lodName)
        if not partBundleDict:
            Actor.notify.warning("no lod named: %s" % (lodName))
            return None
        subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
        partDef = partBundleDict.get(subpartDef.truePartName)
        if partDef:
            bundle = partDef.getBundle()
        else:
            Actor.notify.warning("no part named %s!" % (partName))
            return None
        # Get a handle to the joint.
        joint = bundle.findChild(jointName)
        if node == None:
            # no node supplied; attach a fresh one named for the joint
            node = self.attachNewNode(jointName)
        if (joint):
            if localTransform:
                joint.addLocalTransform(node.node())
            else:
                joint.addNetTransform(node.node())
        else:
            Actor.notify.warning("no joint named %s!" % (jointName))
        return node
def stopJoint(self, partName, jointName, lodName="lodRoot"):
    """stopJoint(self, string, string, key="lodRoot")
    Permanently detach any external nodes being driven by the named
    joint; a transform previously applied by exposeJoint() stops
    updating.  Vertex animation is unaffected."""
    lodDict = self.__partBundleDict.get(lodName)
    if not lodDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    partDef = lodDict.get(subpartDef.truePartName)
    if not partDef:
        Actor.notify.warning("no part named %s!" % (partName))
        return None
    joint = partDef.getBundle().findChild(jointName)
    if not joint:
        Actor.notify.warning("no joint named %s!" % (jointName))
        return None
    joint.clearNetTransforms()
    joint.clearLocalTransforms()
def getJoints(self, partName = None, jointName = '*', lodName = None):
    """ Returns the list of all joints, from the named part or
    from all parts, that match the indicated jointName.  The
    jointName may include pattern characters like *. """
    joints=[]
    pattern = GlobPattern(jointName)

    # Decide which part-bundle dictionaries to search.
    if lodName == None and self.mergeLODBundles:
        # Get the common bundle.
        partBundleDicts = [self.__commonBundleHandles]
    elif lodName == None:
        # Get all LOD's.
        partBundleDicts = self.__partBundleDict.values()
    else:
        # Get one LOD.
        partBundleDict = self.__partBundleDict.get(lodName)
        if not partBundleDict:
            Actor.notify.warning("couldn't find lod: %s" % (lodName))
            return []
        partBundleDicts = [partBundleDict]

    for partBundleDict in partBundleDicts:
        parts = []
        if partName:
            subpartDef = self.__subpartDict.get(partName, None)
            if not subpartDef:
                # Whole part
                subset = None
                partDef = partBundleDict.get(partName)
            else:
                # Sub-part: restrict the walk to its joint subset.
                subset = subpartDef.subset
                partDef = partBundleDict.get(subpartDef.truePartName)
            if not partDef:
                Actor.notify.warning("no part named %s!" % (partName))
                return []
            parts = [partDef]
        else:
            subset = None
            parts = partBundleDict.values()

        for partData in parts:
            partBundle = partData.getBundle()
            if not pattern.hasGlobCharacters() and not subset:
                # The simple case: exact name, no subset filtering.
                joint = partBundle.findChild(jointName)
                if joint:
                    joints.append(joint)
            else:
                # The more complex case: recursive pattern walk.
                isIncluded = True
                if subset:
                    isIncluded = subset.isIncludeEmpty()
                self.__getPartJoints(joints, pattern, partBundle, subset, isIncluded)

    return joints
def getOverlappingJoints(self, partNameA, partNameB, jointName = '*', lodName = None):
    """ Return the set of joints matching jointName that partNameA
    and partNameB have in common. """
    common = set(self.getJoints(partName = partNameA, jointName = jointName, lodName = lodName))
    common.intersection_update(self.getJoints(partName = partNameB, jointName = jointName, lodName = lodName))
    return common
def __getPartJoints(self, joints, pattern, partNode, subset, isIncluded):
    """ Recursive helper for getJoints(): walks the joint hierarchy
    below partNode, appending joints whose names match pattern. """
    nodeName = partNode.getName()
    if subset:
        # The subset's include/exclude lists can flip the inclusion
        # state as we descend; children inherit the current state.
        if subset.matchesInclude(nodeName):
            isIncluded = True
        elif subset.matchesExclude(nodeName):
            isIncluded = False
    isJoint = isinstance(partNode, MovingPartBase)
    if isIncluded and isJoint and pattern.matches(nodeName):
        joints.append(partNode)
    for childNode in partNode.getChildren():
        self.__getPartJoints(joints, pattern, childNode, subset, isIncluded)
def getJointTransform(self, partName, jointName, lodName='lodRoot'):
    """Return the default (rest) transform value of the named joint,
    or None if the lod, part, or joint cannot be found."""
    lodDict = self.__partBundleDict.get(lodName)
    if not lodDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    partDef = lodDict.get(subpartDef.truePartName)
    if not partDef:
        Actor.notify.warning("no part named %s!" % (partName))
        return None
    joint = partDef.getBundle().findChild(jointName)
    if joint == None:
        Actor.notify.warning("no joint named %s!" % (jointName))
        return None
    return joint.getDefaultValue()
def getJointTransformState(self, partName, jointName, lodName='lodRoot'):
    """Return the current TransformState of the named joint, or None
    if the lod, part, or joint cannot be found."""
    lodDict = self.__partBundleDict.get(lodName)
    if not lodDict:
        Actor.notify.warning("no lod named: %s" % (lodName))
        return None
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    partDef = lodDict.get(subpartDef.truePartName)
    if not partDef:
        Actor.notify.warning("no part named %s!" % (partName))
        return None
    joint = partDef.getBundle().findChild(jointName)
    if joint == None:
        Actor.notify.warning("no joint named %s!" % (jointName))
        return None
    return joint.getTransformState()
def controlJoint(self, node, partName, jointName, lodName="lodRoot"):
    """The converse of exposeJoint: this associates the joint with
    the indicated node, so that the joint transform will be copied
    from the node to the joint each frame.  This can be used for
    programmer animation of a particular joint at runtime.

    The parameter node should be the NodePath for the node whose
    transform will animate the joint.  If node is None, a new node
    will automatically be created and loaded with the joint's
    initial transform.  In either case, the node used will be
    returned.

    It used to be necessary to call this before any animations
    have been loaded and bound, but that is no longer so.
    """
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    trueName = subpartDef.truePartName
    anyGood = False
    # Hook the joint up in every LOD's copy of the part.
    for bundleDict in self.__partBundleDict.values():
        bundle = bundleDict[trueName].getBundle()
        if node == None:
            # Create the control node lazily, on the first LOD only,
            # and seed it with the joint's rest transform (only
            # matrix-type joints carry one).
            node = self.attachNewNode(ModelNode(jointName))
            joint = bundle.findChild(jointName)
            if joint and isinstance(joint, MovingPartMatrix):
                node.setMat(joint.getDefaultValue())
        if bundle.controlJoint(jointName, node.node()):
            anyGood = True
    if not anyGood:
        self.notify.warning("Cannot control joint %s" % (jointName))
    return node
def freezeJoint(self, partName, jointName, transform = None,
                pos = None, hpr = None, scale = None):
    """Similar to controlJoint, but the transform assigned is
    static, and may not be animated at runtime (without another
    subsequent call to freezeJoint).  This is slightly more
    optimal than controlJoint() for cases in which the transform
    is not intended to be animated during the lifetime of the
    Actor.

    Either pass a complete TransformState via transform, or let it
    be composed from pos/hpr/scale (each defaulting to identity).
    The pos/hpr/scale defaults are now built per call instead of
    being shared mutable Vec3 default arguments, so a caller that
    mutates the vector it receives can no longer corrupt later
    calls.
    """
    if transform is None:
        # Compose the static transform from the components.
        if pos is None:
            pos = Vec3(0, 0, 0)
        if hpr is None:
            hpr = Vec3(0, 0, 0)
        if scale is None:
            scale = Vec3(1, 1, 1)
        transform = TransformState.makePosHprScale(pos, hpr, scale)
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    trueName = subpartDef.truePartName
    anyGood = False
    # Freeze the joint in every LOD's copy of the part.
    for bundleDict in self.__partBundleDict.values():
        if bundleDict[trueName].getBundle().freezeJoint(jointName, transform):
            anyGood = True
    if not anyGood:
        self.notify.warning("Cannot freeze joint %s" % (jointName))
def releaseJoint(self, partName, jointName):
    """Return the named joint to its normal animation, undoing a
    previous controlJoint() or freezeJoint() call."""
    trueName = self.__subpartDict.get(partName, Actor.SubpartDef(partName)).truePartName
    for bundleDict in self.__partBundleDict.values():
        bundleDict[trueName].getBundle().releaseJoint(jointName)
def instance(self, path, partName, jointName, lodName="lodRoot"):
    """instance(self, NodePath, string, string, key="lodRoot")
    Instance a nodePath to an actor part at a joint called jointName"""
    partBundleDict = self.__partBundleDict.get(lodName)
    if not partBundleDict:
        Actor.notify.warning("no lod named %s!" % (lodName))
        return
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    partDef = partBundleDict.get(subpartDef.truePartName)
    if not partDef:
        Actor.notify.warning("no part named %s!" % (partName))
        return
    joint = partDef.partBundleNP.find("**/" + jointName)
    if joint.isEmpty():
        Actor.notify.warning("%s not found!" % (jointName))
        return
    return path.instanceTo(joint)
def attach(self, partName, anotherPartName, jointName, lodName="lodRoot"):
    """attach(self, string, string, string, key="lodRoot")
    Attach one actor part to another at a joint called jointName"""
    partBundleDict = self.__partBundleDict.get(lodName)
    if not partBundleDict:
        Actor.notify.warning("no lod named %s!" % (lodName))
        return
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    partDef = partBundleDict.get(subpartDef.truePartName)
    if not partDef:
        Actor.notify.warning("no part named %s!" % (partName))
        return
    anotherPartDef = partBundleDict.get(anotherPartName)
    if not anotherPartDef:
        Actor.notify.warning("no part named %s!" % (anotherPartName))
        return
    joint = anotherPartDef.partBundleNP.find("**/" + jointName)
    if joint.isEmpty():
        Actor.notify.warning("%s not found!" % (jointName))
        return
    partDef.partBundleNP.reparentTo(joint)
def drawInFront(self, frontPartName, backPartName, mode,
                root=None, lodName=None):
    """drawInFront(self, string, int, string=None, key=None)

    Arrange geometry so the frontPart(s) are drawn in front of
    backPart.

    If mode == -1, the geometry is simply arranged to be drawn in
    the correct order, assuming it is already under a
    direct-render scene graph (like the DirectGui system).  That
    is, frontPart is reparented to backPart, and backPart is
    reordered to appear first among its siblings.

    If mode == -2, the geometry is arranged to be drawn in the
    correct order, and depth test/write is turned off for
    frontPart.

    If mode == -3, frontPart is drawn as a decal onto backPart.
    This assumes that frontPart is mostly coplanar with and does
    not extend beyond backPart, and that backPart is mostly flat
    (not self-occluding).

    If mode > 0, the frontPart geometry is placed in the 'fixed'
    bin, with the indicated drawing order.  This will cause it to
    be drawn after almost all other geometry.  In this case, the
    backPartName is actually unused.

    Takes an optional argument root as the start of the search for the
    given parts.  Also takes optional lod name to refine search for the
    named parts.  If root and lod are defined, we search for the given
    root under the given lod.
    """
    # check to see if we are working within an lod
    if lodName != None:
        # find the named lod node
        lodRoot = self.__LODNode.find(str(lodName))
        if root == None:
            # no need to look further
            root = lodRoot
        else:
            # look for root under lod
            root = lodRoot.find("**/" + root)
    else:
        # start search from self if no root and no lod given
        if root == None:
            root = self
    frontParts = root.findAllMatches("**/" + frontPartName)
    if mode > 0:
        # Use the 'fixed' bin instead of reordering the scene
        # graph.  backPartName is never consulted in this mode.
        numFrontParts = frontParts.getNumPaths()
        for partNum in range(0, numFrontParts):
            frontParts[partNum].setBin('fixed', mode)
        return
    if mode == -2:
        # Turn off depth test/write on the frontParts.
        # (Falls through below to the reordering logic as well.)
        numFrontParts = frontParts.getNumPaths()
        for partNum in range(0, numFrontParts):
            frontParts[partNum].setDepthWrite(0)
            frontParts[partNum].setDepthTest(0)
    # Find the back part.
    backPart = root.find("**/" + backPartName)
    if (backPart.isEmpty()):
        Actor.notify.warning("no part named %s!" % (backPartName))
        return
    if mode == -3:
        # Draw as a decal.
        backPart.node().setEffect(DecalEffect.make())
    else:
        # Reorder the backPart to be the first of its siblings.
        backPart.reparentTo(backPart.getParent(), -1)
    #reparent all the front parts to the back part
    frontParts.reparentTo(backPart)
def fixBounds(self, partName = None):
    """Force recomputation of the bounding volumes of all geoms in
    the named part, or in every part of every LOD if partName is
    None.

    The traversal (update the character node, then mark every Geom
    and GeomNode underneath it as having stale bounds) is shared by
    both cases via a single local helper, instead of being
    duplicated.
    """
    def refreshBounds(partData):
        # Update the character, then mark each Geom's bounds and
        # each GeomNode's internal bounds stale so they are
        # recomputed on demand.
        char = partData.partBundleNP
        char.node().update()
        geomNodes = char.findAllMatches("**/+GeomNode")
        for nodeNum in range(geomNodes.getNumPaths()):
            thisGeomNode = geomNodes.getPath(nodeNum)
            for geomNum in range(thisGeomNode.node().getNumGeoms()):
                thisGeomNode.node().getGeom(geomNum).markBoundsStale()
            thisGeomNode.node().markInternalBoundsStale()

    if partName is None:
        # iterate through everything
        for lodData in self.__partBundleDict.values():
            for partData in lodData.values():
                refreshBounds(partData)
    else:
        # iterate through for a specific part
        for lodData in self.__partBundleDict.values():
            partData = lodData.get(partName)
            if partData:
                refreshBounds(partData)
def fixBounds_old(self, part=None):
    """fixBounds(self, nodePath=None)
    Force recomputation of bounding spheres for all geoms
    in a given part.  If no part specified, fix all geoms
    in this actor
    """
    # Default to the whole actor when no subtree is given.
    if part == None:
        part = self
    # Bring every Character node up to date first.
    charNodes = part.findAllMatches("**/+Character")
    for charNum in range(charNodes.getNumPaths()):
        charNodes.getPath(charNum).node().update()
    # For each GeomNode, mark every geom's bounds stale so the
    # bounding spheres are recomputed on demand.
    geomNodes = part.findAllMatches("**/+GeomNode")
    for nodeNum in range(geomNodes.getNumPaths()):
        thisGeomNode = geomNodes.getPath(nodeNum)
        for geomNum in range(thisGeomNode.node().getNumGeoms()):
            thisGeom = thisGeomNode.node().getGeom(geomNum)
            thisGeom.markBoundsStale()
            assert Actor.notify.debug("fixing bounds for node %s, geom %s" % \
                (nodeNum, geomNum))
        thisGeomNode.node().markInternalBoundsStale()
def showAllBounds(self):
    """
    Show the bounds of all actor geoms
    """
    geomNodes = self.__geomNode.findAllMatches("**/+GeomNode")
    for pathIndex in range(geomNodes.getNumPaths()):
        geomNodes.getPath(pathIndex).showBounds()
def hideAllBounds(self):
    """
    Hide the bounds of all actor geoms
    """
    geomNodes = self.__geomNode.findAllMatches("**/+GeomNode")
    for pathIndex in range(geomNodes.getNumPaths()):
        geomNodes.getPath(pathIndex).hideBounds()
# actions
def animPanel(self):
    """Open a Tk AnimPanel controlling this actor and return it."""
    # TkGlobal is imported for its side effects only — presumably
    # Tk/task-loop integration; verify against direct.showbase.
    from direct.showbase import TkGlobal
    from direct.tkpanels import AnimPanel
    panel = AnimPanel.AnimPanel(self)
    return panel
def stop(self, animName=None, partName=None):
    """stop(self, string=None, string=None)
    Stop the named animation on the given part of the actor; with
    no name, stop every animation on the actor.
    NOTE: stops all LODs"""
    for animControl in self.getAnimControls(animName, partName):
        animControl.stop()
def play(self, animName, partName=None, fromFrame=None, toFrame=None):
    """play(self, string, string=None)
    Play the given animation on the given part of the actor.
    If no part is specified, try to play on all parts.  NOTE:
    plays over ALL LODs"""
    controls = self.getAnimControls(animName, partName)
    if fromFrame is None:
        # No frame range: play each control from its beginning.
        for animControl in controls:
            animControl.play()
        return
    for animControl in controls:
        if toFrame is None:
            animControl.play(fromFrame, animControl.getNumFrames() - 1)
        else:
            animControl.play(fromFrame, toFrame)
def loop(self, animName, restart=1, partName=None,
         fromFrame=None, toFrame=None):
    """loop(self, string, int=1, string=None)
    Loop the given animation on the given part of the actor,
    restarting at zero frame if requested.  If no part name
    is given then try to loop on all parts.  NOTE: loops on
    all LOD's
    """
    for animControl in self.getAnimControls(animName, partName):
        if fromFrame is None:
            animControl.loop(restart)
        elif toFrame is None:
            animControl.loop(restart, fromFrame, animControl.getNumFrames() - 1)
        else:
            animControl.loop(restart, fromFrame, toFrame)
def pingpong(self, animName, restart=1, partName=None,
             fromFrame=None, toFrame=None):
    """pingpong(self, string, int=1, string=None)
    Ping-pong (loop back and forth through) the given animation on
    the given part of the actor, restarting at the start frame if
    requested.  If no part name is given, apply to all parts.
    NOTE: runs on all LOD's"""
    startFrame = fromFrame
    if startFrame is None:
        startFrame = 0
    for animControl in self.getAnimControls(animName, partName):
        endFrame = toFrame
        if endFrame is None:
            endFrame = animControl.getNumFrames() - 1
        animControl.pingpong(restart, startFrame, endFrame)
def pose(self, animName, frame, partName=None, lodName=None):
    """pose(self, string, int, string=None)
    Pose the actor at the given frame of the specified animation
    for the specified part.  With no part given, apply the pose to
    all parts."""
    for animControl in self.getAnimControls(animName, partName, lodName):
        animControl.pose(frame)
def setBlend(self, animBlend = None, frameBlend = None,
             blendType = None, partName = None):
    """
    Configure how the Actor combines animations and interpolates
    between consecutive frames.

    animBlend and frameBlend are boolean flags; passing None for
    either leaves its current setting untouched.

    With animBlend True, several animations may play on the Actor
    simultaneously — play(), loop(), or pose() may be called for
    multiple animations, each contributing to the final pose.  In
    that mode, starting an animation does not by itself make it
    visible; also call setControlEffect() to say how much each
    animation contributes.

    frameBlend is unrelated to multi-animation blending: True makes
    the Actor interpolate smoothly between consecutive frames,
    False holds each frame until the next is ready.  Its default
    comes from the interpolate-frames Config.prc variable.

    blendType selects the algorithm used to combine two or more
    matrix values into one result; different skeleton hierarchies
    may benefit from different algorithms.  Its default comes from
    the anim-blend-type Config.prc variable.
    """
    for partBundle in self.getPartBundles(partName = partName):
        if blendType is not None:
            partBundle.setBlendType(blendType)
        if animBlend is not None:
            partBundle.setAnimBlendFlag(animBlend)
        if frameBlend is not None:
            partBundle.setFrameBlendFlag(frameBlend)
def enableBlend(self, blendType = PartBundle.BTNormalizedLinear, partName = None):
    """
    Deprecated; use setBlend() instead.

    Turns on blending of multiple simultaneous animations: after
    this call, play(), loop(), or pose() may be invoked for several
    animations at once, each contributing to the final pose.  An
    animation started this way is not visible until you also give
    it a contribution via setControlEffect().
    """
    self.setBlend(animBlend = True, blendType = blendType, partName = partName)
def disableBlend(self, partName = None):
    """
    Deprecated; use setBlend() instead.

    Restores normal one-animation-at-a-time operation after a
    previous call to enableBlend().
    """
    self.setBlend(animBlend = False, partName = partName)
def setControlEffect(self, animName, effect,
                     partName = None, lodName = None):
    """
    Set how much the named animation contributes to the overall
    pose.  This governs blending of multiple animations, so it is
    only meaningful after a prior setBlend(animBlend = True).
    """
    for animControl in self.getAnimControls(animName, partName, lodName):
        animControl.getPart().setControlEffect(animControl, effect)
def getAnimFilename(self, animName, partName='modelRoot'):
    """
    getAnimFilename(self, animName)
    Return the filename the named animation was loaded from, or
    None if the animation is not known for the given part.
    """
    if self.mergeLODBundles:
        lodName = 'common'
    elif self.switches:
        lodName = str(self.switches.keys()[0])
    else:
        lodName = 'lodRoot'
    try:
        return self.__animControlDict[lodName][partName][animName].filename
    except KeyError:
        # Only a missing lod/part/anim key is an expected miss; the
        # previous bare "except:" also hid genuine errors (including
        # KeyboardInterrupt).
        return None
def getAnimControl(self, animName, partName=None, lodName=None,
                   allowAsyncBind = True):
    """
    getAnimControl(self, string, string, string="lodRoot")
    Search the animControl dictionary indicated by lodName for
    a given anim and part.  If none specified, try the first part and lod.
    Return the animControl if present, or None otherwise.
    """
    if not partName:
        partName = 'modelRoot'
    if self.mergeLODBundles:
        # All LODs share one set of bindings under the 'common' key.
        lodName = 'common'
    elif not lodName:
        if self.switches:
            lodName = str(self.switches.keys()[0])
        else:
            lodName = 'lodRoot'
    partDict = self.__animControlDict.get(lodName)
    # if this assertion fails, named lod was not present
    assert partDict != None
    animDict = partDict.get(partName)
    if animDict == None:
        # part was not present
        Actor.notify.warning("couldn't find part: %s" % (partName))
    else:
        anim = animDict.get(animName)
        if anim == None:
            # anim was not present
            assert Actor.notify.debug("couldn't find anim: %s" % (animName))
            pass
        else:
            # bind the animation first if we need to
            if not anim.animControl:
                self.__bindAnimToPart(animName, partName, lodName,
                                      allowAsyncBind = allowAsyncBind)
            elif not allowAsyncBind:
                # Synchronous request: block until any in-flight
                # asynchronous bind has completed.
                anim.animControl.waitPending()
            return anim.animControl
    return None
def getAnimControls(self, animName=None, partName=None, lodName=None,
                    allowAsyncBind = True):
    """getAnimControls(self, string, string=None, string=None)

    Returns a list of the AnimControls that represent the given
    animation for the given part and the given lod.

    If animName is None or omitted, the currently-playing
    animation (or all currently-playing animations) is returned.
    If animName is True, all animations are returned.  If animName
    is a single string name, that particular animation is
    returned.  If animName is a list of string names, all of the
    names animations are returned.

    If partName is None or omitted, all parts are returned (or
    possibly the one overall Actor part, according to the
    subpartsComplete flag).

    If lodName is None or omitted, all LOD's are returned.
    """
    if partName == None and self.__subpartsComplete:
        # If we have the __subpartsComplete flag, and no partName
        # is specified, it really means to play the animation on
        # all subparts, not on the overall Actor.
        partName = self.__subpartDict.keys()
    controls = []
    # build list of lodNames and corresponding animControlDicts
    # requested.
    if lodName == None or self.mergeLODBundles:
        # Get all LOD's
        animControlDictItems = self.__animControlDict.items()
    else:
        partDict = self.__animControlDict.get(lodName)
        if partDict == None:
            Actor.notify.warning("couldn't find lod: %s" % (lodName))
            animControlDictItems = []
        else:
            animControlDictItems = [(lodName, partDict)]
    for lodName, partDict in animControlDictItems:
        # Now, build the list of partNames and the corresponding
        # animDicts.
        if partName == None:
            # Get all main parts, but not sub-parts.
            animDictItems = []
            for thisPart, animDict in partDict.items():
                if thisPart not in self.__subpartDict:
                    animDictItems.append((thisPart, animDict))
        else:
            # Get exactly the named part or parts.
            if isinstance(partName, types.StringTypes):
                partNameList = [partName]
            else:
                partNameList = partName
            animDictItems = []
            for pName in partNameList:
                animDict = partDict.get(pName)
                if animDict == None:
                    # Maybe it's a subpart that hasn't been bound yet.
                    # Register an empty dict for it in partDict so the
                    # lazy-copy logic below can fill it in.
                    subpartDef = self.__subpartDict.get(pName)
                    if subpartDef:
                        animDict = {}
                        partDict[pName] = animDict
                if animDict == None:
                    # part was not present
                    Actor.notify.warning("couldn't find part: %s" % (pName))
                else:
                    animDictItems.append((pName, animDict))
        if animName is None:
            # get all playing animations
            for thisPart, animDict in animDictItems:
                for anim in animDict.values():
                    if anim.animControl and anim.animControl.isPlaying():
                        controls.append(anim.animControl)
        else:
            # get the named animation(s) only.
            if isinstance(animName, types.StringTypes):
                # A single animName
                animNameList = [animName]
            else:
                # A list of animNames, or True to indicate all anims.
                animNameList = animName
            for thisPart, animDict in animDictItems:
                names = animNameList
                if animNameList is True:
                    names = animDict.keys()
                for animName in names:
                    anim = animDict.get(animName)
                    if anim == None and partName != None:
                        for pName in partNameList:
                            # Maybe it's a subpart that hasn't been bound yet.
                            # Copy the anim entry down from the true part.
                            subpartDef = self.__subpartDict.get(pName)
                            if subpartDef:
                                truePartName = subpartDef.truePartName
                                anim = partDict[truePartName].get(animName)
                                if anim:
                                    anim = anim.makeCopy()
                                    animDict[animName] = anim
                    if anim == None:
                        # anim was not present
                        assert Actor.notify.debug("couldn't find anim: %s" % (animName))
                        pass
                    else:
                        # bind the animation first if we need to
                        animControl = anim.animControl
                        if animControl == None:
                            animControl = self.__bindAnimToPart(
                                animName, thisPart, lodName,
                                allowAsyncBind = allowAsyncBind)
                        elif not allowAsyncBind:
                            # Force the animation to load if it's
                            # not already loaded.
                            animControl.waitPending()
                        if animControl:
                            controls.append(animControl)
    return controls
def loadModel(self, modelPath, partName="modelRoot", lodName="lodRoot",
              copy = True, okMissing = None, autoBindAnims = True):
    """Actor model loader.  Takes a model name (ie file path), a part
    name(defaults to "modelRoot") and an lod name(defaults to "lodRoot").

    modelPath may also be a NodePath, in which case it is used (or
    copied, when copy is True) directly instead of loading from disk.
    Raises StandardError if the model cannot be loaded.
    """
    assert partName not in self.__subpartDict
    assert Actor.notify.debug("in loadModel: %s, part: %s, lod: %s, copy: %s" % \
        (modelPath, partName, lodName, copy))
    if isinstance(modelPath, NodePath):
        # If we got a NodePath instead of a string, use *that* as
        # the model directly.
        if (copy):
            model = modelPath.copyTo(NodePath())
        else:
            model = modelPath
    else:
        # otherwise, we got the name of the model to load.
        loaderOptions = self.modelLoaderOptions
        if not copy:
            # If copy = 0, then we should always hit the disk.
            loaderOptions = LoaderOptions(loaderOptions)
            loaderOptions.setFlags(loaderOptions.getFlags() & ~LoaderOptions.LFNoRamCache)
        # Pass loaderOptions to specify that we want to
        # get the skeleton model.  This only matters to model
        # files (like .mb) for which we can choose to extract
        # either the skeleton or animation, or neither.
        model = loader.loadModel(modelPath, loaderOptions = loaderOptions, okMissing = okMissing)
    if (model == None):
        raise StandardError, "Could not load Actor model %s" % (modelPath)
    # Locate the Character node: either the model root itself or the
    # first Character found underneath it.
    if (model.node().isOfType(Character.getClassType())):
        bundleNP = model
    else:
        bundleNP = model.find("**/+Character")
    if (bundleNP.isEmpty()):
        Actor.notify.warning("%s is not a character!" % (modelPath))
        model.reparentTo(self.__geomNode)
    else:
        # Maybe the model file also included some animations.  If
        # so, try to bind them immediately and put them into the
        # animControlDict.
        if autoBindAnims:
            acc = AnimControlCollection()
            autoBind(model.node(), acc, ~0)
            numAnims = acc.getNumAnims()
        else:
            numAnims = 0
        # Now extract out the Character and integrate it with
        # the Actor.
        if (lodName!="lodRoot"):
            # parent to appropriate node under LOD switch
            bundleNP.reparentTo(self.__LODNode.find(str(lodName)))
        else:
            bundleNP.reparentTo(self.__geomNode)
        self.__prepareBundle(bundleNP, model.node(), partName, lodName)
        # we rename this node to make Actor copying easier
        bundleNP.node().setName("%s%s"%(Actor.partPrefix,partName))
        if numAnims != 0:
            # If the model had some animations, store them in the
            # dict so they can be played.
            Actor.notify.info("model contains %s animations." % (numAnims))
            # make sure this lod is in anim control dict
            if self.mergeLODBundles:
                lodName = 'common'
            self.__animControlDict.setdefault(lodName, {})
            self.__animControlDict[lodName].setdefault(partName, {})
            for i in range(numAnims):
                animControl = acc.getAnim(i)
                animName = acc.getAnimName(i)
                animDef = Actor.AnimDef()
                animDef.animControl = animControl
                self.__animControlDict[lodName][partName][animName] = animDef
def __prepareBundle(self, bundleNP, partModel,
                    partName="modelRoot", lodName="lodRoot"):
    """Register the Character found at bundleNP as a part of this
    Actor under the given part and LOD names, merging its bundle
    into the common bundle when mergeLODBundles is in effect."""
    assert partName not in self.__subpartDict
    # Rename the node at the top of the hierarchy, if we
    # haven't already, to make it easier to identify this
    # actor in the scene graph.
    if not self.gotName:
        self.node().setName(bundleNP.node().getName())
        self.gotName = 1
    bundleDict = self.__partBundleDict.get(lodName, None)
    if bundleDict == None:
        # make a dictionary to store these parts in
        bundleDict = {}
        self.__partBundleDict[lodName] = bundleDict
        self.__updateSortedLODNames()
    node = bundleNP.node()
    # A model loaded from disk will always have just one bundle.
    assert(node.getNumBundles() == 1)
    bundleHandle = node.getBundleHandle(0)
    if self.mergeLODBundles:
        loadedBundleHandle = self.__commonBundleHandles.get(partName, None)
        if loadedBundleHandle:
            # We've already got a bundle for this part; merge it.
            node.mergeBundles(bundleHandle, loadedBundleHandle)
            bundleHandle = loadedBundleHandle
        else:
            # We haven't already got a bundle for this part; store it.
            self.__commonBundleHandles[partName] = bundleHandle
    bundleDict[partName] = Actor.PartDef(bundleNP, bundleHandle, partModel)
def makeSubpart(self, partName, includeJoints, excludeJoints = [],
parent="modelRoot", overlapping = False):
"""Defines a new "part" of the Actor that corresponds to the
same geometry as the named parent part, but animates only a
certain subset of the joints. This can be used for
partial-body animations, for instance to animate a hand waving
while the rest of the body continues to play its walking
animation.
includeJoints is a list of joint names that are to be animated
by the subpart. Each name can include globbing characters
like '?' or '*', which will match one or any number of
characters, respectively. Including a joint by naming it in
includeJoints implicitly includes all of the descendents of
that joint as well, except for excludeJoints, below.
excludeJoints is a list of joint names that are *not* to be
animated by the subpart. As in includeJoints, each name can
include globbing characters. If a joint is named by
excludeJoints, it will not be included (and neither will any
of its descendents), even if a parent joint was named by
includeJoints.
if overlapping is False, an error is raised (in the dev build)
if this subpart shares joints with any other subparts. If
overlapping is True, no such error is raised.
parent is the actual partName that this subpart is based
on."""
assert partName not in self.__subpartDict
subpartDef = self.__subpartDict.get(parent, Actor.SubpartDef(''))
subset = PartSubset(subpartDef.subset)
for name in includeJoints:
subset.addIncludeJoint(GlobPattern(name))
for name in excludeJoints:
subset.addExcludeJoint(GlobPattern(name))
self.__subpartDict[partName] = Actor.SubpartDef(parent, subset)
if __dev__ and not overlapping and self.validateSubparts.getValue():
# Without the overlapping flag True, we're not allowed to
# define overlapping sub-parts. Verify that we haven't.
for otherPartName, otherPartDef in self.__subpartDict.items():
if otherPartName != partName and otherPartDef.truePartName == parent:
joints = self.getOverlappingJoints(partName, otherPartName)
if joints:
raise StandardError, 'Overlapping joints: %s and %s' % (partName, otherPartName)
def setSubpartsComplete(self, flag):
    """Sets the subpartsComplete flag.  This affects the behavior
    of play(), loop(), stop(), etc., when no explicit parts are
    specified.

    When this flag is False (the default), play() with no parts
    means to play the animation on the overall Actor, which is a
    separate part that overlaps each of the subparts.  If you then
    play a different animation on a subpart, it may stop the
    overall animation (in non-blend mode) or blend with it (in
    blend mode).

    When this flag is True, play() with no parts means to play the
    animation on each of the subparts--instead of on the overall
    Actor.  In this case, you may then play a different animation
    on a subpart, which replaces only that subpart's animation.

    It makes sense to set this True when the union of all of your
    subparts completely defines the entire Actor.
    """
    self.__subpartsComplete = flag
    if __dev__ and self.__subpartsComplete and self.validateSubparts.getValue():
        # If we've specified any parts at all so far, make sure we've
        # specified all of them.
        if self.__subpartDict:
            self.verifySubpartsComplete()
def getSubpartsComplete(self):
    """Returns the current subpartsComplete flag; see
    setSubpartsComplete() for its meaning."""
    return self.__subpartsComplete
def verifySubpartsComplete(self, partName = None, lodName = None):
    """ Ensures that each joint is defined by at least one
    subPart. Prints a warning if this is not the case. """
    if partName:
        # A specific part was named: it must be a true part, not a subpart.
        assert partName not in self.__subpartDict
        partNames = [partName]
    else:
        if lodName:
            partNames = self.__partBundleDict[lodName].keys()
        else:
            # No LOD named: any LOD's part list will do (Python 2 list
            # semantics of .values()).
            partNames = self.__partBundleDict.values()[0].keys()

    for partName in partNames:
        # Union of all joints claimed by any subpart of this part.
        subJoints = set()
        for subPartName, subPartDef in self.__subpartDict.items():
            if subPartName != partName and subPartDef.truePartName == partName:
                subJoints |= set(self.getJoints(partName = subPartName, lodName = lodName))
        # Joints of the full part not covered by any subpart are reported.
        allJoints = set(self.getJoints(partName = partName, lodName = lodName))
        diff = allJoints.difference(subJoints)
        if diff:
            self.notify.warning('Uncovered joints: %s' % (list(diff)))
def loadAnims(self, anims, partName="modelRoot", lodName="lodRoot"):
    """loadAnims(self, string:string{}, string='modelRoot',
    string='lodRoot')
    Actor anim loader. Takes an optional partName (defaults to
    'modelRoot' for non-multipart actors) and lodName (defaults
    to 'lodRoot' for non-LOD actors) and dict of corresponding
    anims in the form animName:animPath{}
    """
    # reload == False (only in the lodName == 'all' case) means these
    # anims may already be present and are just being re-registered.
    reload = True
    if self.mergeLODBundles:
        # All LODs share a single merged bundle under the pseudo-LOD
        # name 'common'.
        lodNames = ['common']
    elif lodName == 'all':
        reload = False
        lodNames = self.switches.keys()
        lodNames.sort()
        # Normalize switch keys to strings for dictionary lookups below.
        for i in range(0,len(lodNames)):
            lodNames[i] = str(lodNames[i])
    else:
        lodNames = [lodName]

    assert Actor.notify.debug("in loadAnims: %s, part: %s, lod: %s" %
                              (anims, partName, lodNames[0]))

    firstLoad = True
    if not reload:
        # If this part already has an entry for the first LOD, assume
        # the anims were loaded before; just update filenames/bundles.
        try:
            self.__animControlDict[lodNames[0]][partName]
            firstLoad = False
        except:
            pass
    for lName in lodNames:
        if firstLoad:
            self.__animControlDict.setdefault(lName, {})
            self.__animControlDict[lName].setdefault(partName, {})

    for animName, filename in anims.items():
        # make sure this lod is in anim control dict
        for lName in lodNames:
            if firstLoad:
                self.__animControlDict[lName][partName][animName] = Actor.AnimDef()

            if isinstance(filename, NodePath):
                # We were given a pre-load anim bundle, not a filename.
                assert not filename.isEmpty()
                if filename.node().isOfType(AnimBundleNode.getClassType()):
                    animBundleNP = filename
                else:
                    # Search beneath the given node for the bundle itself.
                    animBundleNP = filename.find('**/+AnimBundleNode')
                assert not animBundleNP.isEmpty()
                self.__animControlDict[lName][partName][animName].animBundle = animBundleNP.node().getBundle()

            else:
                # We were given a filename that must be loaded.
                # Store the filename only; we will load and bind
                # it (and produce an AnimControl) when it is
                # played.
                self.__animControlDict[lName][partName][animName].filename = filename
def initAnimsOnAllLODs(self, partNames):
    """Ensure the anim-control dictionary has an (initially empty)
    entry for each of the named parts on every LOD."""
    if self.mergeLODBundles:
        # All LODs share one merged bundle under the pseudo-LOD 'common'.
        lodNames = ['common']
    else:
        lodNames = self.__partBundleDict.keys()

    for lodName in lodNames:
        animDict = self.__animControlDict.setdefault(lodName, {})
        for part in partNames:
            animDict.setdefault(part, {})
def loadAnimsOnAllLODs(self, anims,partName="modelRoot"):
    """loadAnimsOnAllLODs(self, string:string{}, string='modelRoot')
    Actor anim loader. Takes an optional partName (defaults to
    'modelRoot' for non-multipart actors) and dict of corresponding
    anims in the form animName:animPath{}.

    Registers each anim's file path on every LOD; the file is loaded
    and bound (producing an AnimControl) when it is first played.
    """
    if self.mergeLODBundles:
        # All LODs share a single merged bundle under 'common'.
        lodNames = ['common']
    else:
        lodNames = self.__partBundleDict.keys()

    for animName, filename in anims.items():
        # make sure this lod is in anim control dict
        for lod in lodNames:
            # store the file path only; we will bind it (and produce
            # an AnimControl) when it is played
            self.__animControlDict[lod][partName][animName]= Actor.AnimDef(filename)
def postFlatten(self):
    """Call this after performing an aggressive flatten operation,
    such as flattenStrong(), that involves the Actor.  This is
    especially necessary when mergeLODBundles is true, since this
    kind of actor may be broken after a flatten operation; this
    method should restore proper Actor functionality. """
    if self.mergeLODBundles:
        # Re-merge all bundles, and restore the common bundle map.
        self.__commonBundleHandles = {}
        for lodName, bundleDict in self.__partBundleDict.items():
            for partName, partDef in bundleDict.items():
                loadedBundleHandle = self.__commonBundleHandles.get(partName, None)
                node = partDef.partBundleNP.node()
                if loadedBundleHandle:
                    # This part was already seen on another LOD; fold
                    # this LOD's bundle into the shared handle.
                    node.mergeBundles(partDef.partBundleHandle, loadedBundleHandle)
                    partDef.partBundleHandle = loadedBundleHandle
                else:
                    # First LOD seen for this part becomes the common handle.
                    self.__commonBundleHandles[partName] = partDef.partBundleHandle

    # Since we may have merged together some bundles, all of
    # our anims are now suspect.  Force them to reload.
    self.unloadAnims()
def unloadAnims(self, anims=None, partName=None, lodName=None):
    """unloadAnims(self, string:string{}, string='modelRoot',
    string='lodRoot')
    Actor anim unloader. Takes an optional partName (defaults to
    'modelRoot' for non-multipart actors) and lodName (defaults to
    'lodRoot' for non-LOD actors) and list of animation
    names. Deletes the anim control for the given animation and
    parts/lods.

    If any parameter is None or omitted, it means all of them.
    """
    assert Actor.notify.debug("in unloadAnims: %s, part: %s, lod: %s" %
                              (anims, partName, lodName))

    if lodName == None or self.mergeLODBundles:
        # With merged bundles there is effectively only one LOD anyway.
        lodNames = self.__animControlDict.keys()
    else:
        lodNames = [lodName]

    if (partName == None):
        if len(lodNames) > 0:
            # Any LOD's part list will do; they are kept in parallel.
            partNames = self.__animControlDict[lodNames[0]].keys()
        else:
            partNames = []
    else:
        partNames = [partName]

    if (anims==None):
        # Release every bound anim control on the selected parts/LODs.
        for lodName in lodNames:
            for partName in partNames:
                for animDef in self.__animControlDict[lodName][partName].values():
                    if animDef.animControl != None:
                        # Try to clear any control effects before we let
                        # our handle on them go.  This is especially
                        # important if the anim control was blending
                        # animations.
                        animDef.animControl.getPart().clearControlEffects()
                        animDef.animControl = None
    else:
        # Release only the named anims; names with no AnimDef or no
        # bound control are silently skipped.
        for lodName in lodNames:
            for partName in partNames:
                for anim in anims:
                    animDef = self.__animControlDict[lodName][partName].get(anim)
                    if animDef and animDef.animControl != None:
                        # Try to clear any control effects before we let
                        # our handle on them go.  This is especially
                        # important if the anim control was blending
                        # animations.
                        animDef.animControl.getPart().clearControlEffects()
                        animDef.animControl = None
def bindAnim(self, animName, partName = None, lodName = None,
             allowAsyncBind = False):
    """
    Binds the named animation to the named part and/or lod.  If
    allowAsyncBind is False, this guarantees that the animation is
    bound immediately--the animation is never bound in a
    sub-thread; it will be loaded and bound in the main thread, so
    it will be available by the time this method returns.

    The parameters are the same as for getAnimControls(); this
    method is a thin wrapper around it, called only for its
    load/bind side effect.

    Use this method if you need to ensure that an animation is
    available before you start to play it, and you don't mind
    holding up the render for a frame or two until the animation
    is available.
    """
    # Requesting the controls is what forces the load and bind.
    self.getAnimControls(animName = animName,
                         partName = partName,
                         lodName = lodName,
                         allowAsyncBind = allowAsyncBind)
def bindAllAnims(self, allowAsyncBind = False):
    """Loads and binds every animation that has been defined for
    the Actor."""
    # animName = True selects all animations in getAnimControls().
    self.getAnimControls(animName = True, allowAsyncBind = allowAsyncBind)
def waitPending(self, partName = None):
    """Blocks until all asynchronously pending animations (that are
    currently playing) have been loaded and bound to the Actor.
    Call this after calling play() if you are using asynchronous
    binds, but you need this particular animation to be loaded
    immediately."""
    for bundle in self.getPartBundles(partName = partName):
        bundle.waitPending()
def __bindAnimToPart(self, animName, partName, lodName,
                     allowAsyncBind = True):
    """
    Binds the named animation to the named part/lod and returns
    the associated animControl.  The animation is loaded and bound
    in a sub-thread, if allowAsyncBind is True,
    self.allowAsyncBind is True, threading is enabled, and the
    animation has a preload table generated for it (e.g. via
    "egg-optchar -preload").  Even though the animation may or may
    not be yet bound at the time this function returns, a usable
    animControl is returned, or None if the animation could not be
    bound.
    """
    # make sure this anim is in the dict
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))

    partDict = self.__animControlDict[lodName]
    animDict = partDict.get(partName)
    if animDict == None:
        # It must be a subpart that hasn't been bound yet.
        animDict = {}
        partDict[partName] = animDict

    anim = animDict.get(animName)
    if anim == None:
        # It must be a subpart that hasn't been bound yet.
        anim = partDict[subpartDef.truePartName].get(animName)
        if anim == None:
            # Bug fix: the original only checked for a missing anim
            # *after* calling makeCopy() on it, so a missing animation
            # crashed with AttributeError instead of reporting this
            # error.  Also format the message ourselves rather than
            # passing the %-argument as a second parameter.
            Actor.notify.error("actor has no animation %s" % (animName,))
        anim = anim.makeCopy()
        animDict[animName] = anim

    # only bind if not already bound!
    if anim.animControl:
        return anim.animControl

    if self.mergeLODBundles:
        bundle = self.__commonBundleHandles[subpartDef.truePartName].getBundle()
    else:
        bundle = self.__partBundleDict[lodName][subpartDef.truePartName].getBundle()

    if anim.animBundle:
        # We already have a bundle; just bind it.
        animControl = bundle.bindAnim(anim.animBundle, -1, subpartDef.subset)
    else:
        # Load and bind the anim.  This might be an asynchronous
        # operation that will complete in the background, but if so it
        # will still return a usable AnimControl.
        animControl = bundle.loadBindAnim(
            loader.loader, Filename(anim.filename), -1,
            subpartDef.subset, allowAsyncBind and self.allowAsyncBind)

    if not animControl:
        # Couldn't bind.  (This implies the binding operation was
        # not attempted asynchronously.)
        return None

    # store the animControl
    anim.animControl = animControl
    assert Actor.notify.debug("binding anim: %s to part: %s, lod: %s" %
                              (animName, partName, lodName))
    return animControl
def __copyPartBundles(self, other):
    """__copyPartBundles(self, Actor)
    Copy the part bundle dictionary from another actor as this
    instance's own. NOTE: this method does not actually copy geometry
    """
    for lodName in other.__partBundleDict.keys():
        # find the LOD node that will hold this part
        if lodName == 'lodRoot':
            partLod = self
        else:
            partLod = self.__LODNode.find(str(lodName))
        if partLod.isEmpty():
            Actor.notify.warning("no lod named: %s" % (lodName))
            return None
        for partName, partDef in other.__partBundleDict[lodName].items():
            # We can really only copy from a non-flattened avatar.
            assert partDef.partBundleNP.node().getNumBundles() == 1

            # find the part in our tree
            bundleNP = partLod.find("**/%s%s" % (Actor.partPrefix, partName))
            # Bug fix: NodePath.find() returns an *empty* NodePath (never
            # None) when nothing matches, so the original "!= None" test
            # always succeeded and the error branch was unreachable.
            if not bundleNP.isEmpty():
                # store the part bundle
                self.__prepareBundle(bundleNP, partDef.partModel,
                                     partName, lodName)
            else:
                Actor.notify.error("lod: %s has no matching part: %s" %
                                   (lodName, partName))
def __copySubpartDict(self, other):
    """Copies the subpartDict from another as this instance's own.
    This makes a deep copy of the map and all of the names and
    PartSubset objects within it.  We can't use copy.deepcopy()
    because of the included C++ PartSubset objects."""
    self.__subpartDict = {}
    for partName, subpartDef in other.__subpartDict.items():
        # (Removed an unused local, 'subpartDefCopy', from the original.)
        if subpartDef:
            subpartDef = subpartDef.makeCopy()
        self.__subpartDict[partName] = subpartDef
def __copyAnimControls(self, other):
    """__copyAnimControls(self, Actor)
    Copy the animation dictionary from another actor: every AnimDef
    is duplicated via makeCopy() into this actor's own per-LOD,
    per-part animation dictionary."""
    assert(other.mergeLODBundles == self.mergeLODBundles)
    for lodName, otherParts in other.__animControlDict.items():
        newParts = {}
        self.__animControlDict[lodName] = newParts
        for partName, otherAnims in otherParts.items():
            newAnims = {}
            newParts[partName] = newAnims
            for animName, otherAnim in otherAnims.items():
                newAnims[animName] = otherAnim.makeCopy()
def actorInterval(self, *args, **kw):
    """Create and return an ActorInterval controlling this Actor."""
    # Imported here rather than at module scope to avoid a circular
    # import between Actor and the interval package.
    from direct.interval import ActorInterval
    interval = ActorInterval.ActorInterval(self, *args, **kw)
    return interval
def getAnimBlends(self, animName=None, partName=None, lodName=None):
    """ Returns a list of the form:

    [ (lodName, [(animName, [(partName, effect), (partName, effect), ...]),
                 (animName, [(partName, effect), (partName, effect), ...]),
                 ...]),
      (lodName, [(animName, [(partName, effect), (partName, effect), ...]),
                 (animName, [(partName, effect), (partName, effect), ...]),
                 ...]),
      ... ]

    This list reports the non-zero control effects for each
    partName within a particular animation and LOD.  A None value
    for any parameter means "all of them". """

    result = []
    if animName is None:
        animNames = self.getAnimNames()
    else:
        animNames = [animName]

    if lodName is None:
        lodNames = self.getLODNames()
        if self.mergeLODBundles:
            # Merged bundles: only the first LOD is meaningful.
            lodNames = lodNames[:1]
    else:
        lodNames = [lodName]

    if partName == None and self.__subpartsComplete:
        # The subparts jointly cover the whole Actor; report per subpart.
        partNames = self.__subpartDict.keys()
    else:
        partNames = [partName]

    for lodName in lodNames:
        animList = []
        for animName in animNames:
            blendList = []
            for partName in partNames:
                control = self.getAnimControl(animName, partName, lodName)
                if control:
                    part = control.getPart()
                    effect = part.getControlEffect(control)
                    # Only anims actually contributing to the blend.
                    if effect > 0.:
                        blendList.append((partName, effect))
            if blendList:
                animList.append((animName, blendList))
        if animList:
            result.append((lodName, animList))

    return result
def printAnimBlends(self, animName=None, partName=None, lodName=None):
for lodName, animList in self.getAnimBlends(animName, partName, lodName):
print 'LOD %s:' % (lodName)
for animName, blendList in animList:
list = []
for partName, effect in blendList:
list.append('%s:%.3f' % (partName, effect))
print ' %s: %s' % (animName, ', '.join(list))
def osdAnimBlends(self, animName=None, partName=None, lodName=None):
    """Put the current anim blending info (per-part control effects
    and play rates) into the on-screen debug panel.  With no
    animName, every animation except 'nothing' is reported."""
    if not onScreenDebug.enabled:
        return
    # puts anim blending info into the on-screen debug panel
    if animName is None:
        animNames = self.getAnimNames()
    else:
        animNames = [animName]
    for animName in animNames:
        # Bug fix: compare strings with ==, not 'is'.  Identity of equal
        # string literals is an interner implementation detail and is not
        # guaranteed, so the original check could silently fail.
        if animName == 'nothing':
            continue
        thisAnim = ''
        totalEffect = 0.
        controls = self.getAnimControls(animName, partName, lodName)
        # First line: per-part control effects.
        for control in controls:
            part = control.getPart()
            name = part.getName()
            effect = part.getControlEffect(control)
            if effect > 0.:
                totalEffect += effect
                thisAnim += ('%s:%.3f, ' % (name, effect))
        thisAnim += "\n"
        # Second line: per-part play rates.
        for control in controls:
            part = control.getPart()
            name = part.getName()
            rate = control.getPlayRate()
            thisAnim += ('%s:%.1f, ' % (name, rate))
        # don't display anything if this animation is not being played
        itemName = 'anim %s' % animName
        if totalEffect > 0.:
            onScreenDebug.add(itemName, thisAnim)
        else:
            if onScreenDebug.has(itemName):
                onScreenDebug.remove(itemName)
# these functions compensate for actors that are modeled facing the viewer but need
# to face away from the camera in the game
def faceAwayFromViewer(self):
    """Turn the geometry 180 degrees so a model authored facing the
    viewer instead faces away from the camera."""
    self.getGeomNode().setH(180)
def faceTowardsViewer(self):
    """Reset the geometry heading so the actor faces the camera again."""
    self.getGeomNode().setH(0)
def renamePartBundles(self, partName, newBundleName):
    # Rename the underlying PartBundle for the given part (or subpart,
    # which resolves to its true part) on every LOD.
    subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
    for partBundleDict in self.__partBundleDict.values():
        partDef=partBundleDict.get(subpartDef.truePartName)
        # NOTE(review): get() returns None if this LOD lacks the part,
        # which would raise AttributeError on the next line -- confirm
        # callers only pass parts present on every LOD.
        partDef.getBundle().setName(newBundleName)
| {
"repo_name": "toontownfunserver/Panda3D-1.9.0",
"path": "direct/actor/Actor.py",
"copies": "2",
"size": "106332",
"license": "bsd-3-clause",
"hash": 7853209984678658000,
"line_mean": 40.5359375,
"line_max": 116,
"alpha_frac": 0.5694334725,
"autogenerated": false,
"ratio": 4.406265539532571,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5975699012032571,
"avg_score": null,
"num_lines": null
} |
# ----------- Actor ------------
# Actors represent interactable characters within the game world.
# ----------------------------
# -------- Imports --------
from vector import Vector
# -------- Actor --------
# Base abstract actor
class Actor:
    """Base abstract actor: pairs a sprite with a position vector."""

    def __init__( self ):
        # No sprite until setSprite() is called; start at the origin.
        self.sprite = None
        self.vector = Vector( 0, 0 )

    def setSprite( self, sprite ):
        self.sprite = sprite

    def update( self, frameTime, lifeTime ):
        # Mirror the actor's position onto its sprite each frame.
        self.sprite.vector = self.vector
# ----------- Moveable Actor -----------
# An actor that can be repositioned after instantiation.
class MoveableActor( Actor ):
    """An actor that can be repositioned after instantiation by
    setting a per-update movement vector."""

    def __init__( self ):
        Actor.__init__( self )
        # Bug fix: give moveVector a default so update() does not raise
        # AttributeError if it runs before the first call to move().
        self.moveVector = Vector( 0, 0 )

    def move( self, vector ):
        self.moveVector = vector

    def update( self, frameTime, lifeTime ):
        # Displace by the movement vector, then let Actor propagate the
        # new position to the sprite.
        self.vector = self.vector.add( self.moveVector )
        Actor.update( self, frameTime, lifeTime )
# ----------- Controllable Actor -----------
# An actor that can be positioned.
class ControllableActor( MoveableActor ):
    # -------- Set Speed --------
    # Set the actor's movement speed.
    #
    # @param number speed
    # @return None
    def setSpeed( self, speed ):
        self.speed = speed
    # ----------- Set Target -----------
    # Set the actor's target
    #
    # @param Vector vector
    # @return None
    def setTargetVector( self, vector ):
        self.targetVector = vector
    # -------- Update --------
    # Intentionally does nothing yet (does not call MoveableActor.update).
    def update( self, frameTime, lifeTime ):
pass | {
"repo_name": "lsjroberts/7d7g",
"path": "framework/actor.py",
"copies": "1",
"size": "1360",
"license": "mit",
"hash": -7072029499023996000,
"line_mean": 22.0677966102,
"line_max": 65,
"alpha_frac": 0.5529411765,
"autogenerated": false,
"ratio": 4.108761329305136,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5161702505805136,
"avg_score": null,
"num_lines": null
} |
# ----------- Actor ------------
# Actors represent interactable characters within the game world.
# ----------------------------
# -------- Imports --------
import pygame, config
from app import UpdateableGameObject
from event import EventListener, PygameEvent
from vector import Vector
# -------- Actor --------
# Base abstract actor
class Actor( UpdateableGameObject ):
    """Base abstract actor: an updateable game object that pairs a
    sprite with a position vector."""

    def __init__( self ):
        UpdateableGameObject.__init__( self )
        # No sprite until setSprite() is called; start at the origin.
        self.sprite = None
        self.vector = Vector( 0, 0 )

    def setSprite( self, sprite ):
        # Attach the sprite and give it a back-reference to this actor.
        self.sprite = sprite
        sprite.setActor( self )

    def kill( self ):
        # Remove both the visual (sprite) and logical (game object) halves.
        self.sprite.kill( )
        self.removeGameObject( )

    def update( self, frameTime, lifeTime ):
        # Mirror the actor's position onto its sprite each frame.
        self.sprite.vector = self.vector
# ----------- Moveable Actor -----------
# An actor that can be repositioned after instantiation.
class MoveableActor( Actor ):
    """An actor that can be repositioned after instantiation.

    Movement is a per-update displacement vector (moveVector); the
    move*/stop* helpers add the actor's speed along an axis so that a
    matching stop* call cancels the corresponding move* exactly.
    """

    def __init__( self ):
        Actor.__init__( self )
        self.moveVector = Vector( 0, 0 )

    def move( self, vector ):
        self.moveVector = vector

    def moveLeft( self ):
        self.move( self.moveVector.add(
            Vector( -self.speed, 0 )
        ))

    def moveRight( self ):
        self.move( self.moveVector.add(
            Vector( self.speed, 0 )
        ))

    def moveUp( self ):
        self.move( self.moveVector.add(
            Vector( 0, -self.speed )
        ))

    def moveDown( self ):
        self.move( self.moveVector.add(
            Vector( 0, self.speed )
        ))

    def stopLeft( self ):
        # Cancel a previous moveLeft by adding the opposite displacement.
        self.move( self.moveVector.add(
            Vector( self.speed, 0 )
        ))

    def stopRight( self ):
        self.move( self.moveVector.add(
            Vector( -self.speed, 0 )
        ))

    def stopUp( self ):
        self.move( self.moveVector.add(
            Vector( 0, self.speed )
        ))

    def stopDown( self ):
        self.move( self.moveVector.add(
            Vector( 0, -self.speed )
        ))

    def update( self, frameTime, lifeTime ):
        # Bug fix: Vector.add() returns a new vector (the move*/stop*
        # helpers above rely on its return value); the original discarded
        # the result here, so the actor's position never changed.
        self.vector = self.vector.add( self.moveVector )
        Actor.update( self, frameTime, lifeTime )
# ----------- Controllable Actor -----------
# An actor that can be positioned.
class ControllableActor( MoveableActor ):
    # A moveable actor driven by keyboard input: key bindings are stored
    # as {'type', 'key', 'callback'} records and dispatched by an
    # ActorListener registered with the global event system.
    def __init__( self ):
        MoveableActor.__init__( self )
        self.speed = 0
        self.targetVector = Vector( 0, 0 )
        # List of control records consulted by ActorListener.notify().
        self.controls = []
        # Register for pygame events via the application's event hub.
        config.app.events.registerListener( ActorListener(self) )

    def setSpeed( self, speed ):
        self.speed = speed

    # ----------- Set Target -----------
    # Set the actor's target
    #
    # @param Vector vector
    # @return None
    def setTargetVector( self, vector ):
        self.targetVector = vector

    def addControl( self, key, callback, keyType ):
        # Bind 'callback' to the given key for the given event type
        # (pygame.KEYDOWN or pygame.KEYUP).
        self.controls.append({
            'type': keyType,
            'key': key,
            'callback': callback
        })

    def addControlDown( self, key, callback ):
        self.addControl( key, callback, pygame.KEYDOWN )

    def addControlUp( self, key, callback ):
        self.addControl( key, callback, pygame.KEYUP )

    def update( self, frameTime, lifeTime ):
        MoveableActor.update( self, frameTime, lifeTime )
# ----------- AI Actor -----------
# Actor controlled by the computer
class AIActor( MoveableActor ):
    # Placeholder: computer-controlled behavior is not implemented yet;
    # everything is inherited from MoveableActor.
    pass
class KillableActor( Actor ):
    """An actor with hit points that dies when they run out."""

    def setHealth( self, health ):
        self.health = health

    def takeDamage( self, damage ):
        self.health -= damage
        # Bug fix: use <= so the actor also dies when health reaches
        # exactly zero; the original only died strictly below zero,
        # leaving a living actor with 0 HP.
        if self.health <= 0:
            self.die( )

    def die( self ):
        self.kill( )
# ----------- Actor Listener -----------
# Listener with associated actor
class ActorListener( EventListener ):
    # Event listener bound to a single actor; dispatches raw pygame
    # key events to the actor's registered control callbacks.
    def __init__( self, actor ):
        self.actor = actor

    def notify( self, event ):
        # Only raw pygame input events carry key data we can match.
        if isinstance( event, PygameEvent ):
            for control in self.actor.controls:
                # Fire the callback whose event type and key both match.
                if control['type'] == event.data.type and control['key'] == event.data.key:
control['callback']() | {
"repo_name": "lsjroberts/7d7g",
"path": "on-hold-dung-beetle-dating/app/actor.py",
"copies": "2",
"size": "4121",
"license": "mit",
"hash": -3478558644387631000,
"line_mean": 23.2470588235,
"line_max": 91,
"alpha_frac": 0.5491385586,
"autogenerated": false,
"ratio": 3.993217054263566,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02303170541243176,
"num_lines": 170
} |
#: Actor name used to register controller
SERVE_CONTROLLER_NAME = "SERVE_CONTROLLER_ACTOR"

#: Actor name used to register HTTP proxy actor
SERVE_PROXY_NAME = "SERVE_PROXY_ACTOR"

#: HTTP Address
DEFAULT_HTTP_ADDRESS = "http://127.0.0.1:8000"

#: HTTP Host
DEFAULT_HTTP_HOST = "127.0.0.1"

#: HTTP Port
DEFAULT_HTTP_PORT = 8000

#: Max concurrency
ASYNC_CONCURRENCY = int(1e6)

#: Max time to wait for HTTP proxy in `serve.start()`.
HTTP_PROXY_TIMEOUT = 60

#: Default histogram buckets (milliseconds) for the latency tracker.
DEFAULT_LATENCY_BUCKET_MS = [
    1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000,
]

#: Name of backend reconfiguration method implemented by user.
BACKEND_RECONFIGURE_METHOD = "reconfigure"

#: Internally reserved version tag that cannot be used by applications.
# TODO(edoakes): this should be removed when we remove the old codepath.
RESERVED_VERSION_TAG = "__serve_version__"

#: All defined HTTP methods.
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods
ALL_HTTP_METHODS = [
    "GET",
    "HEAD",
    "POST",
    "PUT",
    "DELETE",
    "CONNECT",
    "OPTIONS",
    "TRACE",
    "PATCH",
]
| {
"repo_name": "ray-project/ray",
"path": "python/ray/serve/constants.py",
"copies": "1",
"size": "1138",
"license": "apache-2.0",
"hash": 7028911325809912000,
"line_mean": 21.76,
"line_max": 74,
"alpha_frac": 0.6810193322,
"autogenerated": false,
"ratio": 3.1349862258953167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43160055580953166,
"avg_score": null,
"num_lines": null
} |
""" Actor proxy for rodario framework """
# stdlib
import types
import pickle
from multiprocessing import Queue
from threading import Thread
from uuid import uuid4
from time import sleep
# local
from rodario import get_redis_connection
from rodario.future import Future
from rodario.exceptions import InvalidActorException, InvalidProxyException
class ActorProxy(object):  # pylint: disable=R0903

    """ Proxy object that fires calls to an actor over redis pubsub """

    def __init__(self, actor=None, uuid=None):
        """
        Initialize instance of ActorProxy.

        Accepts either an Actor object to clone or a UUID, but not both.

        :param rodario.actors.Actor actor: Actor to clone
        :param str uuid: UUID of Actor to clone
        """
        #: Redis connection
        self._redis = get_redis_connection()
        #: Redis PubSub client
        self._pubsub = None
        #: This proxy object's UUID for creating unique channels
        self.proxyid = str(uuid4())
        #: Response queues for sandboxing method calls
        self._response_queues = {}
        # avoid cyclic import
        actor_module = __import__('rodario.actors', fromlist=('Actor',))
        # pylint: disable=E1123
        self._pubsub = self._redis.pubsub(ignore_subscribe_messages=True)
        # All responses for this proxy arrive on its private channel and
        # are routed through _handler.
        self._pubsub.subscribe(**{'proxy:%s' % self.proxyid: self._handler})
        methods = set()

        def pubsub_thread():
            """ Call get_message in loop to fire _handler. """
            try:
                # Poll until self._pubsub is cleared; get_message()
                # dispatches each incoming message to _handler.
                while self._pubsub:
                    self._pubsub.get_message()
                    sleep(0.001)
            except:  # pylint: disable=W0702
                # Best-effort: let the daemon thread die quietly when the
                # connection goes away.
                pass

        # fire up the message handler thread as a daemon
        proc = Thread(target=pubsub_thread)
        proc.daemon = True
        proc.start()

        if isinstance(actor, actor_module.Actor):
            # proxying an Actor directly
            self.uuid = actor.uuid
            methods = actor._get_methods()  # pylint: disable=W0212
        elif isinstance(uuid, str):
            # proxying by UUID; get actor methods over pubsub
            self.uuid = uuid
            methods = self._proxy('_get_methods').get()
        else:
            raise InvalidProxyException('No actor or UUID provided')

        def get_lambda(name):
            """
            Generate a lambda function to proxy the given method.

            :param str name: Name of the method to proxy
            :rtype: :expression:`lambda`
            """
            # Binding 'name' here (one closure per call) avoids the
            # late-binding-loop-variable pitfall below.
            return lambda _, *args, **kwargs: self._proxy(name, *args, **kwargs)

        # create proxy methods for each public method of the original Actor
        for name in methods:
            setattr(self, name, types.MethodType(get_lambda(name), self))

    def _handler(self, message):
        """
        Handle message response via Queue object.

        :param tuple message: The message to dissect
        """
        # throw its value in the associated response queue
        data = pickle.loads(message['data'])
        # data is (call uuid, result): hand the result to the waiting
        # Future's queue, then drop the bookkeeping entry.
        self._response_queues[data[0]].put(data[1])
        self._response_queues.pop(data[0])

    def _proxy(self, method_name, *args, **kwargs):
        """
        Proxy a method call to redis pubsub.

        This method is not meant to be called directly. Instead, it is used
        by the proxy's self-generated methods to provide the proxy with the
        same public API as the actor it represents.

        :param str method_name: The method to proxy
        :param tuple args: The arguments to pass
        :param dict kwargs: The keyword arguments to pass
        :rtype: :class:`multiprocessing.Queue`
        """
        # fire off the method call to the original Actor over pubsub
        uuid = str(uuid4())
        count = self._redis.publish('actor:%s' % self.uuid,
                                    pickle.dumps((uuid, self.proxyid,
                                                  method_name, args, kwargs,)))

        if count == 0:
            # publish() returns the number of subscribers that received the
            # message; zero means no live actor listens on that channel.
            raise InvalidActorException('No such actor')

        queue = Queue()
        self._response_queues[uuid] = queue

        return Future(queue)
| {
"repo_name": "haliphax/rodario",
"path": "rodario/actors/proxy.py",
"copies": "1",
"size": "4183",
"license": "mit",
"hash": -6689887794378797000,
"line_mean": 32.464,
"line_max": 80,
"alpha_frac": 0.5943103036,
"autogenerated": false,
"ratio": 4.5026910656620025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00027654320987654325,
"num_lines": 125
} |
""" actor.py
The actor is the intelligent portion of our DSM implementation. The nodes
are just network-enabled dictionaries, it is the actor that knows how to
perform the replication.
"""
import itertools
import math
import uuid
# An Item is a value, tag, creator id tuple
def Item(value, tag, creator):
    """Build an item record: a (value, tag, creator-id) tuple."""
    return value, tag, creator
# A Message is an id, command, item tuple
def Message(id, command, key, item):
    """Build a message record: an (id, command, key, item) tuple."""
    return id, command, key, item
class DSMCommand(object):
    """ A record of an outstanding DSM command """

    def __init__(self, quora, key, completion_callback):
        self.key = key
        self._quora = quora
        self._responses = {}
        self._completion_callback = completion_callback

    def add_response(self, address, item):
        """Record one node's reply; fire the callback once any quorum
        has fully answered."""
        self._responses[address] = item
        quorum = self.is_complete()
        if quorum:
            self._process_values([self._responses[node] for node in quorum])

    def is_complete(self):
        """Return the first quorum whose nodes have all replied, or None."""
        answered = self._responses
        for quorum in self._quora:
            if all(node in answered for node in quorum):
                return quorum

    def _process_values(self, values):
        # Either every node acknowledged a write ('OK'), or this was a
        # read: pick the value with the greatest (tag, creator) and
        # report it.
        if not self._completion_callback:
            return
        if all(item == 'OK' for item in values):
            self._completion_callback(self.key, "OK")
        else:
            freshest = max(values, key=lambda item: item[1:])
            self._completion_callback(self.key, tuple(freshest))
class DSMActor(object):
    """The intelligent half of the DSM: replicates reads and writes
    across majority quora of network-enabled dictionary nodes."""

    @staticmethod
    def make_quora(nodes):
        """Return the list of majority quora: one per node, each holding
        len(nodes)//2 + 1 consecutive nodes (wrapping around)."""
        # Bug fix / py3 compat: use floor division.  On Python 3,
        # "len(nodes) / 2" is float division and range() below would
        # raise TypeError; "//" behaves identically on Python 2.
        nodes_per_quora = len(nodes) // 2 + 1
        quora_count = len(nodes)
        quora = [[] for _ in range(quora_count)]
        for q, quorum in enumerate(quora):
            for i in range(q, q + nodes_per_quora):
                quorum.append(nodes[i % len(nodes)])
        return quora

    def __init__(self, address, nodes, messenger_factory):
        self._id = uuid.getnode()
        self._messenger = messenger_factory(address, self._received_message)
        self._nodes = nodes
        for node in nodes:
            self._messenger.add_node(node)
        self._quora = self.make_quora(nodes)
        # Maps message id -> pending DSMCommand awaiting quorum replies.
        self._outstanding_commands = {}
        self._messenger.start()

    def _received_message(self, address, message):
        """Route a node's (message_id, item) reply to its pending command;
        drop the command once a quorum has answered."""
        message_id, item = message
        if message_id in self._outstanding_commands:
            self._outstanding_commands[message_id].add_response(address, item)
            if self._outstanding_commands[message_id].is_complete():
                del self._outstanding_commands[message_id]

    @property
    def quora(self):
        return self._quora

    def stop(self):
        self._messenger.stop()

    def read(self, key, callback):
        """Quorum-read `key`; the freshest value is written back to a
        quorum (read-repair) before callback(key, item) fires."""
        self._read(key, callback)

    def _read(self, key, callback):
        message_id = str(uuid.uuid4())
        message = Message(message_id, 'get', key, None)
        self._outstanding_commands[message_id] = DSMCommand(
            self._quora, key, self._propagate(callback))
        self._messenger.send(message)

    def _propagate(self, callback):
        """Wrap `callback` so the value just read is re-written to a
        quorum (read-repair) before being reported to the caller."""
        def _complete(key, item):
            def _do_callback(key, result):
                callback(key, result)
            self._write(key, item, _do_callback)
        return _complete

    def write(self, key, value, callback):
        """Quorum-write: read the current tag for `key`, then write
        `value` with an incremented tag stamped with our node id."""
        def _increment_tag_and_write(key, item):
            _, tag, _ = item
            new_item = Item(value, tag + 1, self._id)
            self._write(key, new_item, callback)
        self._read(key, _increment_tag_and_write)

    def _write(self, key, item, callback):
        message_id = str(uuid.uuid4())
        message = Message(message_id, 'set', key, item)
        self._outstanding_commands[message_id] = DSMCommand(
            self._quora, key, callback)
        self._messenger.send(message)
| {
"repo_name": "desmaj/dish",
"path": "dish/actor.py",
"copies": "1",
"size": "4375",
"license": "mit",
"hash": -3011497030232617500,
"line_mean": 32.3969465649,
"line_max": 86,
"alpha_frac": 0.5426285714,
"autogenerated": false,
"ratio": 4.186602870813397,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014729465448744917,
"num_lines": 131
} |
""" Actor registry for rodario framework """
# local
from rodario import get_redis_connection
from rodario.exceptions import RegistrationException
# pylint: disable=C1001
class _RegistrySingleton(object):
    """ Singleton for actor registry """

    def __init__(self, prefix=None):
        """
        Initialize the registry.

        :param str prefix: Optional prefix for redis key names
        """
        self._redis = get_redis_connection()
        # Name of the redis set holding every registered actor's UUID.
        self._list = '{0}actors'.format(prefix)

    @property
    def actors(self):
        """
        Retrieve a list of registered actors.

        :rtype: :class:`set`
        """
        return self._redis.smembers(self._list)

    def register(self, uuid):
        """
        Register a new actor.

        :param str uuid: The UUID of the actor to register
        """
        added = self._redis.sadd(self._list, uuid)
        if added == 0:
            # sadd returns 0 when the member was already present.
            raise RegistrationException('Failed adding member to set')

    def unregister(self, uuid):
        """
        Unregister an existing actor.

        :param str uuid: The UUID of the actor to unregister
        """
        self._redis.srem(self._list, uuid)

    def exists(self, uuid):
        """
        Test whether an actor exists in the registry.

        :param str uuid: UUID of the actor to check for
        :rtype: :class:`bool`
        """
        return 1 == self._redis.sismember(self._list, uuid)

    # pylint: disable=R0201
    def get_proxy(self, uuid):
        """
        Return an ActorProxy for the given UUID.

        :param str uuid: The UUID to return a proxy object for
        :rtype: :class:`rodario.actors.ActorProxy`
        """
        # avoid cyclic import
        proxy_module = __import__('rodario.actors',
                                  fromlist=('ActorProxy',))
        return proxy_module.ActorProxy(uuid=uuid)
# pylint: disable=R0903
class Registry(object):
    """ Actor registry class (singleton wrapper) """

    _instance = None

    def __new__(cls, prefix=None):
        """
        Retrieve the singleton instance for Registry, creating it on
        first use.

        :param str prefix: Optional prefix for redis key names
        :rtype: :class:`rodario.registry._RegistrySingleton`
        """
        if cls._instance is None:
            cls._instance = _RegistrySingleton(prefix=prefix)
        return cls._instance
| {
"repo_name": "haliphax/rodario",
"path": "rodario/registry.py",
"copies": "1",
"size": "2356",
"license": "mit",
"hash": 8727413672270164000,
"line_mean": 23.5416666667,
"line_max": 70,
"alpha_frac": 0.5891341256,
"autogenerated": false,
"ratio": 4.371057513914657,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5460191639514657,
"avg_score": null,
"num_lines": null
} |
"""Actors are tools for rapidly adding multiprocessing behavior to your game."""
import greenlet
import spyral
class Actor(object):
    """
    Actors are a powerful mechanism for quickly adding multiprocessing behavior
    to your game through `Greenlets <http://greenlet.readthedocs.org/>`_ .
    Any object that subclasses the Actor
    mixin can implement a `main` method that will run concurrently. You can put
    a non-terminating loop into it, and it will work like magic, allowing
    other actors and the main game itself to keep processing::

        class MyActor(spyral.Actor):
            def main(self, delta):
                while True:
                    print "Acting!"

    When an instance of the above class is created in a scene, it will
    continuously print "Acting!" until the scene ends. Like a Sprite, An Actor
    belongs to the Scene that was currently active when it was created.
    """
    def __init__(self):
        # Run `main` inside its own greenlet and register the pair with
        # whichever scene is executing at construction time.
        self._greenlet = greenlet.greenlet(self.main)
        scene = spyral._get_executing_scene()
        scene._register_actor(self, self._greenlet)

    def wait(self, delta=0):
        """
        Switches execution from this Actor for *delta* frames to the other
        Actors. Returns the amount of time that this actor was left waiting.

        :param delta: the number of frames(?) to wait.
        :type delta: number
        :rtype: float
        """
        if delta == 0:
            # NOTE(review): a zero delta is signalled to the scheduler as
            # the literal True rather than a number -- presumably meaning
            # "yield for a single frame"; confirm against the scene's
            # greenlet scheduler.
            return self._greenlet.parent.switch(True)
        return self._greenlet.parent.switch(delta)

    def run_animation(self, animation):
        """
        Run this animation, without blocking other Actors, until the animation
        completes.
        """
        progress = 0.0
        delta = 0.0
        while progress < animation.duration:
            progress += delta
            if progress > animation.duration:
                # Clamp to the end of the animation; the overshoot is
                # handed to the final wait call below.
                extra = progress - animation.duration
                progress = animation.duration
            else:
                extra = 0
            # Apply every animated property defined at this progress point.
            values = animation.evaluate(self, progress)
            for property in animation.properties:
                if property in values:
                    setattr(self, property, values[property])
            delta = self.wait(extra)

    def main(self, delta):
        """
        The main function is executed continuously until either the program
        ends or the main function ends. While the Actor's scene is not on the
        top of the stack, the Actor is paused; it will continue when the Scene
        is back on the top of the Directory's stack.

        :param float delta: The amount of time that has passed since this
                            method was last invoked.
        """
        pass
| {
"repo_name": "danShumway/python_math",
"path": "source/PythonMath.activity/libraries/spyral/spyral/actor.py",
"copies": "2",
"size": "2766",
"license": "mit",
"hash": 7654016349168394000,
"line_mean": 36.8904109589,
"line_max": 80,
"alpha_frac": 0.6099060014,
"autogenerated": false,
"ratio": 4.696095076400679,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6306001077800679,
"avg_score": null,
"num_lines": null
} |
"""Actors communicate with each other by sending and receiving messages.
The :mod:`pulsar.async.mailbox` module implements the message passing layer
via a bidirectional socket connections between the :class:`.Arbiter`
and any :class:`.Actor`.
Message sending is asynchronous and safe, the message is guaranteed to
eventually reach the recipient, provided that the recipient exists.
The implementation details are outlined below:
* Messages are sent via the :func:`.send` function, which is a proxy for
the actor :meth:`~.Actor.send` method.
Here is how you ping actor ``abc`` in a coroutine::
from pulsar.api import send
async def example():
result = await send('abc', 'ping')
* The :class:`.Arbiter` :attr:`~pulsar.Actor.mailbox` is a :class:`.TcpServer`
accepting connections from remote actors.
* The :attr:`.Actor.mailbox` is a :class:`.MailboxClient` of the arbiter
mailbox server.
* When an actor sends a message to another actor, the arbiter mailbox behaves
as a proxy server by routing the message to the targeted actor.
* Communication is bidirectional and there is **only one connection** between
the arbiter and any given actor.
* Messages are encoded and decoded using the unmasked websocket protocol
implemented in :func:`.frame_parser`.
* If, for some reasons, the connection between an actor and the arbiter
get broken, the actor will eventually stop running and garbaged collected.
Implementation
=========================
For the curious this is how the internal protocol is implemented
Protocol
~~~~~~~~~~~~
.. autoclass:: MessageConsumer
:members:
:member-order: bysource
Client
~~~~~~~~~~~~
.. autoclass:: MailboxClient
:members:
:member-order: bysource
"""
import socket
import pickle
import logging
from functools import partial
from collections import namedtuple
from inspect import isawaitable
from ..utils.exceptions import CommandError
from ..utils.internet import nice_address
from ..utils.websocket import frame_parser
from ..utils.string import gen_unique_id
from ..utils.lib import ProtocolConsumer
from .protocols import Connection
from .access import get_actor
from .proxy import actor_identity, get_proxy, get_command, ActorProxy
from .clients import AbstractClient
# Lightweight record describing one in-flight command invocation.
CommandRequest = namedtuple('CommandRequest', 'actor caller connection')
LOGGER = logging.getLogger('pulsar.mailbox')
def create_aid():
    """Create a short (8-character) actor identifier."""
    full_id = gen_unique_id()
    return full_id[:8]
async def command_in_context(command, caller, target, args, kwargs,
                             connection=None):
    """Look up and execute a registered actor command, returning its result.

    :param command: name of the command, resolved via :func:`get_command`
    :param caller: proxy of the actor issuing the command
    :param target: actor the command operates on
    :param args: positional arguments passed to the command
    :param kwargs: keyword arguments passed to the command
    :param connection: optional connection the command arrived on
    :raises CommandError: if ``command`` is not a known command
    """
    cmnd = get_command(command)
    if not cmnd:
        raise CommandError('unknown %s' % command)
    request = CommandRequest(target, caller, connection)
    result = cmnd(request, args, kwargs)
    try:
        result = await result
    except TypeError:
        # Awaiting a plain (non-awaitable) value raises TypeError; in that
        # case the synchronous result is used as-is.  If the result *was*
        # awaitable, the TypeError came from inside the command: re-raise.
        if isawaitable(result):
            raise
    return result
class ProxyMailbox:
    '''A proxy standing in for the arbiter :class:`Mailbox`.

    All attribute access is delegated to the underlying mailbox object.
    '''
    active_connections = 0

    def __init__(self, actor):
        target = actor.monitor.mailbox
        # Unwrap a proxy so we always hold the real mailbox object.
        if isinstance(target, ProxyMailbox):
            target = target.mailbox
        self.mailbox = target

    def __repr__(self):
        return repr(self.mailbox)

    def __str__(self):
        return str(self.mailbox)

    def __getattr__(self, name):
        # Delegate everything else to the wrapped mailbox.
        return getattr(self.mailbox, name)

    def _run(self):
        # Nothing to run: the arbiter owns the real mailbox.
        pass

    def close(self):
        # Closing is a no-op; the underlying mailbox outlives this proxy.
        pass
class MessageConsumer(ProtocolConsumer):
    """Protocol consumer for actor messages.

    Frames are encoded/decoded with the unmasked websocket parser and
    bodies are pickled dicts.  Regular commands are executed in their own
    task, while ``callback`` messages resolve the future of a previously
    sent command.
    """
    # FIX: the original class body declared ``parser = None`` twice; the
    # duplicate class attribute has been removed.
    tasks = None
    parser = None
    worker = None
    debug = False
    pending_responses = None

    def start_request(self):
        """Initialise per-connection state from the running actor."""
        actor = get_actor()
        self.parser = frame_parser(kind=2)
        self.pending_responses = {}
        self.tasks = {}
        self.logger = actor.logger
        self.debug = actor.cfg.debug

    def feed_data(self, data):
        """Decode incoming bytes and dispatch each complete message."""
        msg = self.parser.decode(data)
        while msg:
            try:
                message = pickle.loads(msg.body)
            except Exception:
                self.logger.exception('could not decode message body')
            else:
                # Avoid to create a task on callbacks
                if message.get('command') == 'callback':
                    self._on_callback(message)
                else:
                    task = self._loop.create_task(self._on_message(message))
                    self.tasks[message['id']] = task
            msg = self.parser.decode()

    def send(self, command, sender, target, args, kwargs):
        """Used by the server to send messages to the client.

        Returns a future, resolved by the matching callback when the
        command requires an acknowledgement, or immediately otherwise.
        """
        command = get_command(command)
        data = {'command': command.__name__,
                'id': create_aid(),
                'sender': actor_identity(sender),
                'target': actor_identity(target),
                'args': args if args is not None else (),
                'kwargs': kwargs if kwargs is not None else {}}
        waiter = self._loop.create_future()
        ack = None
        if command.ack:
            # Park the future under a fresh ack id until the remote end
            # answers with a callback message carrying the same id.
            ack = create_aid()
            data['ack'] = ack
            self.pending_responses[ack] = waiter
        try:
            self.write(data)
        except Exception as exc:
            waiter.set_exception(exc)
            if ack:
                self.pending_responses.pop(ack, None)
        else:
            if not ack:
                waiter.set_result(None)
        return waiter

    def write(self, msg):
        """Pickle ``msg`` and write it out as a single websocket frame."""
        obj = pickle.dumps(msg, protocol=2)
        data = self.parser.encode(obj, opcode=2)
        try:
            self.connection.write(data)
        except (socket.error, RuntimeError):
            actor = get_actor()
            if actor.is_running() and not actor.is_arbiter():
                # Losing the arbiter connection is fatal for a worker.
                self.logger.warning('Lost connection with arbiter')
                self._loop.stop()

    def _on_callback(self, message):
        # Resolve the future parked by ``send`` for this ack id.
        ack = message.get('ack')
        if not ack:
            self.logger.error('A callback without id')
        else:
            if self.debug:
                self.logger.debug('Callback from "%s"', ack)
            pending = self.pending_responses.pop(ack)
            pending.set_result(message.get('result'))

    async def _on_message(self, message):
        # Execute a command message and, when an ack was requested, send
        # the result back as a callback message.
        try:
            actor = get_actor()
            command = message.get('command')
            ack = message.get('ack')
            try:
                if self.debug:
                    self.logger.debug('Got message "%s"', command)
                target = actor.get_actor(message['target'])
                if target is None:
                    raise CommandError(
                        'cannot execute "%s", unknown actor '
                        '"%s"' % (command, message['target']))
                # Get the caller proxy without throwing
                caller = get_proxy(actor.get_actor(message['sender']),
                                   safe=True)
                if isinstance(target, ActorProxy):
                    # route the message to the actor proxy
                    if caller is None:
                        raise CommandError(
                            "'%s' got message from unknown '%s'"
                            % (actor, message['sender']))
                    result = await actor.send(target, command,
                                              *message['args'],
                                              **message['kwargs'])
                else:
                    result = await command_in_context(command, caller,
                                                      target,
                                                      message['args'],
                                                      message['kwargs'],
                                                      self)
            except CommandError as exc:
                self.logger.warning('Command error: %s' % exc)
                result = None
            except Exception:
                self.logger.exception('Unhandled exception')
                result = None
            if ack:
                data = {'command': 'callback', 'result': result, 'ack': ack}
                self.write(data)
        finally:
            self.tasks.pop(message['id'], None)
mailbox_protocol = partial(Connection, MessageConsumer)
class MailboxClient(AbstractClient):
    """Used by actors to send messages to other actors via the arbiter.
    """
    def __init__(self, address, actor, loop):
        super().__init__(mailbox_protocol, loop=loop,
                         name='%s-mailbox' % actor, logger=LOGGER)
        self.address = address
        # Lazily established on first ``send``; one connection per client.
        self.connection = None

    def connect(self):
        """Open a connection to the arbiter's mailbox server."""
        return self.create_connection(self.address)

    def __repr__(self):
        return '%s %s' % (self.name, nice_address(self.address))

    async def send(self, command, sender, target, args, kwargs):
        """Send ``command`` through the arbiter and await the response."""
        if self.connection is None:
            self.connection = await self.connect()
            consumer = self.connection.current_consumer()
            # Stop this actor's event loop if the arbiter link drops.
            self.connection.event('connection_lost').bind(self._lost)
            consumer.start()
        else:
            consumer = self.connection.current_consumer()
        response = await consumer.send(command, sender, target, args, kwargs)
        return response

    def close(self):
        """Abort the underlying connection, if one was established."""
        if self.connection:
            self.connection.abort()

    def start_serving(self):    # pragma    nocover
        pass

    def _lost(self, _, exc=None):
        # When the connection is lost, stop the event loop
        if self._loop.is_running():
            self._loop.stop()
| {
"repo_name": "quantmind/pulsar",
"path": "pulsar/async/mailbox.py",
"copies": "1",
"size": "9672",
"license": "bsd-3-clause",
"hash": -5219507821772261000,
"line_mean": 32.3517241379,
"line_max": 78,
"alpha_frac": 0.5712365591,
"autogenerated": false,
"ratio": 4.568729333963155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 290
} |
'''Actors communicate with each other by sending and receiving messages.
The :mod:`pulsar.async.mailbox` module implements the message passing layer
via a bidirectional socket connections between the :class:`.Arbiter`
and any :class:`.Actor`.
Message sending is asynchronous and safe, the message is guaranteed to
eventually reach the recipient, provided that the recipient exists.
The implementation details are outlined below:
* Messages are sent via the :func:`.send` function, which is a proxy for
the actor :meth:`~.Actor.send` method.
Here is how you ping actor ``abc`` in a coroutine::
from pulsar import send
async def example():
result = await send('abc', 'ping')
* The :class:`.Arbiter` :attr:`~pulsar.Actor.mailbox` is a :class:`.TcpServer`
accepting connections from remote actors.
* The :attr:`.Actor.mailbox` is a :class:`.MailboxClient` of the arbiter
mailbox server.
* When an actor sends a message to another actor, the arbiter mailbox behaves
as a proxy server by routing the message to the targeted actor.
* Communication is bidirectional and there is **only one connection** between
the arbiter and any given actor.
* Messages are encoded and decoded using the unmasked websocket protocol
implemented in :func:`.frame_parser`.
* If, for some reasons, the connection between an actor and the arbiter
get broken, the actor will eventually stop running and garbaged collected.
Implementation
=========================
For the curious this is how the internal protocol is implemented
Protocol
~~~~~~~~~~~~
.. autoclass:: MailboxProtocol
:members:
:member-order: bysource
Client
~~~~~~~~~~~~
.. autoclass:: MailboxClient
:members:
:member-order: bysource
'''
import socket
import pickle
import asyncio
from collections import namedtuple
from pulsar import ProtocolError, CommandError
from pulsar.utils.internet import nice_address
from pulsar.utils.websocket import frame_parser
from pulsar.utils.string import gen_unique_id
from .access import get_actor, isawaitable
from .futures import Future, task
from .proxy import actor_identity, get_proxy, get_command, ActorProxy
from .protocols import Protocol
from .clients import AbstractClient
CommandRequest = namedtuple('CommandRequest', 'actor caller connection')
def create_aid():
    """Return a short (8-character) unique actor identifier."""
    unique = gen_unique_id()
    return unique[:8]
@asyncio.coroutine
def command_in_context(command, caller, target, args, kwargs, connection=None):
    """Look up and execute a registered actor command, returning its result.

    :param command: name of the command, resolved via :func:`get_command`
    :param caller: proxy of the actor issuing the command
    :param target: actor the command operates on
    :param connection: optional connection the command arrived on
    :raises CommandError: if ``command`` is not a known command
    """
    cmnd = get_command(command)
    if not cmnd:
        raise CommandError('unknown %s' % command)
    request = CommandRequest(target, caller, connection)
    result = cmnd(request, args, kwargs)
    if isawaitable(result):
        # Commands may be synchronous or coroutine-based; only yield when
        # the command actually returned an awaitable.
        result = yield from result
    return result
class ProxyMailbox:
    '''A proxy standing in for the arbiter :class:`Mailbox`.

    Attribute access is delegated to the wrapped mailbox object.
    '''
    active_connections = 0

    def __init__(self, actor):
        inner = actor.monitor.mailbox
        # Unwrap a proxy so we always hold the real mailbox object.
        if isinstance(inner, ProxyMailbox):
            inner = inner.mailbox
        self.mailbox = inner

    def __repr__(self):
        return repr(self.mailbox)

    def __str__(self):
        return str(self.mailbox)

    def __getattr__(self, name):
        # Delegate everything else to the wrapped mailbox.
        return getattr(self.mailbox, name)

    def _run(self):
        # Nothing to run: the arbiter owns the real mailbox.
        pass

    def close(self):
        # Closing is a no-op; the underlying mailbox outlives this proxy.
        pass
class Message:
    '''A single message travelling between actors.

    Wraps the raw ``data`` payload together with an optional ``waiter``
    future, resolved once the message has been acknowledged.
    '''
    def __init__(self, data, waiter=None):
        self.data = data
        self.waiter = waiter

    def __repr__(self):
        return self.data.get('command', 'unknown')
    __str__ = __repr__

    @classmethod
    def command(cls, command, sender, target, args, kwargs):
        '''Build an outgoing command message with its waiter future.'''
        cmd = get_command(command)
        payload = {'command': cmd.__name__,
                   'sender': actor_identity(sender),
                   'target': actor_identity(target),
                   'args': () if args is None else args,
                   'kwargs': {} if kwargs is None else kwargs}
        waiter = Future()
        if cmd.ack:
            # Commands needing acknowledgement carry a fresh ack id.
            payload['ack'] = create_aid()
        else:
            waiter.set_result(None)
        return cls(payload, waiter)

    @classmethod
    def callback(cls, result, ack):
        '''Build an acknowledgement (callback) message; has no waiter.'''
        return cls({'command': 'callback', 'result': result, 'ack': ack})
class MailboxProtocol(Protocol):
    '''The :class:`.Protocol` for internal message passing between actors.

    Encoding and decoding uses the unmasked websocket protocol.
    '''
    def __init__(self, **kw):
        super().__init__(**kw)
        # Futures keyed by ack id, resolved when the matching callback
        # message arrives.
        self._pending_responses = {}
        self._parser = frame_parser(kind=2, pyparser=True)
        actor = get_actor()
        if actor.is_arbiter():
            self.bind_event('connection_lost', self._connection_lost)

    def request(self, command, sender, target, args, kwargs):
        '''Used by the server to send messages to the client.

        Returns the message's waiter future.
        '''
        req = Message.command(command, sender, target, args, kwargs)
        self._start(req)
        return req.waiter

    def data_received(self, data):
        # Feed data into the parser
        msg = self._parser.decode(data)
        while msg:
            try:
                message = pickle.loads(msg.body)
            except Exception as e:
                raise ProtocolError('Could not decode message body: %s' % e)
            self._on_message(message)
            msg = self._parser.decode()

    ########################################################################
    #    INTERNALS
    def _start(self, req):
        # Park the waiter future (if the message expects an ack) before
        # writing, so an early callback can always find it.
        if req.waiter and 'ack' in req.data:
            self._pending_responses[req.data['ack']] = req.waiter
            try:
                self._write(req)
            except Exception as exc:
                req.waiter.set_exception(exc)
        else:
            self._write(req)

    def _connection_lost(self, _, exc=None):
        # Arbiter-side notification that an actor connection dropped.
        if exc:
            actor = get_actor()
            if actor.is_running():
                actor.logger.warning('Connection lost with actor.')

    @task
    def _on_message(self, message):
        # Dispatch a decoded message: callbacks resolve pending futures,
        # anything else is executed as a command.
        actor = get_actor()
        command = message.get('command')
        ack = message.get('ack')
        if command == 'callback':
            if not ack:
                raise ProtocolError('A callback without id')
            try:
                pending = self._pending_responses.pop(ack)
            except KeyError:
                raise KeyError('Callback %s not in pending callbacks' % ack)
            pending.set_result(message.get('result'))
        else:
            try:
                target = actor.get_actor(message['target'])
                if target is None:
                    raise CommandError('cannot execute "%s", unknown actor '
                                       '"%s"' % (command, message['target']))
                # Get the caller proxy without throwing
                caller = get_proxy(actor.get_actor(message['sender']),
                                   safe=True)
                if isinstance(target, ActorProxy):
                    # route the message to the actor proxy
                    if caller is None:
                        raise CommandError(
                            "'%s' got message from unknown '%s'"
                            % (actor, message['sender']))
                    result = yield from actor.send(target, command,
                                                   *message['args'],
                                                   **message['kwargs'])
                else:
                    result = yield from command_in_context(command, caller,
                                                           target,
                                                           message['args'],
                                                           message['kwargs'],
                                                           self)
            except CommandError as exc:
                self.logger.warning('Command error: %s' % exc)
                result = None
            except Exception as exc:
                self.logger.exception('Unhandled exception')
                result = None
            if ack:
                # Send the result back as a callback message.
                self._start(Message.callback(result, ack))

    def _write(self, req):
        obj = pickle.dumps(req.data, protocol=2)
        data = self._parser.encode(obj, opcode=2)
        try:
            self._transport.write(data)
        except socket.error:
            # A worker losing the arbiter link shuts down its loop; on the
            # arbiter side the error propagates.
            actor = get_actor()
            if actor.is_running():
                if actor.is_arbiter():
                    raise
                else:
                    actor.logger.warning('Lost connection with arbiter')
                    actor._loop.stop()
class MailboxClient(AbstractClient):
    '''Used by actors to send messages to other actors via the arbiter.
    '''
    protocol_factory = MailboxProtocol

    def __init__(self, address, actor, loop):
        super().__init__(loop)
        self.address = address
        self.name = 'Mailbox for %s' % actor
        self._connection = None
        # FIX: ``response`` reads ``self._consumer`` before ever assigning
        # it; initialise it here so the first call does not raise
        # AttributeError.
        self._consumer = None

    def response(self, request):
        '''Build (or chain onto) the protocol consumer for ``request``.'''
        resp = super().response
        self._consumer = resp(request, self._consumer, False)
        return self._consumer

    def connect(self):
        '''Open a connection to the arbiter's mailbox server.'''
        return self.create_connection(self.address)

    def __repr__(self):
        return '%s %s' % (self.name, nice_address(self.address))

    @task
    def request(self, command, sender, target, args, kwargs):
        # the request method
        if self._connection is None:
            # Lazily connect on first use and watch for disconnects.
            self._connection = yield from self.connect()
            self._connection.bind_event('connection_lost', self._lost)
        req = Message.command(command, sender, target, args, kwargs)
        self._connection._start(req)
        response = yield from req.waiter
        return response

    def start_serving(self):
        # A client never serves; present for interface compatibility.
        pass

    def close(self):
        '''Close the underlying connection, if one was established.'''
        if self._connection:
            self._connection.close()

    def _lost(self, _, exc=None):
        # When the connection is lost, stop the event loop
        if self._loop.is_running():
            self._loop.stop()
| {
"repo_name": "dejlek/pulsar",
"path": "pulsar/async/mailbox.py",
"copies": "1",
"size": "10058",
"license": "bsd-3-clause",
"hash": -277900762088121570,
"line_mean": 32.5266666667,
"line_max": 79,
"alpha_frac": 0.5722807715,
"autogenerated": false,
"ratio": 4.520449438202247,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 300
} |
"""Actors.
An actor is just a function running in a thread that processes messages
from a queue, and you interact with an actor through a stub object.
A few kinds of actor are implemented here:
* Object-based actor that interprets messages as method calls of the
object bound to this actor.
* Function-calling actor that interprets messages as function calls.
If these do not fit your need, you can always create your actor since it
is just a function.
"""
__all__ = [
'MethodCall',
'Stub',
'from_object',
'function_caller',
]
import functools
import logging
import threading
import typing
from g1.bases import classes
from g1.bases.assertions import ASSERT
from g1.bases.collections import Namespace
from . import futures
from . import queues
LOG = logging.getLogger(__name__)
NON_GRACE_PERIOD = 0.1 # Unit: seconds.
def from_object(obj, **kwargs):
    """Make an object-based actor stub wrapping ``obj``."""
    method_names = classes.get_public_method_names(obj)
    return Stub(actor=make_method_caller(obj),
                method_names=method_names,
                **kwargs)
class Stub:
    """Stub for interacting with an actor.

    Owns the actor's message queue and the thread the actor runs in, and
    exposes shutdown/join management.  Supports use as a context manager.
    """

    def __init__(
        self,
        *,
        actor,
        method_names=(),
        queue=None,
        name=None,
        daemon=None,
    ):
        # ``future`` captures the actor function's exit (or crash).
        self.future = futures.Future()
        self.queue = queue if queue is not None else queues.Queue()
        # Create method senders for convenience.
        if method_names:
            self.m = make_senders(method_names, self.queue)
        # The actor runs in its own thread, fed by ``self.queue``.
        self._thread = threading.Thread(
            target=futures.wrap_thread_target(actor, self.future),
            name=name,
            args=(self.queue, ),
            daemon=daemon,
        )
        self._thread.start()

    __repr__ = classes.make_repr('{self._thread!r}')

    def __enter__(self):
        return self

    def __exit__(self, exc_type, *_):
        # On a clean exit drain gracefully; on error force shutdown and
        # only wait a short non-grace period for the thread.
        graceful = not exc_type
        self.shutdown(graceful)
        try:
            self.join(None if graceful else NON_GRACE_PERIOD)
        except futures.Timeout:
            LOG.warning('actor join timeout: %r', self)

    def shutdown(self, graceful=True):
        """Close the queue and return any messages that were dropped."""
        items = self.queue.close(graceful)
        if items:
            LOG.warning('drop %d messages', len(items))
        return items

    def join(self, timeout=None):
        """Wait for the actor to exit; log its exception if it crashed."""
        exc = self.future.get_exception(timeout)
        if exc:
            LOG.error('actor crash: %r', self, exc_info=exc)
class MethodCall(typing.NamedTuple):
    """Message type for object-based actor and function-calling actor."""
    # Attribute name (object-based actor) or callable (function-calling
    # actor) to invoke.
    method: typing.Union[str, typing.Callable]
    # Positional arguments of the call.
    args: tuple
    # Keyword arguments of the call.
    kwargs: dict
    # Future that receives the call's result or exception.
    future: futures.Future
def make_senders(method_names, queue):
    """Create a Namespace of sender functions, one per method name."""
    return Namespace(**{
        name: _make_sender(name, queue) for name in method_names
    })
def _make_sender(name, queue):
    """Return a function that enqueues a MethodCall for ``name``."""

    def sender(*args, **kwargs):
        result_future = futures.Future()
        queue.put(MethodCall(
            method=name, args=args, kwargs=kwargs, future=result_future))
        return result_future

    return sender
#
# Object-based actor.
#
def make_method_caller(obj):
    """Return a ``method_caller`` actor with ``obj`` pre-bound."""
    caller = functools.partial(method_caller, obj)
    return caller
def method_caller(obj, queue):
    """Actor that interprets messages as method calls of an object."""
    LOG.info('start')
    while True:
        try:
            # Only MethodCall messages are valid; anything else is a bug.
            call = ASSERT.isinstance(queue.get(), MethodCall)
        except queues.Closed:
            # Queue closed: the stub is shutting this actor down.
            break
        with call.future.catching_exception(reraise=False):
            # For the object-based actor, ``method`` must be a string
            # naming an attribute of ``obj``.
            method = getattr(obj, ASSERT.isinstance(call.method, str))
            call.future.set_result(method(*call.args, **call.kwargs))
        # Drop the reference before blocking on the next queue.get --
        # presumably so the call (and its future/result) can be collected
        # while this actor is idle.
        del call
    LOG.info('exit')
#
# Function-calling actor.
#
def function_caller(queue):
    """Actor that interprets messages as function calls."""
    LOG.info('start')
    while True:
        try:
            message = ASSERT.isinstance(queue.get(), MethodCall)
        except queues.Closed:
            # Queue closed: the stub is shutting this actor down.
            break
        with message.future.catching_exception(reraise=False):
            # Here ``method`` must itself be the callable to invoke.
            ASSERT.predicate(message.method, callable)
            message.future.set_result(
                message.method(*message.args, **message.kwargs))
        # Release the message before blocking on the next queue.get.
        del message
    LOG.info('exit')
| {
"repo_name": "clchiou/garage",
"path": "py/g1/threads/g1/threads/actors.py",
"copies": "1",
"size": "4221",
"license": "mit",
"hash": -5737325177231955000,
"line_mean": 24.125,
"line_max": 79,
"alpha_frac": 0.6244965648,
"autogenerated": false,
"ratio": 3.8831646734130634,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5007661238213064,
"avg_score": null,
"num_lines": null
} |
'''
This module contains functions which run antsCorticalThickness and ROI
extractions and then uploads them to S3
'''
# Create the ACT nipype workflow
def create_workflow(wf_base_dir, input_anat, oasis_path):
    '''
    Method to create the nipype workflow that is executed for
    preprocessing the data

    Parameters
    ----------
    wf_base_dir : string
        filepath to the base directory to run the workflow
    input_anat : string
        filepath to the input file to run antsCorticalThickness.sh on
    oasis_path : string
        filepath to the oasis template directory

    Returns
    -------
    wf : nipype.pipeline.engine.Workflow instance
        the workflow to be ran for preprocessing
    '''

    # Import packages
    from act_interface import antsCorticalThickness
    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util
    from nipype.interfaces.utility import Function
    from nipype import logging as np_logging
    from nipype import config
    import os

    # Init variables
    # FIX: template paths are now built with os.path.join (matching
    # oasis_trt_20) instead of raw string concatenation, so they no longer
    # silently break when oasis_path lacks a trailing slash
    oasis_trt_20 = os.path.join(oasis_path,
            'OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_OASIS-30.nii')

    # Setup nipype workflow
    if not os.path.exists(wf_base_dir):
        os.makedirs(wf_base_dir)
    wf = pe.Workflow(name='thickness_workflow')
    wf.base_dir = wf_base_dir

    # Init log directory
    log_dir = wf_base_dir

    # Define antsCorticalThickness node
    thickness = pe.Node(antsCorticalThickness(), name='thickness')

    # Set antsCorticalThickness inputs
    thickness.inputs.dimension = 3
    thickness.inputs.segmentation_iterations = 1
    thickness.inputs.segmentation_weight = 0.25
    thickness.inputs.input_skull = input_anat #-a
    thickness.inputs.template = \
        os.path.join(oasis_path, 'T_template0.nii.gz') #-e
    thickness.inputs.brain_prob_mask = \
        os.path.join(oasis_path,
                     'T_template0_BrainCerebellumProbabilityMask.nii.gz') #-m
    thickness.inputs.brain_seg_priors = \
        os.path.join(oasis_path, 'Priors2', 'priors%d.nii.gz') #-p
    thickness.inputs.intensity_template = \
        os.path.join(oasis_path, 'T_template0_BrainCerebellum.nii.gz') #-t
    thickness.inputs.extraction_registration_mask = \
        os.path.join(oasis_path,
                     'T_template0_BrainCerebellumExtractionMask.nii.gz') #-f
    thickness.inputs.out_prefix = 'OUTPUT_' #-o
    thickness.inputs.keep_intermediate_files = 0 #-k

    # Node to run ANTs 3dROIStats
    ROIstats = pe.Node(util.Function(input_names=['mask','thickness_normd'],
                                     output_names=['roi_stats_file'],
                                     function=roi_func),
                       name='ROIstats')
    wf.connect(thickness, 'cortical_thickness_normalized',
               ROIstats, 'thickness_normd')
    ROIstats.inputs.mask = oasis_trt_20

    # Create datasink node
    datasink = pe.Node(nio.DataSink(), name='sinker')
    datasink.inputs.base_directory = wf_base_dir

    # Connect thickness outputs to datasink
    wf.connect(thickness, 'brain_extraction_mask',
               datasink, 'output.@brain_extr_mask')
    wf.connect(thickness, 'brain_segmentation',
               datasink, 'output.@brain_seg')
    wf.connect(thickness, 'brain_segmentation_N4',
               datasink, 'output.@brain_seg_N4')
    wf.connect(thickness, 'brain_segmentation_posteriors_1',
               datasink, 'output.@brain_seg_post_1')
    wf.connect(thickness, 'brain_segmentation_posteriors_2',
               datasink, 'output.@brain_seg_post_2')
    wf.connect(thickness, 'brain_segmentation_posteriors_3',
               datasink, 'output.@brain_seg_post_3')
    wf.connect(thickness, 'brain_segmentation_posteriors_4',
               datasink, 'output.@brain_seg_post_4')
    wf.connect(thickness, 'brain_segmentation_posteriors_5',
               datasink, 'output.@brain_seg_post_5')
    wf.connect(thickness, 'brain_segmentation_posteriors_6',
               datasink, 'output.@brain_seg_post_6')
    wf.connect(thickness, 'cortical_thickness',
               datasink, 'output.@cortical_thickness')
    wf.connect(thickness, 'cortical_thickness_normalized',
               datasink,'output.@cortical_thickness_normalized')

    # Connect ROI stats output text file to datasink
    wf.connect(ROIstats, 'roi_stats_file', datasink, 'output.@ROIstats')

    # Setup crashfile directory and logging
    # NOTE(review): crashdump_dir is hard-coded to an EC2 home directory;
    # confirm this is intended outside that environment
    wf.config['execution'] = {'hash_method': 'timestamp',
                              'crashdump_dir': '/home/ubuntu/crashes'}
    config.update_config({'logging': {'log_directory': log_dir,
                                      'log_to_file': True}})
    np_logging.update_logging(config)

    # Return the workflow
    return wf
# Mean ROI stats function
def roi_func(mask, thickness_normd):
    '''
    Method to run 3dROIstats on an input image, thickness_normd, using
    a mask, mask The output is written to the current working directory
    as 'ROIstats.txt'

    Parameters
    ----------
    mask : string
        filepath to the mask to be used
    thickness_normd : string
        filepath to the input image

    Returns
    -------
    roi_stats_file : string
        the filepath to the generated ROIstats.txt file
    '''

    # Import packages
    import os

    # FIX: build the output path once (with os.path.join) so the shell
    # redirection target and the returned path always agree
    roi_stats_file = os.path.join(os.getcwd(), 'ROIstats.txt')

    # Set command and execute
    # NOTE(review): the command is assembled from raw filepaths and run
    # through the shell; paths containing spaces or shell metacharacters
    # will break -- prefer subprocess with an argument list if that can
    # happen
    cmd = '3dROIstats -mask ' + mask + ' ' + thickness_normd + \
          ' > ' + roi_stats_file
    os.system(cmd)

    # Return the filepath to the output
    return roi_stats_file
# Setup log file
def setup_logger(logger_name, log_file, level, to_screen=False):
    '''
    Function to initialize and configure a logger that can write to file
    and (optionally) the screen.

    Parameters
    ----------
    logger_name : string
        name of the logger
    log_file : string
        file path to the log file on disk
    level : integer
        indicates the level at which the logger should log; this is
        controlled by integers that come with the python logging
        package. (e.g. logging.INFO=20, logging.DEBUG=10)
    to_screen : boolean (optional)
        flag to indicate whether to enable logging to the screen

    Returns
    -------
    logger : logging.Logger object
        Python logging.Logger object which is capable of logging run-
        time information about the program to file and/or screen
    '''

    # Import packages
    import logging

    # One formatter shared by every handler attached below
    formatter = logging.Formatter('%(asctime)s : %(message)s')

    # Fetch the named logger and set its threshold level
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)

    # Always log to the file on disk
    file_handler = logging.FileHandler(log_file)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    # Optionally mirror log output to the screen as well
    if to_screen:
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(formatter)
        logger.addHandler(stream_handler)

    return logger
# Form list of anatomical s3 keys
def return_anat_dict(bucket, prefix):
    '''
    Function to create and return an dictionary from an S3 bucket
    prefix, where the key is the subject unique id and the value is the
    S3 key filepath

    Parameters
    ----------
    bucket : boto.s3.bucket.Bucket instance
        an instance of the boto S3 bucket class to download from
    prefix : string
        S3 bucket prefix to parse for anatomical data in

    Returns
    -------
    key_dict : dictionary
        dictionary of unique subject id's as keys and S3 key filepaths
        as values
    '''

    # Init variables
    key_list = []
    key_dict = {}

    # Check prefix
    # Normalize so the prefix can be stripped off cleanly below
    if not prefix.endswith('/'):
        prefix = prefix + '/'

    # Gather all anatomical files
    # Any key whose path mentions 'anat' is treated as anatomical data
    for key in bucket.list(prefix=prefix):
        key_name = str(key.name)
        if 'anat' in key_name:
            key_list.append(key_name)
            print 'Adding %s to list...' % key_name

    # Create subject dictionary
    # NOTE(review): key_idx is unused; enumerate could be dropped
    for key_idx, key_name in enumerate(key_list):
        # Grab unique subj/session as id
        # e.g. 'sub01/session_1/anat/...' -> 'sub01-session_1'
        # (presumably the first two path parts are subject and session)
        key_suffix = key_name.replace(prefix, '')
        subj_id = '-'.join(key_suffix.split('/')[:2])
        # Add key, val to dictionary
        # Later keys with the same subj_id silently overwrite earlier ones
        key_dict[subj_id] = key_name

    # Return dictionary
    return key_dict
# Main routine
def main(index, local_dir):
    '''
    Function to download an anatomical dataset from S3 and process it
    through ANTS antsCorticalThickness.sh script, then upload the data back
    to S3

    Parameters
    ----------
    index : integer
        the index of the subject to process (0-based index into the
        sorted list of subject ids found under the S3 prefix)
    local_dir : string
        filepath to the local directory to store the input and
        processed outputs
    '''

    # Import packages
    import boto
    import logging
    import os
    import subprocess
    import time
    from CPAC.AWS import aws_utils, fetch_creds

    # Init variables
    # NOTE(review): hard-coded credentials/template paths - only valid on
    # the project's AWS worker instances
    creds_path = '/home/ubuntu/secure-creds/aws-keys/fcp-indi-keys2.csv'
    # Oasis template paths
    oasis_path = '/home/ubuntu/OASIS-30_Atropos_template/'
    # Bucket and S3 dataset prefix
    bucket = fetch_creds.return_bucket(creds_path, 'fcp-indi')
    prefix = 'data/Projects/CORR/RawData/IBA_TRT/'
    # Local dirs for working and download
    dl_dir = os.path.join(local_dir, 'inputs')

    # Setup logger (one log file per subject index)
    act_log_path = '/home/ubuntu/run_act_%d.log' % index
    act_log = setup_logger('act_log', act_log_path, logging.INFO, to_screen=True)

    # Make input and workdirs
    if not os.path.exists(dl_dir):
        os.makedirs(dl_dir)

    # Get S3 anatomical paths dictionary
    anat_dict = return_anat_dict(bucket, prefix)
    # Get list of unique subject ids to download; sorted so that `index`
    # maps deterministically to the same subject across runs
    key_list = sorted(anat_dict.keys())

    # Extract subject of interest
    subj_id = key_list[index]
    s3_path = anat_dict[subj_id]

    # Init working dir
    working_dir = os.path.join(local_dir, '%s_act_workdir' % subj_id)
    if not os.path.exists(working_dir):
        os.makedirs(working_dir)

    # Download data
    act_log.info('Downloading %s...' % s3_path)
    s3_key = bucket.get_key(s3_path)
    s3_filename = os.path.basename(s3_path)
    dl_filename = os.path.join(dl_dir, subj_id, s3_filename)
    # Make folders if need be
    dl_dirs = os.path.dirname(dl_filename)
    if not os.path.exists(dl_dirs):
        os.makedirs(dl_dirs)
    s3_key.get_contents_to_filename(dl_filename)

    # Create the nipype workflow
    # NOTE(review): create_workflow is presumably defined elsewhere in
    # this module - it is not imported here
    act_wf = create_workflow(working_dir, dl_filename, oasis_path)

    # Run the workflow
    act_log.info('Running the workflow...')
    # Start timing
    start = time.time()
    act_wf.run()
    # Finish timing
    fin = time.time()
    act_log.info('Completed workflow!')

    # Log finish and total computation time
    elapsed = (fin - start)/60.0
    act_log.info('Total time running is: %f minutes' % elapsed)

    # Gather processed data (every file under the working dir)
    act_log.info('Gathering outputs for upload to S3...')
    upl_list = []
    for root, dirs, files in os.walk(working_dir):
        if files:
            upl_list.extend([os.path.join(root, fl) for fl in files])
    # Update log with upload info
    act_log.info('Gathered %d files for upload to S3' % len(upl_list))

    # Build upload list: mirror the local working tree under the
    # 'Outputs' prefix instead of 'RawData'
    upl_prefix = os.path.join(prefix.replace('RawData', 'Outputs'),
                              'ants', subj_id)
    s3_upl_list = [upl.replace(working_dir, upl_prefix) for upl in upl_list]

    # Upload to S3
    aws_utils.s3_upload(bucket, upl_list, s3_upl_list)
# Run main by default
# Run main by default
if __name__ == '__main__':

    # Import packages
    import sys

    # Init variables
    # Convert the 1-based CLI argument (e.g. an SGE array-job id) to a
    # 0-based subject index
    index = int(sys.argv[1])-1
    local_dir = sys.argv[2]

    main(index, local_dir)
| {
"repo_name": "computational-neuroimaging-lab/Clark2015_AWS",
"path": "data-preproc/scripts/act_run.py",
"copies": "1",
"size": "11946",
"license": "mit",
"hash": -2274379907119019000,
"line_mean": 31.2864864865,
"line_max": 107,
"alpha_frac": 0.6334337854,
"autogenerated": false,
"ratio": 3.685899413761185,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4819333199161185,
"avg_score": null,
"num_lines": null
} |
'''Acts as an interface between what Flask serves and what goes on in
the rest of the application.
'''
from avenue import app
from avenue.database import content
from flask import render_template, make_response, redirect
from os import path
import yaml
def read_data(filename):
    '''Reads in data from a given YML file and returns it in a form
    usable by Python.

    :param filename: base name of the YAML file (without the ``.yml``
        extension), located in the package's ``data`` directory
    :return: the parsed YAML content (dicts/lists/scalars)
    '''
    filename = '%s.yml' % filename
    data_path = path.join(path.dirname(__file__), 'data', filename)
    # Context manager guarantees the file handle is closed even if
    # parsing raises (the previous open/close pair leaked on error).
    with open(data_path) as data_file:
        # safe_load: the data files ship with the app, but plain YAML is
        # all they contain, so there is no reason to allow the arbitrary
        # object construction that bare yaml.load() permits.
        data = yaml.safe_load(data_file)
    return data
def make_css(theme):
    '''Render the ``main.css`` template for *theme* and return it with
    the ``text/css`` mimetype so browsers recognise it as a stylesheet.
    '''
    rendered = render_template('main.css', theme=theme)
    css_response = make_response(rendered)
    css_response.mimetype = 'text/css'
    return css_response
def url_generator():
    '''This function acts on a list of URLs, a text rule for each URL,
    and a function that says what to do to that text rule to serve a
    page. The action_list associates a subset of URLs with a
    particular function to be used as the action for that group.
    '''
    # Static forum data from the bundled YAML file
    data = read_data('forum')
    threads = data['threads']

    # Populate the database and pull out site-wide structures
    content.insert_data()
    themes = content.get_themes()
    nav = content.get_nav()
    tags = content.get_tags()
    redirects = content.get_urls()

    def forum_set_tags():
        '''Turns strings containing tag names into tag objects that
        can be used to generate HTML/CSS renderings of the tag.

        Mutates the posts in ``threads`` in place.
        '''
        for thread in threads:
            for post in threads[thread]['posts']:
                if 'tags' in post:
                    for i in range(len(post['tags'])):
                        post['tags'][i] = tags[post['tags'][i]]

    def forum_page(name):
        '''Makes a forum page of the given thread name.
        '''
        thread = threads[name]
        # Page titles: full "<thread> :: <forum> :: <site>" for the HTML
        # <title>, shorter "<site> -- <forum>" for the page header
        html = '%s :: %s :: %s' % (thread['title'], data['forum'], data['site'])
        main = '%s -- %s' % (data['site'], data['forum'])
        title = { 'html' : html,
                  'main' : main,
                  'thread' : thread['title'],
                  'url' : data['forum_url'] }
        return render_template('forum.html',
                               style='night',
                               sidebar=nav,
                               title=title,
                               posts=thread['posts'],
                               threaded=thread['threaded'])

    def setup_url_rule(urls, action):
        '''Sets up URL rules, given a dictionary of urls and a
        function that they will act on. It passes an anonymous
        function to add_url_rule that always does a particular action
        to a particular string when that URL is accessed.
        '''
        is_dict = type(urls) == dict
        for url in urls:
            text = urls[url] if is_dict else url
            # Double lambda binds `text` per-iteration (avoids the
            # late-binding closure pitfall)
            app.add_url_rule(url, url, (lambda x: lambda: action(x))(text))

    # Tag objects must be resolved before any forum page is rendered
    forum_set_tags()

    # Map each theme's CSS URL to the theme key
    css = {}
    for theme in themes:
        css[themes[theme]['url']] = theme

    # Register all routes: redirects, per-theme stylesheets, forum pages
    setup_url_rule(redirects, redirect)
    setup_url_rule(css, lambda theme: make_css(themes[theme]))
    setup_url_rule(threads.keys(), forum_page)
| {
"repo_name": "Aethaeryn/avenue",
"path": "avenue/web.py",
"copies": "1",
"size": "3201",
"license": "mit",
"hash": -2802460668790026000,
"line_mean": 32.34375,
"line_max": 80,
"alpha_frac": 0.5760699781,
"autogenerated": false,
"ratio": 4.125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0030818074680220987,
"num_lines": 96
} |
"""Acts like a Pymongo client to TinyDB"""
# coding: utf-8
from __future__ import absolute_import
import copy
from functools import reduce
import logging
import os
from math import ceil
from operator import itemgetter
from uuid import uuid1
from tinydb import Query, TinyDB, where
from .results import (
InsertOneResult,
InsertManyResult,
UpdateResult,
DeleteResult
)
from .errors import DuplicateKeyError
# Py2/Py3 compatibility: `basestring` does not exist on Python 3, so
# alias it to `str` there.
try:
    basestring
except NameError:
    basestring = str

# Module-level logger
logger = logging.getLogger(__name__)
def Q(query, key):
    """Walk a dotted *key* path (e.g. ``'a.b.c'``) down *query* via
    successive item access and return the resulting node.
    """
    node = query
    for field in key.split('.'):
        node = node[field]
    return node
class TinyMongoClient(object):
    """Represents the Tiny `db` client.

    Mirrors the pymongo ``MongoClient`` interface: databases are
    obtained via attribute or item access, each backed by a TinyDB
    JSON file inside the container folder.
    """
    def __init__(self, foldername=u"tinydb", **kwargs):
        """Initialize container folder"""
        self._foldername = foldername
        try:
            os.mkdir(foldername)
        except OSError as x:
            # Folder usually already exists; log at INFO and carry on.
            # NOTE(review): this also swallows other OSErrors (e.g.
            # permission denied) - confirm that is intended.
            logger.info('{}'.format(x))

    @property
    def _storage(self):
        """By default return Tiny.DEFAULT_STORAGE and can be overwritten to
        return custom storages and middlewares.

            class CustomClient(TinyMongoClient):
                @property
                def _storage(self):
                    return CachingMiddleware(OtherMiddleware(JSONMiddleware))

        This property is also useful to define Serializers using required
        `tinydb-serialization` module.

            from tinymongo.serializers import DateTimeSerializer
            from tinydb_serialization import SerializationMiddleware

            class CustomClient(TinyMongoClient):
                @property
                def _storage(self):
                    serialization = SerializationMiddleware()
                    serialization.register_serializer(
                        DateTimeSerializer(), 'TinyDate')
                    # register other custom serializers
                    return serialization
        """
        return TinyDB.DEFAULT_STORAGE

    def __getitem__(self, key):
        """Gets a new or existing database based in key"""
        return TinyMongoDatabase(key, self._foldername, self._storage)

    def close(self):
        """Do nothing (kept for pymongo API compatibility)."""
        pass

    def __getattr__(self, name):
        """Gets a new or existing database based in attribute"""
        return TinyMongoDatabase(name, self._foldername, self._storage)
class TinyMongoDatabase(object):
    """A Pymongo-style database backed by a single TinyDB JSON file."""

    def __init__(self, database, foldername, storage):
        """Open (or create) the TinyDB file named after *database*
        inside the client's container folder.
        """
        self._foldername = foldername
        db_file = os.path.join(foldername, database + u".json")
        self.tinydb = TinyDB(db_file, storage=storage)

    def __getattr__(self, name):
        """Attribute access yields a (new or existing) collection."""
        return TinyMongoCollection(name, self)

    def __getitem__(self, name):
        """Item access yields a (new or existing) collection."""
        return TinyMongoCollection(name, self)

    def collection_names(self):
        """Return the names of every collection (TinyDB table) here."""
        return [table_name for table_name in self.tinydb.tables()]
class TinyMongoCollection(object):
    """
    This class represents a collection and all of the operations that are
    commonly performed on a collection.

    Mirrors a subset of the pymongo ``Collection`` API on top of a TinyDB
    table. The underlying table is created lazily on first use.
    """

    def __init__(self, table, parent=None):
        """
        Initialize the collection

        :param table: the table name
        :param parent: the parent TinyMongoDatabase instance
        """
        self.tablename = table
        self.table = None  # created lazily by build_table()
        self.parent = parent

    def __repr__(self):
        """Return collection name"""
        return self.tablename

    def __getattr__(self, name):
        """
        If attr is not found return self

        Ensures the underlying table exists first, so chained attribute
        access keeps working against a materialised table.

        :param name:
        :return:
        """
        # if self.table is None:
        #     self.tablename += u"." + name
        if self.table is None:
            self.build_table()
        return self

    def build_table(self):
        """
        Builds a new tinydb table at the parent database

        :return:
        """
        self.table = self.parent.tinydb.table(self.tablename)

    def count(self):
        """
        Counts the documents in the collection.

        :return: Integer representing the number of documents in the collection.
        """
        return self.find().count()

    def drop(self, **kwargs):
        """
        Removes a collection from the database.

        **kwargs only because of the optional "writeConcern" field, but does
        nothing in the TinyDB database.

        :return: Returns True when successfully drops a collection. Returns
            False when collection to drop does not exist (was never
            materialised).
        """
        if self.table:
            self.parent.tinydb.purge_table(self.tablename)
            return True
        else:
            return False

    def insert(self, docs, *args, **kwargs):
        """Backwards compatibility with insert"""
        if isinstance(docs, list):
            return self.insert_many(docs, *args, **kwargs)
        else:
            return self.insert_one(docs, *args, **kwargs)

    def insert_one(self, doc, *args, **kwargs):
        """
        Inserts one document into the collection

        If contains '_id' key it is used, else it is generated.

        :param doc: the document
        :return: InsertOneResult
        :raises ValueError: if *doc* is not a dict
        :raises DuplicateKeyError: if the ``_id`` already exists (unless
            ``bypass_document_validation=True``)
        """
        if self.table is None:
            self.build_table()

        if not isinstance(doc, dict):
            raise ValueError(u'"doc" must be a dict')

        _id = doc[u'_id'] = doc.get('_id') or generate_id()

        bypass_document_validation = kwargs.get('bypass_document_validation')
        if bypass_document_validation is True:
            # insert doc without validation of duplicated `_id`
            eid = self.table.insert(doc)
        else:
            existing = self.find_one({'_id': _id})
            if existing is None:
                eid = self.table.insert(doc)
            else:
                raise DuplicateKeyError(
                    u'_id:{0} already exists in collection:{1}'.format(
                        _id, self.tablename
                    )
                )

        return InsertOneResult(eid=eid, inserted_id=_id)

    def insert_many(self, docs, *args, **kwargs):
        """
        Inserts several documents into the collection

        :param docs: a list of documents
        :return: InsertManyResult
        :raises ValueError: if *docs* is not a list
        :raises DuplicateKeyError: on duplicated ``_id`` (unless
            ``bypass_document_validation=True``)
        """
        if self.table is None:
            self.build_table()

        if not isinstance(docs, list):
            raise ValueError(u'"insert_many" requires a list input')

        bypass_document_validation = kwargs.get('bypass_document_validation')

        if bypass_document_validation is not True:
            # get all _id in once, to reduce I/O. (without projection)
            existing = [doc['_id'] for doc in self.find({})]

        _ids = list()
        for doc in docs:
            _id = doc[u'_id'] = doc.get('_id') or generate_id()
            if bypass_document_validation is not True:
                if _id in existing:
                    raise DuplicateKeyError(
                        u'_id:{0} already exists in collection:{1}'.format(
                            _id, self.tablename
                        )
                    )
                # also guard against duplicates within this batch
                existing.append(_id)
            _ids.append(_id)

        results = self.table.insert_multiple(docs)

        return InsertManyResult(
            eids=[eid for eid in results],
            inserted_ids=[inserted_id for inserted_id in _ids]
        )

    def parse_query(self, query):
        """
        Creates a tinydb Query() object from the query dict

        :param query: object containing the dictionary representation of the
            query
        :return: composite Query()
        """
        logger.debug(u'query to parse2: {}'.format(query))

        # this should find all records
        if query == {} or query is None:
            return Query()._id != u'-1'  # noqa

        q = None
        # find the final result of the generator: AND together every
        # condition parse_condition yields
        for c in self.parse_condition(query):
            if q is None:
                q = c
            else:
                q = q & c

        logger.debug(u'new query item2: {}'.format(q))
        return q

    def parse_condition(self, query, prev_key=None, last_prev_key=None):
        """
        Creates a recursive generator for parsing some types of Query()
        conditions

        :param query: Query object
        :param prev_key: The key at the next-higher level
        :param last_prev_key: the key two levels up (used to invert
            conditions under ``$not``)
        :return: generator object, the last of which will be the complete
            Query() object containing all conditions
        """
        # use this to determine gt/lt/eq on prev_query
        logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))

        q = Query()
        conditions = None

        # deal with the {'name': value} case by injecting a previous key
        if not prev_key:
            temp_query = copy.deepcopy(query)
            k, v = temp_query.popitem()
            prev_key = k

        # deal with the conditions
        # each operator below builds its condition normally, ANDs it with
        # any accumulated conditions, or - when nested under $not -
        # emits the inverted comparison against last_prev_key
        for key, value in query.items():
            logger.debug(u'conditions: {} {}'.format(key, value))

            if key == u'$gte':
                conditions = (
                    Q(q, prev_key) >= value
                ) if not conditions and prev_key != "$not" \
                    else (conditions & (Q(q, prev_key) >= value)) if prev_key != "$not" \
                    else (q[last_prev_key] < value)
            elif key == u'$gt':
                conditions = (
                    Q(q, prev_key) > value
                ) if not conditions and prev_key != "$not" \
                    else (conditions & (Q(q, prev_key) > value)) if prev_key != "$not" \
                    else (q[last_prev_key] <= value)
            elif key == u'$lte':
                conditions = (
                    Q(q, prev_key) <= value
                ) if not conditions and prev_key != "$not" \
                    else (conditions & (Q(q, prev_key) <= value)) if prev_key != "$not" \
                    else (q[last_prev_key] > value)
            elif key == u'$lt':
                conditions = (
                    Q(q, prev_key) < value
                ) if not conditions and prev_key != "$not" \
                    else (conditions & (Q(q, prev_key) < value)) if prev_key != "$not" \
                    else (q[last_prev_key] >= value)
            elif key == u'$ne':
                conditions = (
                    Q(q, prev_key) != value
                ) if not conditions and prev_key != "$not" \
                    else (conditions & (Q(q, prev_key) != value))if prev_key != "$not" \
                    else (q[last_prev_key] == value)
            elif key == u'$not':
                if not isinstance(value, dict) and not isinstance(value, list):
                    conditions = (
                        Q(q, prev_key) != value
                    ) if not conditions and prev_key != "$not" \
                        else (conditions & (Q(q, prev_key) != value)) \
                        if prev_key != "$not" else (q[last_prev_key] >= value)
                else:
                    # let the value's condition be parsed below
                    pass
            elif key == u'$regex':
                # unescape the pattern: collapse doubled backslashes via a
                # placeholder so user-supplied regexes survive JSON escaping
                value = value.replace('\\\\\\', '|||')
                value = value.replace('\\\\', '|||')
                regex = value.replace('\\', '')
                regex = regex.replace('|||', '\\')
                currCond = (where(prev_key).matches(regex))
                conditions = currCond if not conditions else (conditions & currCond)
            elif key in ['$and', '$or', '$in', '$all']:
                # handled below when recursing into the list value
                pass
            else:
                # don't want to use the previous key if this is a secondary key
                # (fixes multiple item query that includes $ codes)
                if not isinstance(value, dict) and not isinstance(value, list):
                    # match either a direct value or membership in a list field
                    conditions = (
                        (Q(q, key) == value) | (Q(q, key).any([value]))
                    ) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))
                    prev_key = key

            logger.debug(u'c: {}'.format(conditions))
            if isinstance(value, dict):
                # yield from self.parse_condition(value, key)
                for parse_condition in self.parse_condition(value, key, prev_key):
                    yield parse_condition
            elif isinstance(value, list):
                if key == '$and':
                    grouped_conditions = None
                    for spec in value:
                        for parse_condition in self.parse_condition(spec):
                            grouped_conditions = (
                                parse_condition
                                if not grouped_conditions
                                else grouped_conditions & parse_condition
                            )
                    yield grouped_conditions
                elif key == '$or':
                    grouped_conditions = None
                    for spec in value:
                        for parse_condition in self.parse_condition(spec):
                            grouped_conditions = (
                                parse_condition
                                if not grouped_conditions
                                else grouped_conditions | parse_condition
                            )
                    yield grouped_conditions
                elif key == '$in':
                    # use `any` to find with list, before comparing to single string
                    grouped_conditions = Q(q, prev_key).any(value)
                    for val in value:
                        for parse_condition in self.parse_condition({prev_key : val}):
                            grouped_conditions = (
                                parse_condition
                                if not grouped_conditions
                                else grouped_conditions | parse_condition
                            )
                    yield grouped_conditions
                elif key == '$all':
                    yield Q(q, prev_key).all(value)
                else:
                    yield Q(q, prev_key).any([value])
            else:
                yield conditions

    def update(self, query, doc, *args, **kwargs):
        """Backwards compatibility with update"""
        if isinstance(doc, list):
            return [
                self.update_one(query, item, *args, **kwargs)
                for item in doc
            ]
        else:
            return self.update_one(query, doc, *args, **kwargs)

    def update_one(self, query, doc):
        """
        Updates one element of the collection

        :param query: dictionary representing the mongo query
        :param doc: dictionary representing the item to be updated
        :return: UpdateResult
        """
        if self.table is None:
            self.build_table()

        if u"$set" in doc:
            doc = doc[u"$set"]

        allcond = self.parse_query(query)

        try:
            result = self.table.update(doc, allcond)
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            # TODO: check table.update result
            # check what pymongo does in that case
            result = None

        return UpdateResult(raw_result=result)

    def find(self, filter=None, sort=None, skip=None, limit=None,
             *args, **kwargs):
        """
        Finds all matching results

        :param filter: dictionary representing the mongo query
        :param sort: sort specification (see TinyMongoCursor.sort)
        :param skip: number of records to skip
        :param limit: maximum number of records to return
        :return: cursor containing the search results
        """
        if self.table is None:
            self.build_table()

        if filter is None:
            result = self.table.all()
        else:
            allcond = self.parse_query(filter)
            try:
                result = self.table.search(allcond)
            except (AttributeError, TypeError):
                result = []

        result = TinyMongoCursor(
            result,
            sort=sort,
            skip=skip,
            limit=limit
        )

        return result

    def find_one(self, filter=None):
        """
        Finds one matching query element

        :param filter: dictionary representing the mongo query
        :return: the resulting document (or None if not found)
        """
        if self.table is None:
            self.build_table()

        allcond = self.parse_query(filter)

        return self.table.get(allcond)

    def remove(self, spec_or_id, multi=True, *args, **kwargs):
        """Backwards compatibility with remove"""
        if multi:
            return self.delete_many(spec_or_id)
        return self.delete_one(spec_or_id)

    def delete_one(self, query):
        """
        Deletes one document from the collection

        :param query: dictionary representing the mongo query
        :return: DeleteResult (raw_result is None if nothing matched)
        """
        item = self.find_one(query)
        if item is None:
            # Nothing matched: report an empty delete instead of raising
            # TypeError on ``item[u'_id']`` below (pymongo also returns a
            # result rather than raising when no document matches).
            return DeleteResult(raw_result=None)
        result = self.table.remove(where(u'_id') == item[u'_id'])
        return DeleteResult(raw_result=result)

    def delete_many(self, query):
        """
        Removes all items matching the mongo query

        :param query: dictionary representing the mongo query
        :return: DeleteResult
        """
        items = self.find(query)
        result = [
            self.table.remove(where(u'_id') == item[u'_id'])
            for item in items
        ]

        if query == {}:
            # need to reset TinyDB's index for docs order consistency
            self.table._last_id = 0

        return DeleteResult(raw_result=result)
class TinyMongoCursor(object):
    """Mongo iterable cursor.

    Wraps a plain list of result documents and provides pymongo-style
    iteration, sorting and pagination on top of it.
    """

    def __init__(self, cursordat, sort=None, skip=None, limit=None):
        """Initialize the mongo iterable cursor with data"""
        self.cursordat = cursordat
        self.cursorpos = -1

        if len(self.cursordat) == 0:
            self.currentrec = None
        else:
            # NOTE(review): position -1 exposes the *last* record before
            # iteration has started - confirm this is intended
            self.currentrec = self.cursordat[self.cursorpos]

        # Sort first, then paginate the sorted data
        if sort:
            self.sort(sort)

        self.paginate(skip, limit)

    def __getitem__(self, key):
        """Gets record by index or value by key"""
        if isinstance(key, int):
            return self.cursordat[key]
        return self.currentrec[key]

    def paginate(self, skip, limit):
        """Paginate list of records"""
        # No data or no limit: nothing to do
        if not self.count() or not limit:
            return
        skip = skip or 0
        pages = int(ceil(self.count() / float(limit)))
        # Map each page's start offset to its end offset
        limits = {}
        last = 0
        for i in range(pages):
            current = limit * i
            limits[last] = current
            last = current
        # example with count == 62
        # {0: 20, 20: 40, 40: 60, 60: 62}
        if limit and limit < self.count():
            limit = limits.get(skip, self.count())
        self.cursordat = self.cursordat[skip: limit]

    def _order(self, value, is_reverse=None):
        """Parsing data to a sortable form

        By giving each data type an ID(int), and assemble with the value
        into a sortable tuple. This lets heterogeneous values compare
        without raising TypeError on Python 3.
        """

        def _dict_parser(dict_doc):
            """ dict ordered by:
            valueType_N -> key_N -> value_N
            """
            result = list()
            for key in dict_doc:
                data = self._order(dict_doc[key])
                res = (data[0], key, data[1])
                result.append(res)
            return tuple(result)

        def _list_parser(list_doc):
            """list will iter members to compare
            """
            result = list()
            for member in list_doc:
                result.append(self._order(member))
            return result

        # (TODO) include more data type
        # Type ranks: 0 unsortable/None, 1 number, 2 string, 3 dict,
        # 4 list, 5 bool (bool checked before int since bool is an int
        # subclass)
        if value is None or not isinstance(value, (dict,
                                                   list,
                                                   basestring,
                                                   bool,
                                                   float,
                                                   int)):
            # not support/sortable value type
            value = (0, None)

        elif isinstance(value, bool):
            value = (5, value)

        elif isinstance(value, (int, float)):
            value = (1, value)

        elif isinstance(value, basestring):
            value = (2, value)

        elif isinstance(value, dict):
            value = (3, _dict_parser(value))

        elif isinstance(value, list):
            if len(value) == 0:
                # [] less then None
                value = [(-1, [])]
            else:
                value = _list_parser(value)

            if is_reverse is not None:
                # list will firstly compare with other doc by it's smallest
                # or largest member
                value = max(value) if is_reverse else min(value)
            else:
                # if the smallest or largest member is a list
                # then compaer with it's sub-member in list index order
                value = (4, tuple(value))

        return value

    def sort(self, key_or_list, direction=None):
        """
        Sorts a cursor object based on the input

        :param key_or_list: a list/tuple containing the sort specification,
            i.e. ('user_number': -1), or a basestring
        :param direction: sorting direction, 1 or -1, needed if key_or_list
            is a basestring
        :return: self (cursor sorted in place)
        """

        # checking input format

        sort_specifier = list()
        if isinstance(key_or_list, list):
            if direction is not None:
                raise ValueError('direction can not be set separately '
                                 'if sorting by multiple fields.')
            for pair in key_or_list:
                if not (isinstance(pair, list) or isinstance(pair, tuple)):
                    raise TypeError('key pair should be a list or tuple.')
                if not len(pair) == 2:
                    raise ValueError('Need to be (key, direction) pair')
                if not isinstance(pair[0], basestring):
                    raise TypeError('first item in each key pair must '
                                    'be a string')
                if not isinstance(pair[1], int) or not abs(pair[1]) == 1:
                    raise TypeError('bad sort specification.')

            sort_specifier = key_or_list

        elif isinstance(key_or_list, basestring):
            if direction is not None:
                if not isinstance(direction, int) or not abs(direction) == 1:
                    raise TypeError('bad sort specification.')
            else:
                # default ASCENDING
                direction = 1

            sort_specifier = [(key_or_list, direction)]

        else:
            raise ValueError('Wrong input, pass a field name and a direction,'
                             ' or pass a list of (key, direction) pairs.')

        # sorting

        _cursordat = self.cursordat

        total = len(_cursordat)
        pre_sect_stack = list()
        # Sort by each (key, direction) pair in turn; "sections" carry the
        # ordering established by earlier keys into later passes (stable
        # multi-key sort)
        for pair in sort_specifier:

            is_reverse = bool(1-pair[1])
            value_stack = list()
            for index, data in enumerate(_cursordat):

                # get field value, following dotted paths

                not_found = None
                for key in pair[0].split('.'):
                    not_found = True

                    if isinstance(data, dict) and key in data:
                        data = copy.deepcopy(data[key])
                        not_found = False

                    elif isinstance(data, list):
                        if not is_reverse and len(data) == 1:
                            # MongoDB treat [{data}] as {data}
                            # when finding fields
                            if isinstance(data[0], dict) and key in data[0]:
                                data = copy.deepcopy(data[0][key])
                                not_found = False

                        elif is_reverse:
                            # MongoDB will keep finding field in reverse mode
                            for _d in data:
                                if isinstance(_d, dict) and key in _d:
                                    data = copy.deepcopy(_d[key])
                                    not_found = False
                                    break

                    if not_found:
                        break

                # parsing data for sorting

                if not_found:
                    # treat no match as None
                    data = None

                value = self._order(data, is_reverse)

                # read previous section
                pre_sect = pre_sect_stack[index] if pre_sect_stack else 0

                # inverse if in reverse mode
                # for keeping order as ASCENDING after sort
                pre_sect = (total - pre_sect) if is_reverse else pre_sect
                _ind = (total - index) if is_reverse else index

                value_stack.append((pre_sect, value, _ind))

            # sorting cursor data

            value_stack.sort(reverse=is_reverse)

            ordereddat = list()
            sect_stack = list()
            sect_id = -1
            last_dat = None
            for dat in value_stack:

                # restore if in reverse mode

                _ind = (total - dat[-1]) if is_reverse else dat[-1]
                ordereddat.append(_cursordat[_ind])

                # define section
                # maintain the sorting result in next level sorting

                if not dat[1] == last_dat:
                    sect_id += 1
                sect_stack.append(sect_id)
                last_dat = dat[1]

            # save result for next level sorting
            _cursordat = ordereddat
            pre_sect_stack = sect_stack

        # done

        self.cursordat = _cursordat

        return self

    def hasNext(self):
        """
        Returns True if the cursor has a next position, False if not

        :return: bool
        """
        cursor_pos = self.cursorpos + 1

        try:
            self.cursordat[cursor_pos]
            return True
        except IndexError:
            return False

    def next(self):
        """
        Returns the next record

        :return: the next document in the cursor
        """
        self.cursorpos += 1
        return self.cursordat[self.cursorpos]

    def count(self, with_limit_and_skip=False):
        """
        Returns the number of records in the current cursor

        :param with_limit_and_skip: accepted for pymongo compatibility;
            the count always reflects the current (possibly paginated)
            data
        :return: number of records
        """
        return len(self.cursordat)
class TinyGridFS(object):
    """Placeholder GridFS interface for TinyDB."""

    def __init__(self, *args, **kwargs):
        # No database bound until GridFS() is called.
        self.database = None

    def GridFS(self, tinydatabase):
        """Bind *tinydatabase* and return self. TODO: Must implement yet."""
        self.database = tinydatabase
        return self
def generate_id():
    """Generate a new UUID-based id string (uuid1 hex, dashes removed)."""
    # TODO: Use six.string_type to Py3 compat
    new_uuid = uuid1()
    try:
        return unicode(new_uuid).replace(u"-", u"")
    except NameError:
        # Python 3 has no `unicode` builtin; str is already unicode.
        return str(new_uuid).replace(u"-", u"")
| {
"repo_name": "schapman1974/tinymongo",
"path": "tinymongo/tinymongo.py",
"copies": "1",
"size": "27161",
"license": "mit",
"hash": -2574199815314873000,
"line_mean": 32.4495073892,
"line_max": 114,
"alpha_frac": 0.5095541401,
"autogenerated": false,
"ratio": 4.682931034482759,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.569248517458276,
"avg_score": null,
"num_lines": null
} |
"""Actual device classes."""
import enum
import glob
import logging
import os
import os.path
import random
import re
import struct
import subprocess
import warnings
from typing import Any, List, Tuple
import serial
from . import usb
from .devices_base import Board, BoardMeta
try:
# See if we have vision support
import sb_vision # noqa: F401
ENABLE_VISION = True
except ImportError:
warnings.warn(
"sb_vision not installed, disabling vision support",
category=ImportWarning,
)
ENABLE_VISION = False
if ENABLE_VISION:
# Register the camera 'board' by importing it
from .camera import Camera # noqa: F401
LOGGER = logging.getLogger(__name__)
class MotorBoard(Board):
    """Student Robotics-era Motor board."""

    # udev match: FTDI vendor id on a tty device
    lookup_keys = {
        'ID_VENDOR_ID': '0403',
        'subsystem': 'tty',
    }

    @classmethod
    def included(cls, node):
        # Also check the human-readable string as well as the numeric ID,
        # since 0403:6001 is too general (it only identifies the device as an
        # FTDI USB-serial bridge rather than as a motor board).
        return node['ID_MODEL_ID'] == '6001' and node['ID_MODEL'] == 'MCV4B'

    @classmethod
    def name(cls, node):
        """Board name - actually fetched over serial."""
        return node['ID_SERIAL_SHORT']

    def start(self):
        """Open connection to peripheral."""
        device = self.node['DEVNAME']
        self.connection = serial.Serial(device, baudrate=1000000)
        self.make_safe()

    def make_safe(self):
        """
        Set peripheral to a safe state.

        This is called after control connections have died.
        """
        # set both motors to brake
        # NOTE(review): bytes appear to be (motor-id, speed-byte) pairs
        # with a leading \x00; confirm against the board firmware docs
        self.connection.write(b'\x00\x02\x02\x03\x02')
        self._status = {'m0': 'brake', 'm1': 'brake'}

    def status(self):
        """Brief status description of the peripheral."""
        return self._status

    @classmethod
    def byte_for_speed(cls, value):
        """
        Get the byte value for the given speed value.

        Accepts float or a string of 'coast' or 'brake'.

        Mapping: 'coast' -> 1, 'brake' -> 2, and a float in [-1, 1]
        maps linearly to 28..228 (128 + 100 * value, truncated).

        :raises ValueError: for any other value
        """
        if value == 'coast':
            return 1
        elif value == 'brake':
            return 2
        elif -1 <= value <= 1:
            return 128 + int(100 * value)
        else:
            raise ValueError('Unknown speed value: {}'.format(value))

    def command(self, cmd):
        """Run user-provided command.

        *cmd* is merged into the cached status dict (keys 'm0'/'m1'),
        then both motors' speeds are (re)sent to the board.
        """
        self._status.update(cmd)
        # Byte pairs: (2, x) addresses motor 0, (3, x) addresses motor 1;
        # the leading (…, 2)/(…, 1) pairs presumably re-arm the outputs -
        # TODO confirm against firmware protocol.
        self.connection.write(bytes([
            2, 2,
            3, 2,
            2, 1,
            3, 1,
            2, self.byte_for_speed(self._status['m0']),
            3, self.byte_for_speed(self._status['m1']),
        ]))
class BrainTemperatureSensor(Board):
    """
    Internal Raspberry Pi temperature sensor.

    This has extremely limited practical use and is basically here to serve as
    an example of how to add new devices.
    """

    # udev match: any node in the 'thermal' subsystem
    lookup_keys = {
        'subsystem': 'thermal',
    }

    # Disabled by default - example device only
    enabled = False

    @classmethod
    def name(cls, node):
        """Simple node name."""
        return node.sys_name

    def read_temperature_value(self):
        # Read the raw sysfs value; it is in millidegrees Celsius
        # (status() divides by 1000).
        path = '{}/temp'.format(self.node.sys_path)
        with open(path) as file:
            return int(file.read())

    def status(self):
        """Brief status description of the peripheral."""
        temp_milli_degrees = self.read_temperature_value()
        return {'temperature': temp_milli_degrees / 1000}
class NoZoneFound(ValueError):
    """Raised when a search for a zone marker file finds nothing."""
class GameState(Board):
    """
    State storage for the game, keeps a store of everything it has received.

    The robot's zone (and hence competition vs development mode) is
    determined by looking for a ``zone-N`` marker directory on a mounted
    USB stick.
    """

    # Glob matching zone marker directories on mounted USB sticks
    FILE_GLOB = '/media/usb?/zone-?'
    # Raw string: `\d` is a regex escape, not a Python string escape
    # (the previous non-raw literal triggered a DeprecationWarning).
    ZONE_REGEX = re.compile(r'zone-(\d)')
    # A candidate directory that also contains user code is a usercode
    # stick, not a zone marker - it must be ignored.
    IGNORE_DIRS_CONTAINING_FILE_NAMES = ('main.py',)

    # define the name of the board
    board_type_id = 'game'
    create_on_startup = True

    def __init__(self):
        # Virtual board: no udev node backs it.
        super().__init__({})

    @classmethod
    def name(cls, node):
        """Fixed name - there is only one game state board."""
        return 'state'

    def as_siblings(self, file_path: str, file_names: List[str]) -> List[str]:
        """Return paths for *file_names* alongside *file_path*."""
        parent = os.path.dirname(file_path)
        return [os.path.join(parent, x) for x in file_names]

    def any_exist(self, file_paths: List[str]) -> bool:
        """True if any of the given paths exists on disk."""
        return any(os.path.exists(x) for x in file_paths)

    def find_zone(self):
        """Return the zone number from the first matching marker directory.

        :raises NoZoneFound: if no suitable marker directory exists.
        """
        for candidate_path in glob.iglob(self.FILE_GLOB):
            match = self.ZONE_REGEX.search(candidate_path)

            if match is None:
                continue

            if self.any_exist(self.as_siblings(
                candidate_path,
                self.IGNORE_DIRS_CONTAINING_FILE_NAMES,
            )):
                continue

            return int(match.group(1))

        raise NoZoneFound()

    def status(self):
        """Zone and mode: competition when a zone stick is present,
        otherwise zone 0 in development mode."""
        try:
            return {
                'zone': self.find_zone(),
                'mode': 'competition',
            }
        except NoZoneFound:
            return {'zone': 0, 'mode': 'development'}
class PowerOutput(enum.Enum):
    """An enumeration of the outputs on the power board.

    The numeric values are passed as the index argument of the USB
    control transfer in ``PowerBoard._set_power_output``.
    """

    HIGH_POWER_0 = 0
    HIGH_POWER_1 = 1
    LOW_POWER_0 = 2
    LOW_POWER_1 = 3
    LOW_POWER_2 = 4
    LOW_POWER_3 = 5
class PowerBoard(Board):
    """A power board."""

    # udev match: Student Robotics vendor id on the usb subsystem
    lookup_keys = {
        'subsystem': 'usb',
        'ID_VENDOR_ID': '1bda',
    }

    @classmethod
    def included(cls, node):
        # Narrow the vendor match down to the power board product id.
        return node['ID_MODEL_ID'] == '0010'

    @classmethod
    def name(cls, node):
        """Board name."""
        return node['ID_SERIAL_SHORT']

    def start(self):
        """Open connection to peripheral."""
        # We get the bus path to the device inferred from the DEVPATH
        # from systemd.
        path = tuple(int(x) for x in (
            self.node['DEVPATH'].rsplit('-', 1)[-1].split('.')
        ))
        for device in usb.enumerate_devices():
            if device.path == path:
                self.device = device
                break
        else:
            raise RuntimeError('Cannot open USB device by path')
        self.device.open()
        self.make_safe()
        # This power board is now ready; signal to systemd that robotd is
        # therefore ready. Note: notifies on behalf of the *parent*
        # process (getppid).
        subprocess.check_call([
            'systemd-notify',
            '--ready',
            '--pid={}'.format(os.getppid()),
        ])

    def _set_power_output(self, output: PowerOutput, level: bool) -> None:
        # 64 is presumably the vendor control-request code used by the
        # project's `usb` helper; value = on/off, index = output channel.
        # TODO confirm against the usb module.
        self.device.control_write(
            64,
            int(level),
            output.value,
        )

    def _set_power_outputs(self, level: bool) -> None:
        # Apply the same level to every output on the board.
        for output in PowerOutput:
            self._set_power_output(output, level)

    def _set_start_led(self, value):
        # Index 6 addresses the start LED.
        self.device.control_write(64, value, 6)

    def _buzz_piezo(self, args):
        # Payload: two unsigned shorts (frequency Hz, duration ms?) -
        # units unverified; confirm against firmware.
        data = struct.pack("HH", args['frequency'], args['duration'])
        self.device.control_write(64, 0, 8, data)

    @property
    def start_button_status(self):
        # Any non-zero byte in the 4-byte read means "pressed".
        result = self.device.control_read(64, 0, 8, 4)
        return any(result)

    def make_safe(self):
        # Everything off.
        self._set_power_outputs(0)

    def status(self):
        return {'start-button': self.start_button_status}

    def command(self, cmd):
        # Dispatch on which keys are present; only the first matching
        # branch runs.
        if 'power-output' in cmd and 'power-level' in cmd:
            output = PowerOutput(cmd['power-output'])
            power = bool(cmd['power-level'])
            self._set_power_output(output, power)
        elif 'power' in cmd:
            power = bool(cmd['power'])
            self._set_power_outputs(power)
        elif 'start-led' in cmd:
            value = bool(cmd['start-led'])
            self._set_start_led(1 if value else 0)
        elif 'buzz' in cmd:
            self._buzz_piezo(cmd['buzz'])
class CommandError(RuntimeError):
    """The servo assembly experienced an error in processing a command."""

    def __init__(self, command: Tuple[Any, ...], error: str, comments: List[str]) -> None:
        self.command = command
        self.error = error
        self.comments = comments

    def __str__(self):
        # Error message first, a blank separator line, then any comments.
        lines = [self.error, '']
        lines.extend(self.comments)
        return "\n".join(lines)
class InvalidResponse(ValueError):
    """The servo assembly emitted a response which could not be processed."""

    def __init__(self, command: Tuple[Any, ...], response: bytes) -> None:
        self.command = command    # the command tuple that was sent
        self.response = response  # the raw bytes received back

    def __str__(self):
        return "Invalid response from Arduino: {!r}".format(self.response)
class ServoAssembly(Board):
    """
    A servo assembly.
    Technically this is actually an Arduino with a servo shield attached.

    Wire protocol (see _command): requests are '@<id> <args...>\n' over
    serial; replies are lines prefixed '@<id> ' then one of
    '+ ' (success), '- ' (error), '# ' (comment), '> ' (result).
    """

    # udev match: the Arduino enumerates as a tty device.
    lookup_keys = {
        'subsystem': 'tty',
    }

    NUM_SERVOS = 16         # servo channels on the shield
    GPIO_IDS = range(2, 14) # usable Arduino digital pins
    INPUT = 'Z'             # pin setting meaning "high-impedance input"

    @classmethod
    def included(cls, node):
        """Match udev nodes whose USB model/vendor ids look like an Uno."""
        if 'ID_MODEL_ID' not in node or 'ID_VENDOR_ID' not in node:
            return False
        return (node['ID_MODEL_ID'], node['ID_VENDOR_ID']) in [
            ('0043', '2a03'),  # Fake Uno
            ('7523', '1a86'),  # Real Uno
        ]

    @classmethod
    def name(cls, node):
        """Board name."""
        # Fall back to the device minor number when no serial is exposed.
        return node.get('ID_SERIAL_SHORT',
                        'SB_{}'.format(node['MINOR']))

    def start(self):
        """Open the serial link, read the firmware version, and reset state."""
        device = self.node['DEVNAME']
        self.connection = serial.Serial(device, baudrate=115200, timeout=0.2)
        # Older pyserial spells reset_input_buffer as flushInput.
        if hasattr(self.connection, 'reset_input_buffer'):
            self._reset_input_buffer = self.connection.reset_input_buffer
        else:
            self._reset_input_buffer = self.connection.flushInput
        # 'V' returns exactly one result line: the firmware version.
        (self.fw_version,) = self._command('V')
        self.fw_version = self.fw_version.strip()
        self._servo_status = {}
        self._pin_status = {}
        self._pin_values = {}
        self._analogue_values = {}
        self._ultrasound_value = None
        self.make_safe()
        LOGGER.debug('Finished initialising servo assembly on %r', device)

    def _command(self, *args, generic_command=False) -> List[str]:
        """Send a command and collect its '>' result lines.

        Retries (with the same random command id) on timeouts, id
        mismatches, or parse errors; raises CommandError on a '- ' error
        reply and InvalidResponse for unparseable lines when
        generic_command is True.
        """
        command_id = random.randint(1, 65535)
        while True:
            self._reset_input_buffer()
            command_id_part = '@{id} '.format(id=command_id).encode('utf-8')
            command_args_part = ' '.join(str(x) for x in args).encode('utf-8')
            line = command_id_part + command_args_part + b'\n'
            # Leading NUL terminates any partially-received command on the
            # Arduino side before the real request.
            self.connection.write(b'\0')
            self.connection.write(line)
            self.connection.flush()
            LOGGER.debug('Sending to servo assembly: %r', line)
            comments = []  # type: List[str]
            results = []  # type: List[str]
            while True:
                line = self.connection.readline()
                LOGGER.debug('Got back from servo: %r', line)
                if not line:
                    # Leave the loop and reissue the command
                    break
                if line.startswith(b'@'):
                    # Strip the echoed command id and check it is ours.
                    returned_command_id_str, line = line[1:].split(b' ', 1)
                    returned_command_id = int(
                        returned_command_id_str.decode('utf-8'),
                    ) & 0xffff
                    if returned_command_id != command_id:
                        LOGGER.debug('Got response for different command, ignoring...')
                        continue
                try:
                    if line.startswith(b'+ '):
                        # Success terminator: hand back accumulated results.
                        return results
                    elif line.startswith(b'- '):
                        if b'unknown command' in line and not generic_command:
                            break  # try again
                        else:
                            raise CommandError(
                                args,
                                line[2:].decode('utf-8'),
                                comments,
                            )
                    elif line.startswith(b'# '):
                        comments.append(line[2:].decode('utf-8').strip())
                    elif line.startswith(b'> '):
                        results.append(line[2:].decode('utf-8').strip())
                    else:
                        raise InvalidResponse(args, line)
                except InvalidResponse:
                    if generic_command:
                        raise
                    else:
                        break
                except ValueError:
                    # Garbled id or payload; resend the command.
                    break

    def make_safe(self):
        """Zero all servos and return every GPIO pin to input mode."""
        for servo in range(self.NUM_SERVOS):
            self._set_servo(servo, None)
        for pin in self.GPIO_IDS:
            self._write_pin(pin, self.INPUT)

    def _set_servo(self, servo, status):
        """Set a servo: None = off, -1..1 maps linearly to pulse 150..550.

        Values outside [-1, 1] are silently ignored.
        """
        if status is None:
            level = 0
        elif -1 <= status <= 1:
            # Adjust to be in the range 0-1
            status_unit = (status + 1) / 2
            level = 150 + int((550 - 150) * status_unit)
        else:
            return
        self._command('S', servo, level)
        self._servo_status[str(servo)] = level

    def _write_pin(self, pin, setting):
        """Set a GPIO pin mode/level and remember the requested setting."""
        self._pin_status[pin] = setting
        return self._command('W', pin, setting)

    def _read_pin(self, pin):
        """Read one GPIO pin into the _pin_values cache."""
        result = self._command('R', pin)[0]
        self._pin_values.update({pin: result})

    def _read_analogue(self):
        """Read all analogue pins; convert raw 0-1023 counts to volts (5V ref)."""
        results = self._command('A')
        for result in results:
            name, value = result.split(' ')
            voltage = int(value) * 5 / 1024
            self._analogue_values.update({name: voltage})

    def _read_ultrasound(self, trigger_pin, echo_pin):
        """Take three ultrasound readings and keep the median, in seconds.

        The board reports milliseconds; the median rejects single outliers.
        """
        found_values = []
        for i in range(3):
            result = self._command('U', trigger_pin, echo_pin)[0]
            found_values.append(float(result))
        self._ultrasound_value = list(sorted(found_values))[1] / 1000.0

    def _generic_command(self, command):
        """Run an arbitrary raw command, wrapping outcome in a status dict."""
        try:
            return {
                'status': 'ok',
                'data': self._command(*command, generic_command=True),
            }
        except (CommandError, InvalidResponse) as e:
            return {
                'status': 'error',
                'type': type(e).__name__,
                'description': str(e),
            }

    def status(self):
        """Snapshot of all cached board state for clients."""
        return {
            'servos': self._servo_status,
            'pins': self._pin_status,
            'pin-values': self._pin_values,
            'fw-version': self.fw_version,
            'analogue-values': self._analogue_values,
            'ultrasound': self._ultrasound_value,
        }

    def command(self, cmd):
        """Process a client command dict; several sections may apply at once."""
        # handle servos
        servos = cmd.get('servos', {})
        for servo_id, status in servos.items():
            self._set_servo(int(servo_id), status)
        # handle writing pins
        pins = cmd.get('pins', {})
        for pin, status in pins.items():
            self._write_pin(int(pin), status)
        # handle reading pins
        self._pin_values = {}
        pins = cmd.get('read-pins', [])
        for pin in pins:
            self._read_pin(int(pin))
        # handle reading analogue pins
        self._analogue_values = {}
        read_analogue = cmd.get('read-analogue', False)
        if read_analogue:
            self._read_analogue()
        # handle ultrasound
        self._ultrasound_value = None
        read_ultrasound = cmd.get('read-ultrasound', [])
        if len(read_ultrasound) == 2:
            self._read_ultrasound(read_ultrasound[0], read_ultrasound[1])
        # handle direct command access
        command = cmd.get('command', [])
        if command:
            return self._generic_command(command)
# Grab the full list of boards from the workings of the metaclass
# (presumably BoardMeta registers each Board subclass as it is defined —
# confirm in BoardMeta).
BOARDS = BoardMeta.BOARDS
| {
"repo_name": "sourcebots/robotd",
"path": "robotd/devices.py",
"copies": "1",
"size": "15667",
"license": "mit",
"hash": 7356298522979578000,
"line_mean": 27.6941391941,
"line_max": 90,
"alpha_frac": 0.5376906874,
"autogenerated": false,
"ratio": 3.9623166413758217,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5000007328775822,
"avg_score": null,
"num_lines": null
} |
""" Actual implementation of service """
import logging
from datetime import datetime
import threading
import xbmc
import xbmcgui
from resources.lib.service_api import Api
from resources.lib.objects.movies import FullMovieUpdater, IncrementalMovieUpdater
import resources.lib.objects.database as database
from resources.lib.util import window, settings # , sourcesXML
from resources.lib.date_utils import DateUtils
# Module-level logger for the library sync service.
log = logging.getLogger("DINGS.library")  # pylint: disable=invalid-name
class Library(threading.Thread):
    """Background sync thread.

    Periodically pulls movie metadata from the remote API and applies either
    a full or an incremental update to the local Kodi video library.
    """

    client_version = '5'
    _shared_state = {}  # Borg pattern: every Library instance shares state
    stop_thread = False
    pdialog = None      # progress dialog shown while a sync batch runs
    title = None        # title of the item currently being processed
    count = 0           # items processed so far in the current batch
    total = 0           # number of items in the current batch
    date_utils = DateUtils()

    def __init__(self):
        # Adopt the shared (Borg) state *before* initialising the Thread
        # base class, so the thread bookkeeping lands in the shared dict.
        # (The original initialised Thread first, swapped __dict__, then
        # re-ran threading.Thread.__init__(self) bare, which discarded the
        # name='Library' set in the first call.)
        self.__dict__ = self._shared_state
        super(Library, self).__init__(name='Library')
        self.monitor = xbmc.Monitor()
        self.player = xbmc.Player()
        self._abort_event = threading.Event()
        self._abort_event.clear()
        self.api = Api(
            settings("host"),
            settings("username"),
            settings("password")
        )

    def run(self):
        """Thread entry point: run the sync loop, always cleaning up."""
        try:
            self._run_internal()
        except Exception as e:
            log.exception(e)
        finally:
            window("dings_kodiscan", clear=True)
            if self.pdialog:
                self.pdialog.close()
            self.monitor = None
            self.pdialog = None
            self.player = None

    def _run_internal(self):
        """ Starts the service """
        log.debug("Starting service service.library.video...")
        if not self.player.isPlaying():
            self._start_sync()
        while not (self._should_stop()):
            if self._should_sync():
                self._start_sync()
            if self._should_stop():
                # Set in service.py
                log.debug("Service terminated thread.")
                break
            if self._should_stop() or self.monitor.waitForAbort(10):
                # Abort was requested while waiting. We should exit
                log.debug("waitForAbort")
                break
        # log.warn is a deprecated alias of warning
        log.warning("###===--- LibrarySync Stopped ---===###")

    def _start_sync(self):
        """Run one sync pass, blocking idle shutdown for its duration."""
        xbmc.executebuiltin('InhibitIdleShutdown(true)')
        try:
            start_time = datetime.now()
            total, count = self.update()
            if not self._should_stop():
                self.set_last_sync(start_time)
            elapsedtotal = datetime.now() - start_time
            log.info("%s av %s filmer lagt til. Det tok %s",
                     count, total, str(elapsedtotal).split('.')[0])
        except Exception as e:
            # exception() keeps the traceback, matching run()'s handler.
            log.exception(e)
        finally:
            # Key normalised to lower case ('dings_kodiScan' in the
            # original) for consistency with where it is set.
            window('dings_kodiscan', clear=True)
            if self.pdialog:
                self.pdialog.close()
            xbmc.executebuiltin('InhibitIdleShutdown(false)')

    def show_progress(self, title):
        """Create, show and return a background progress dialog."""
        dialog = xbmcgui.DialogProgressBG()
        dialog.create("Dings for Kodi", title)
        dialog.update(1)
        return dialog

    def update(self):
        """
        Invokes self._full_update() if _should_do_full_sync() else
        _incremental_update().
        Returns (total, count).
        """
        force = False  # flip to True to force a full sync when debugging
        if force or self._should_do_full_sync():
            return self._full_update()
        return self._incremental_update()

    def _delete_missing_movies(self, all_movies):
        """Remove local movies whose release id is no longer on the remote."""
        all_release_ids = [m.get('id') for m in all_movies]
        window("dings_kodiscan", "true")
        self.pdialog = self.show_progress("Deleting movies")
        with database.DatabaseConn() as cursor_video:
            movies_db = FullMovieUpdater(cursor_video)
            movies_for_wipe = movies_db.get_movies_to_remove(all_release_ids)
            log.info("Found %s movies to remove", len(movies_for_wipe))
            for movie in self.added(movies_for_wipe):
                movies_db.delete(movie)
                log.info("Removed %s because it was not on remote", movie['title'])
        window("dings_kodiscan", clear=True)
        if self.pdialog:
            self.pdialog.close()
        log.info("Removing files done")

    def _full_update(self):
        """Sync every movie from the remote and prune ones gone remotely."""
        start_time = datetime.now()
        all_movies = self.api.get_all_movies()
        total, count = self._do_update(all_movies, FullMovieUpdater)
        if not self._should_stop():
            self._delete_missing_movies(all_movies)
            self.save_last_full_sync(start_time)
        return total, count

    def _incremental_update(self):
        """Sync only movies changed since the last incremental sync."""
        all_movies = self.api.get_movies_from(self.date_utils.get_str_date(self.get_last_sync()))
        return self._do_update(all_movies, IncrementalMovieUpdater)

    def _do_update(self, movies, db_factory):
        """Apply db_factory updates for movies; return (total, updated)."""
        if self._should_stop():
            return 0, 0
        l_count = 0
        total = len(movies)
        window("dings_kodiscan", "true")
        self.pdialog = self.show_progress(db_factory.get_name())
        with database.DatabaseConn() as cursor_video:
            movies_db = db_factory(cursor_video)
            for movie in self.added(movies, total):
                if movies_db.update(movie):
                    log.debug("La til filmen %s id: %s, r: %s. %s/%s",
                              movie.get('title'), movie.get('imdb'), movie.get('id'), self.count, total)
                    l_count += 1
        # Key normalised to lower case ('dings_kodiScan' in the original).
        window('dings_kodiscan', clear=True)
        if self.pdialog:
            self.pdialog.close()
        return total, l_count

    def save_last_full_sync(self, last_sync):
        """
        def save_last_full_sync(self, last_sync: datetime) -> None
        Saves last full sync to settings
        Arguments
        last_sync: date
        """
        settings('LastFullSync', self.date_utils.get_str_date(last_sync))
        self._update_client_version()

    def _get_last_full_sync(self):
        """Last full sync time, or epoch start if never synced."""
        last_sync = settings('LastFullSync')
        if not last_sync:
            return datetime(1970, 1, 1)
        return self.date_utils.parse_str_date(last_sync)

    def set_last_sync(self, last_sync):
        """Persist the incremental sync timestamp."""
        settings('LastIncrementalSync', self.date_utils.get_str_date(last_sync))
        self._update_client_version()

    def get_sync_interval(self):
        """Configured sync interval in minutes (default 10)."""
        interval = settings("interval")
        if not interval:
            return 10
        return int(interval)

    def get_last_sync(self):
        """Last incremental sync time, or epoch start if never synced."""
        last_sync = settings('LastIncrementalSync')
        if not last_sync:
            return datetime(1970, 1, 1)
        return self.date_utils.parse_str_date(settings('LastIncrementalSync'))

    def _update_client_version(self):
        """Record the current client version once a sync has completed."""
        if self._is_outdated_client():
            settings('ClientVersion', self.client_version)

    def _get_client_version(self):
        """Stored client version, defaulting to 1 when never recorded."""
        client_version = settings('ClientVersion')
        if not client_version:
            return 1
        return client_version

    def _is_outdated_client(self):
        """True when the stored version differs from this code's version."""
        return self._get_client_version() != self.client_version

    def _should_do_full_sync(self):
        """Full sync after a client upgrade or once every 24 hours."""
        if self._is_outdated_client():
            return True
        last_full_sync = self._get_last_full_sync()
        interval_seconds = 24 * 60 * 60
        diff = datetime.now() - last_full_sync
        return diff.total_seconds() > interval_seconds

    def _should_sync(self):
        """Incremental sync when idle and the interval has elapsed."""
        if self.player.isPlaying():
            return False
        last_sync = self.get_last_sync()
        sync_interval = self.get_sync_interval()
        interval_seconds = 60 * sync_interval
        diff = datetime.now() - last_sync
        return diff.total_seconds() > interval_seconds

    def added(self, items, total=None):
        """
        Generator to check abort and show notifications; yields each item
        unless aborted.
        Arguments:
        items: array
        """
        self.total = total or len(items)
        self.count = 0
        for item in items:
            if self._should_stop():
                log.debug('should_stop from added')
                break
            self.title = item.get('title', "unknown")
            yield item
            self.update_pdialog()
            self.count += 1

    def update_pdialog(self):
        """Refresh the progress dialog with current progress and title."""
        if self.pdialog:
            # Guard against a zero total (empty batch) -> ZeroDivisionError.
            if self.total:
                percentage = int((float(self.count) / float(self.total)) * 100)
            else:
                percentage = 0
            self.pdialog.update(percentage, message=self.title)

    def _should_stop(self):
        """True when an abort was requested by us, the service, or Kodi."""
        return self._abort_event.is_set() or self.stop_thread or self.monitor.abortRequested()

    def stopThread(self):
        """Ask the thread to stop (called from the service on shutdown)."""
        self.stop_thread = True
        self._abort_event.set()
        log.debug("Ending thread...")
"repo_name": "sebastian-steinmann/kodi-repo",
"path": "src/service.library.video/resources/lib/librarysync.py",
"copies": "1",
"size": "8739",
"license": "mit",
"hash": -4788943123480958000,
"line_mean": 30.103202847,
"line_max": 104,
"alpha_frac": 0.5767250257,
"autogenerated": false,
"ratio": 4.006877579092159,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007457496831443089,
"num_lines": 281
} |
""" Actual implementation of service """
import logging
import xbmc
import resources.lib.loghandler as loghandler
from librarysync import Library
# Configure log handlers once at import time, before any logger is used.
loghandler.config()

log = logging.getLogger("DINGS.service")  # pylint: disable=invalid-name
class Service(object):
    """Root service for sync: owns the Library worker thread's lifecycle."""

    # Whether the worker thread has been started (a Thread may only be
    # started once).
    library_running = False
    library_thread = None

    def __init__(self):
        self.monitor = xbmc.Monitor()

    def run(self):
        """ Starts the service """
        self.library_thread = Library()
        log.debug("Starting service service.library.video...")
        while not self.monitor.abortRequested():
            if self.shouldRun():
                self.library_running = True
                self.library_thread.start()
            # Sleep/wait for abort for 10 seconds
            aborted = self.monitor.waitForAbort(10)
            if aborted:
                # Abort was requested while waiting. We should exit
                log.info("Aborting!")
                break
        self.shutdown()

    def shouldRun(self):
        """Start the worker exactly once."""
        return not self.library_running

    def shutdown(self):
        """ cleanup in case of abort """
        xbmc.executebuiltin('InhibitIdleShutdown(false)')
        self.monitor = None
        if not self.library_running:
            return
        self.library_thread.stopThread()
| {
"repo_name": "sebastian-steinmann/kodi-repo",
"path": "src/service.library.video/resources/lib/service_entry.py",
"copies": "1",
"size": "1293",
"license": "mit",
"hash": -2053233601908875000,
"line_mean": 26.5106382979,
"line_max": 72,
"alpha_frac": 0.6117556071,
"autogenerated": false,
"ratio": 4.458620689655173,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5570376296755173,
"avg_score": null,
"num_lines": null
} |
"""actually Dropping unused columns now
Revision ID: be0687950ece
Revises: d547cd837350
Create Date: 2017-03-01 05:39:20.282931
"""
# revision identifiers, used by Alembic.
revision = 'be0687950ece'        # this migration's id
down_revision = 'd547cd837350'   # previous migration in the chain
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
from sqlalchemy.dialects import postgresql
def upgrade():
    """Drop the unused nu_outbound_wrappers table and two feed_pages columns."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('nu_outbound_wrappers')
    for column_name in ('srcname', 'feedurl'):
        op.drop_column('feed_pages', column_name)
    ### end Alembic commands ###
def downgrade():
    """Recreate the dropped feed_pages columns and nu_outbound_wrappers table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('feed_pages', sa.Column('feedurl', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('feed_pages', sa.Column('srcname', sa.TEXT(), autoincrement=False, nullable=True))
    op.create_table('nu_outbound_wrappers',
        sa.Column('id', sa.BIGINT(), nullable=False),
        sa.Column('actual_target', sa.TEXT(), autoincrement=False, nullable=True),
        sa.Column('client_id', sa.TEXT(), autoincrement=False, nullable=True),
        sa.Column('client_key', sa.TEXT(), autoincrement=False, nullable=True),
        sa.Column('groupinfo', sa.TEXT(), autoincrement=False, nullable=True),
        sa.Column('outbound_wrapper', sa.TEXT(), autoincrement=False, nullable=True),
        sa.Column('referrer', sa.TEXT(), autoincrement=False, nullable=True),
        sa.Column('releaseinfo', sa.TEXT(), autoincrement=False, nullable=True),
        sa.Column('seriesname', sa.TEXT(), autoincrement=False, nullable=True),
        sa.Column('validated', sa.BOOLEAN(), autoincrement=False, nullable=True),
        sa.Column('released_on', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
        sa.PrimaryKeyConstraint('id', name='nu_outbound_wrappers_pkey'),
        sa.UniqueConstraint('client_id', 'client_key', 'seriesname', 'releaseinfo', 'groupinfo', 'actual_target', name='nu_outbound_wrappers_client_id_client_key_seriesname_releas_key')
    )
    ### end Alembic commands ###
| {
"repo_name": "fake-name/ReadableWebProxy",
"path": "alembic/versions/00030_be0687950ece_actually_dropping_unused_columns_now.py",
"copies": "1",
"size": "2541",
"license": "bsd-3-clause",
"hash": -669237704050494700,
"line_mean": 40.6557377049,
"line_max": 181,
"alpha_frac": 0.7347500984,
"autogenerated": false,
"ratio": 3.5889830508474576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.975372007387746,
"avg_score": 0.014002615073999478,
"num_lines": 61
} |
#actually get the tweets and save em and what not
import tweepyutils
import pgutils
import parsetweets
import requests
import json
#import spellcheck
# Shared DB cursor for raw execute/mogrify calls below.
pgCursor = pgutils.getCursor()
# NOTE(review): `list` shadows the builtin; it holds the fetched tweets and
# is rebound by init(). Renaming would touch every function in this module.
list = None
# Populated from the term_type relation in searchAndInsertTweets().
tweetTypes = None
def log(string):
    """Print a log line to stdout.

    Uses the parenthesized form, which behaves identically for a single
    argument on both Python 2 (print statement) and Python 3.
    """
    print(string)
def init():
    """Fetch candidate tweets, clean them, then search/insert their images."""
    global list
    # Mix popular and recent results for the same query.
    list = tweepyutils.search(q='dream last night', result_type='popular', count=100) + tweepyutils.search(q='dream last night', count=100)
    #list = tweepyutils.search(q='dream last night',result_type='popular', count=10)
    #list = tweepyutils.search(q='dream last night filter:images',result_type='popular', count=10)
    # Most-engaged tweets first.
    list.sort(key=lambda x: x.favorite_count + x.retweet_count, reverse=True)
    # NOTE(review): debug output left in.
    print(dir(list[0]))
    #label retweets as null
    list = [parsetweets.ignoreRTs(tweet) for tweet in list]
    #remove null entries
    list = [item for item in list if item != None]
    #remove dupes
    list = parsetweets.removeDupes(list)
    #remove the twitter entities, search for those without nltk
    list = [parsetweets.removeEntities(tweet) for tweet in list]
    parsetweets.parseTweets(list)
    searchAndInsertTweets(len(list))
    pgutils.close()
def searchAndInsertTweets(count):
    """Fetch images for the first `count` tweets; insert the usable ones."""
    global tweetTypes
    tweetTypes = pgutils.getRelationByValues('term_type', 'type')
    for tweet in list[:count]:
        if searchTweetImages(tweet):
            insertTweet(tweet)
def searchTweetImages(tweet):
    """Return True if the tweet is new and images were found for every type.

    Side effects: initialises tweet.termIDs and tweet.screenNames, which
    tweepyutils.fetchImages presumably fills in — confirm in tweepyutils.
    """
    # Skip tweets we have already stored.
    exists = pgutils.getQueryDictionary('SELECT COUNT(*) as exists FROM tweet WHERE twitter_id=%s', tweet.id_str)
    if exists[0]['exists'] != 0:
        return False
    tweet.termIDs = []
    tweet.screenNames = []
    # NOTE(review): `type` shadows the builtin inside this loop.
    for type in tweetTypes:
        typeObj = tweetTypes[type]
        success = tweepyutils.fetchImages(typeObj, tweet)
        if not success:
            return False
    return True
#this all gonna be done in tweepyutils.fetchImages
#entities
# media, user_mentions, hashtags.
#media can be grabbed directly
#user_mentions we just want profile pic
#hashtags we have to search
#noun phrases
#original username
def insertTweet(tweet):
    """Insert a tweet row plus its term and user link rows into Postgres."""
    twitterID = tweet.id_str
    time = tweet.created_at
    #oembed endpoint https://dev.twitter.com/docs/api/1/get/statuses/oembed v1 endpoint
    # NOTE(review): the v1 statuses/oembed endpoint was retired long ago;
    # this request likely fails today — confirm and migrate if still used.
    oembedParams = {'id': twitterID, 'omit_script': 'true'}
    oembedRequest = requests.get('https://api.twitter.com/1/statuses/oembed.json', params=oembedParams)
    embedResponse = json.loads(oembedRequest.text)
    embed_html = embedResponse['html']
    processed = True
    display = True
    num_images = len(tweet.termIDs)
    q = 'INSERT INTO tweet (twitter_id, time, embed_html, processed, display, num_images) VALUES ( %s, %s, %s, %s, %s, %s ) RETURNING tweet_id'
    tweetID = pgutils.getQueryDictionary(q, twitterID, time, embed_html, processed, display, num_images)
    tweetID = tweetID[0]['tweet_id']
    #insert terms
    if len(tweet.termIDs) > 0:
        # mogrify escapes each tuple; joined into a multi-row VALUES clause.
        termIDsStr = ','.join(pgCursor.mogrify("(%s,%s)", (tweetID, termID)) for termID in tweet.termIDs)
        q = 'INSERT INTO tweet_has_term (tweet_id, term_id) VALUES ' + termIDsStr
        pgCursor.execute(q)
    #insert users
    if len(tweet.screenNames) > 0:
        twitterUsersStr = ','.join(pgCursor.mogrify("(%s, %s, %s)", [twitterUser['screen_name'], tweetID, twitterUser['relationship']]) for twitterUser in tweet.screenNames)
        q = 'INSERT INTO tweet_has_user (screen_name, tweet_id, relationship) VALUES ' + twitterUsersStr
        pgCursor.execute(q)
# Run the pipeline only when executed as a script, not when imported.
if __name__ == '__main__':
    init()
#for tag in tags:
# if tag[1][0] == "N":
# nouns.append(tag)
#print nouns
# nltk.help.upenn_tagset()
# http://www.nltk.org/book/ch07.html 7.2 Chunking
# https://gist.github.com/alexbowe/879414
# http://en.wikipedia.org/wiki/Noun_phrase
# http://en.wikipedia.org/wiki/English_grammar#Noun_phrases
| {
"repo_name": "nthitz/dreamlastnight",
"path": "dreamcatcher/getTweets.py",
"copies": "1",
"size": "3867",
"license": "mit",
"hash": 4903793119929637,
"line_mean": 33.8378378378,
"line_max": 173,
"alpha_frac": 0.6783035945,
"autogenerated": false,
"ratio": 3.3221649484536084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9403998058268486,
"avg_score": 0.01929409693702442,
"num_lines": 111
} |
# Actually Randomize Cards
# a.k.a. Randomize Cards As Opposed To Notes
# a.k.a. Randomize Cards Without Keeping Siblings Together
# bits and pieces cobbled together from anki/sched.py, aqt/browser.py, aqt/forms/browser.py
import random
from anki.hooks import addHook
from aqt import mw
from anki.utils import ids2str, intTime
from aqt.qt import *
from PyQt4 import QtCore, QtGui
# PyQt4 compatibility shim: QString.fromUtf8 exists under the old (v1) API;
# fall back to identity when QString is absent (API v2 string handling).
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    _fromUtf8 = lambda s: s
def actuallyRandomize(cids, start=1, step=1):
    """Shuffle the given card ids and assign each a sequential due position.

    Cards are shuffled individually (not grouped by note), then New cards
    get due = start, start+step, ... in shuffled order.
    """
    scids = ids2str(cids)
    now = intTime()
    random.shuffle(cids)
    due = {cid: start + offset * step for offset, cid in enumerate(cids)}
    # this looks needlessly complicated, but the "due" field acquires a new
    # meaning for non-New cards, so only type = 0 rows are updated.
    rows = []
    for (cid,) in mw.col.db.execute("select id from cards where type = 0 and id in "+scids):
        rows.append(dict(now=now, due=due[cid], usn=mw.col.usn(), cid=cid))
    mw.col.db.executemany("update cards set due=:due,mod=:now,usn=:usn where id = :cid", rows)
def actionActuallyRandomize(browser):
    """Menu handler: randomize the cards selected in the browser."""
    cards = browser.selectedCards()
    # beginReset/endReset bracket the model change; checkpoint makes the
    # operation undoable under the "Reposition" label.
    browser.model.beginReset()
    browser.mw.checkpoint(_("Reposition"))
    actuallyRandomize(cards)
    browser.onSearch(reset=False)
    browser.mw.requireReset()
    browser.model.endReset()
def setupMenus(browser):
    """Add the "Actually Randomize" action to the browser's Edit menu."""
    # NOTE(review): old-style PyQt4 QtGui.QAction / SIGNAL API.
    action = QtGui.QAction(browser)
    action.setObjectName(_fromUtf8("actionActuallyRandomize"))
    action.setText(_("Actually Randomize"))
    browser.form.menuEdit.addAction(action)
    browser.connect(action, SIGNAL("triggered()"), lambda: actionActuallyRandomize(browser))

# Register the menu hook when the browser UI is being built.
addHook('browser.setupMenus', setupMenus)
| {
"repo_name": "cooijmanstim/actually-randomize",
"path": "actually_randomize.py",
"copies": "2",
"size": "1668",
"license": "mit",
"hash": 3793556797617702000,
"line_mean": 33.75,
"line_max": 101,
"alpha_frac": 0.7146282974,
"autogenerated": false,
"ratio": 3.2578125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49724407974,
"avg_score": null,
"num_lines": null
} |
# ActuallySolveable Block Cipher
def bytesToInt(xs):
    """Interpret a little-endian sequence of byte values as one integer."""
    total = 0
    for position, byte in enumerate(xs):
        total += byte << (position * 8)
    return total
def intToBytes(x, width=8):
    """Little-endian list of byte values of x.

    `width` generalizes the previously hard-coded 8-byte block size;
    default preserves existing behaviour. `range` (not xrange) works on
    both Python 2 and 3.
    """
    return [(x >> (8 * i)) & 0xFF for i in range(width)]
# Byte substitution table (a permutation of 0..255).
# NOTE(review): relies on Python 2 integer division — `/ 2` floors here;
# under Python 3 `/` yields floats and this table would break (use // 2).
sbox = [ ((2 * i + 0x101) * 0x61 / 2) & 0xFF for i in range(256) ]
# Inverse permutation: sinv[sbox[i]] == i for all i.
sinv = sorted(range(256), key=lambda i: sbox[i])
def T(block):
    # bit transpose of 8 bytes
    # Treats the 8 bytes as an 8x8 bit matrix and transposes it with three
    # masked swap rounds (Hacker's Delight-style). The trailing L suffixes
    # are Python 2 long literals — this module is Python 2 only.
    x = bytesToInt(block)
    t = (x ^ (x >> 7)) & 0x00AA00AA00AA00AAL
    x = x ^ t ^ (t << 7)
    t = (x ^ (x >> 14)) & 0x0000CCCC0000CCCCL
    x = x ^ t ^ (t << 14)
    t = (x ^ (x >> 28)) & 0x00000000F0F0F0F0L
    x = x ^ t ^ (t << 28)
    return intToBytes(x)
def R(byte, n):
    """Rotate an 8-bit value right by n bits."""
    low_bits = byte & ((1 << n) - 1)
    return (byte >> n) | (low_bits << (8 - n))
def encrypt_round(block, key):
    """One block encryption: 8 rounds of key-mix, S-box, rotate, transpose,
    and a row-mixing step, followed by a final key XOR."""
    for i in xrange(8):
        block = [ block[j] ^ key[(i + j) & 0x7] for j in xrange(8) ]   # key mix (rotating schedule)
        block = [ sbox[block[j]] for j in xrange(8) ]                  # byte substitution
        block = [ R(block[j], j) for j in xrange(8) ]                  # per-position bit rotation
        block = T(block)                                               # 8x8 bit transpose
        # XOR every other byte with byte i (byte i itself is untouched).
        block = [ block[j] ^ block[i] if i != j else block[j] for j in xrange(8) ]
    return [ block[j] ^ key[j] for j in xrange(8) ]
def decrypt_round(block, key):
    """Inverse of encrypt_round: undo the final key XOR, then run the 8
    rounds backwards with each step inverted."""
    block = [ block[j] ^ key[j] for j in xrange(8) ]
    for i in xrange(7, -1, -1):
        # Undo the row-mixing XOR (XOR is its own inverse).
        block = [ block[j] ^ block[i] if i != j else block[j] for j in xrange(8) ]
        block = T(block)                                               # transpose is self-inverse
        block = [ R(block[j], 8 - j) for j in xrange(8) ]              # rotate back (left by j)
        block = [ sinv[block[j]] for j in xrange(8) ]                  # inverse S-box
        block = [ block[j] ^ key[(i + j) & 0x7] for j in xrange(8) ]   # undo key mix
    return block
def encrypt(message, key):
    """CBC-mode encryption of a list of byte values; zero-pads to 8 bytes.

    NOTE(review): fixed all-zero IV — identical plaintexts yield identical
    ciphertexts; fine for a CTF toy, not for real use.
    """
    message += [0] * (-len(message) % 8)   # pad to a whole number of blocks
    out = []
    iv = [0]*8
    for bi in range(len(message)//8):
        block = message[bi*8:bi*8+8]
        block = [x^y for x,y in zip(block, iv)]   # CBC chaining
        block2 = encrypt_round(block, key)
        out += block2
        iv = block2
    return out
def decrypt(message, key):
    """CBC-mode decryption, the inverse of encrypt().

    (The source line `return out` had dataset metadata fused onto it;
    restored here. No padding is stripped — callers see the zero padding.)
    """
    out = []
    iv = [0] * 8  # must match encrypt()'s fixed all-zero IV
    for bi in range(len(message) // 8):
        block = message[bi * 8:bi * 8 + 8]
        plain = decrypt_round(block, key)
        plain = [x ^ y for x, y in zip(plain, iv)]  # undo CBC chaining
        out += plain
        iv = block  # next block chains off this ciphertext block
    return out
"repo_name": "nickbjohnson4224/greyhat-crypto-ctf-2014",
"path": "challenges/actually/actually.py",
"copies": "1",
"size": "2117",
"license": "mit",
"hash": -1274105100204192000,
"line_mean": 32.09375,
"line_max": 82,
"alpha_frac": 0.522909778,
"autogenerated": false,
"ratio": 2.6933842239185752,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37162940019185753,
"avg_score": null,
"num_lines": null
} |
# Actuary, a Django Auditing app
# by Barry Melton, 7/5/2012
# MIT-licensed - http://www.opensource.org/licenses/mit-license.php/
import datetime
import settings
import logging
logger = logging.getLogger('django')
# Should import settings. There are settings values that need to be set,
# namely,
# ACTUARY_USE_CELERY, default = False
# ACTUARY_APIKEY, default = None, but will err if not present
# ACTUARY_TRACK_AJAX, default = False.
# Fill in defaults for optional settings. Accessing a missing attribute on
# a module raises AttributeError (NOT NameError), so the original
# `except NameError` never fired and the defaults were never applied,
# crashing later reads. Values themselves are kept as before.
# NOTE(review): the header comment documents False defaults for USE_CELERY
# and TRACK_AJAX while the code sets True — flagged, not changed.
try:
    settings.ACTUARY_USE_CELERY
except AttributeError:
    settings.ACTUARY_USE_CELERY = True

try:
    settings.ACTUARY_USE_MONGO
except AttributeError:
    settings.ACTUARY_USE_MONGO = False

try:
    settings.ACTUARY_TRACK_AJAX
except AttributeError:
    settings.ACTUARY_TRACK_AJAX = True
class ActuaryMiddleware(object):
    """Django middleware that builds an audit record for each request."""

    def process_request(self, request):
        """Collect request details into log_dict (AJAX only, if enabled)."""
        log_dict = {}
        if request.is_ajax():
            if settings.ACTUARY_TRACK_AJAX:
                if not request.user is None:  # NOTE(review): `is not None` preferred
                    log_dict["user"] = request.user.id
                else:
                    log_dict["user"] = 0  # anonymous / no user attached
                log_dict["timestamp"] = datetime.datetime.now()
                log_dict["host"] = request.get_host()
                log_dict["method"] = request.method
                log_dict["path"] = request.path
                log_dict["full_path"] = request.get_full_path()
                # NOTE(review): raw_post_data was removed in newer Django
                # (use request.body) — confirm target Django version.
                log_dict["raw_post"] = request.raw_post_data
            else:
                # We aren't logging AJAX requests, so we'll just pass here.
                pass
        # NOTE(review): Python 2 debug print left in; consider logger.debug.
        print log_dict
        if settings.ACTUARY_USE_CELERY:
            # Imported for the celery path, but not yet dispatched (stub).
            from actuary.tasks import Actuary
            pass
| {
"repo_name": "antiface/Django-Actuary",
"path": "actuary/middleware.py",
"copies": "2",
"size": "1647",
"license": "mit",
"hash": -4808610603088647000,
"line_mean": 29.5,
"line_max": 75,
"alpha_frac": 0.6077717061,
"autogenerated": false,
"ratio": 3.6518847006651884,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012613210064900885,
"num_lines": 54
} |
"""Acumen-specific report generation
HTML formatted output summarizing Acumen results. The core reports here are
useful for any project using Acumen. The following reports provide a snapshot
of the state of data, metrics, alerts and events.
- acumen.html : overall summary
- acumen.html#data : table of data feeds used
"""
import os
import json
from acumen import conf as AC
from acumen.utils.configs import data_configs
from acumen.utils.configs import load_configs
def _data(configs):
    """Render the data-feed summary table as HTML.

    TODO: break this out as a new tabular metric that can be rendered?
    """
    data_details = []  # NOTE(review): never used — candidate for removal
    html = '<table class="table table-hover table-striped sortable" style="width:auto;">'
    html += '<thead>'
    html += ' <tr>'
    html += ' <th>URI</th>'
    html += ' <th>Name</th>'
    html += ' <th>Size (MB)</th>'
    html += ' <th>Valid</th>'
    html += ' <th>Error</th>'
    html += ' <th>Updated</th>'
    html += ' <th>Errored</th>'
    html += ' </tr>'
    html += '</thead>'
    html += '<tbody>'
    # NOTE(review): iteritems() is Python 2 only.
    for uri, datum in sorted(configs.iteritems()):
        try:
            # Pretty format for HTML table display
            size = '%0.2f' % (int(datum['size']['uncompressed']) / 1024.0 / 1024) if 'size' in datum else '-'
            compressed_size = '%0.2f' % (int(datum['size']['compressed']) / 1024.0 / 1024) if 'size' in datum else '-'
            valid_rows = '{:,}'.format(int(datum['rows']['valid'])) if 'rows' in datum else '-'
            error_rows = '{:,}'.format(int(datum['rows']['error'])) if 'rows' in datum else '-'
            updated = datum['update']['success'] if 'update' in datum else '-'
            fail = datum['update']['fail'] if 'update' in datum and 'fail' in datum['update'] else '-'
            # Push values through HTML template
            html += """<tr>
            <td><a href="#" onclick="downloadS3File('data/{uri}.csv')">{uri}.csv</a></td>
            <td><a href="acumen/{uri}.html">{title}</a></td>
            <td>{size} ~ {compressed_size}</td>
            <td><a href="#" onclick="downloadS3File('data/{uri}__valid.csv')">{valid_rows}</a></td>
            <td><a href="#" onclick="downloadS3File('data/{uri}__error.csv')">{error_rows}</a><a href="#" onclick="downloadS3File('data/{uri}__error_log.csv')">*</a></td>
            <td>{updated}</td>
            <td>{fail}</td>
            </tr>""".format(
                uri=uri,
                title=datum['title'],
                size=size,
                compressed_size=compressed_size,
                valid_rows=valid_rows,
                error_rows=error_rows,
                updated=updated,
                fail=fail
            )
        except Exception as ex:
            # Best-effort rendering: a malformed feed entry is logged to
            # stdout and skipped rather than failing the whole report.
            import traceback
            print(traceback.format_exc())
    html += '</tbody></table>'
    return html
def _data_detail_columns(uri, datum):
    """Render the per-feed column table (name/type/required/description)."""
    # NOTE(review): this header template contains no {title}/{description}
    # placeholders, so the .format() kwargs below are currently unused.
    html = """
    <a name="columns"></a>
    <table class="table table-hover table-striped sortable" style="width:auto;">
    <thead>
    <tr>
    <th>Name</th>
    <th>Type</th>
    <th>Required</th>
    <th>Description</th>
    </tr>
    </thead>
    <tbody>
    """.format(title=datum['title'] if 'title' in datum else uri,
               description=datum['description'] if 'description' in datum else 'No description.')
    cols = datum['columns'] if 'columns' in datum else []
    for col in cols:
        # Each column gets an anchor so other pages can deep-link to it.
        html += """
        <tr>
        <td><a name="{name}"></a>{name}</td>
        <td>{type}</td>
        <td>{required}</td>
        <td>{description}</td>
        </tr>
        """.format(name=col['name'],
                   description=col['description'] if 'description' in col else 'No description.',
                   type=col['type'] if 'type' in col else 'Unkown',
                   required=col['required'] if 'required' in col else 'Unknown')
    html += """</tbody></table>"""
    return html
def get_title(config=None, default='No Title'):
    """Return config['title'] if present, else `default`.

    The default for `config` is None rather than a mutable `{}` literal
    (mutable-default anti-pattern); behaviour for callers is unchanged.
    """
    config = config if config is not None else {}
    return config['title'] if 'title' in config else default
def get_description(config=None, default='No description'):
    """Return config['description'] if present, else `default`.

    The default for `config` is None rather than a mutable `{}` literal
    (mutable-default anti-pattern); behaviour for callers is unchanged.
    """
    config = config if config is not None else {}
    return config['description'] if 'description' in config else default
def _data_detail_used_by(uri, datum, all_configs):
    """Render the "used by" table: configs that depend on this data feed."""
    html = """
    <a name="used_by"></a>
    <table class="table table-hover table-striped sortable" style="width:auto;">
    <thead>
    <tr>
    <th>URI</th>
    <th>Name</th>
    <th>Description</th>
    </tr>
    </thead>
    <tbody>
    """
    used_bys = datum['used_by'] if 'used_by' in datum else []
    for used_by in used_bys:
        # Try to find the name/description from the dependency.
        # Start from the no-arg defaults, then overwrite if the dependency
        # is found in any of the three config sections.
        title = get_title()
        description = get_description()
        for dt in ['data', 'metrics', 'alerts']:
            if used_by in all_configs[dt]:
                title = get_title(all_configs[dt][used_by])
                description = get_description(all_configs[dt][used_by])
        html += """
        <tr>
        <td><a name="{uri}"></a>{uri}</td>
        <td>{title}</td>
        <td>{description}</td>
        </tr>
        """.format(uri=used_by, title=title, description=description)
    html += """</tbody></table>"""
    return html
def _metrics(configs):
"""TODO: break this out as a new tabular metric that can be rendered?"""
data_details = []
html = '<table class="table table-hover table-striped sortable" style="width:auto;">'
html += '<thead>'
html += ' <tr>'
html += ' <th>URI</th>'
html += ' <th>Name</th>'
html += ' </tr>'
html += '</thead>'
html += '<tbody>'
for uri, datum in sorted(configs.iteritems()):
try:
# Pretty format for HTML table display
size = '%0.2f' % (int(datum['size']['uncompressed']) / 1024.0 / 1024) if 'size' in datum else '-'
compressed_size = '%0.2f' % (int(datum['size']['compressed']) / 1024.0 / 1024) if 'size' in datum else '-'
# Push values through HTML template
html += """<tr>
<td><a href="#" onclick="downloadS3File('metric/{uri}.csv')">{uri}.csv</a></td>
<td><a href="acumen/{uri}.html">{title}</a></td>
</tr>""".format(
uri=uri,
title=datum['title'],
size=size,
compressed_size=compressed_size,
)
except Exception as ex:
import traceback
print(traceback.format_exc())
html += '</tbody></table>'
return html
def process_report():
    """Regenerate the static Acumen HTML report tree under AC.REPORT_DIR.

    Builds the summary page plus one detail page per data feed and per
    metric config. Best-effort: a single broken config is logged and
    skipped rather than aborting the whole report.
    """
    import shutil
    # Clear existing directory. ignore_errors keeps the first run (when
    # the directory does not exist yet) from aborting with an OSError.
    shutil.rmtree(AC.REPORT_DIR, ignore_errors=True)
    # Copy over all static assets.
    shutil.copytree(AC.REPORT_TEMPLATE_DIR, AC.REPORT_DIR)
    # Acumen page with data, metrics, alerts, and events.
    with open(AC.REPORT_TEMPLATE_DIR + 'acumen.html') as f:
        template = f.read()
    # Load all the configs and calc dependency trees.
    all_configs = load_configs()
    # Main page for the Acumen summary.
    with open(AC.REPORT_DIR + 'acumen.html', 'w') as f:
        f.write(template
                .replace('{data}', _data(all_configs['data']))
                .replace('{metrics}', _metrics(all_configs['metrics']))
                .replace('{alerts}', 'No alerts configured.')
                .replace('{events}', 'No events displayed.')
                )
    # Data detail page per data feed.
    with open(AC.REPORT_DIR + 'acumen_data.html') as f:
        data_template = f.read()
    if not os.path.exists(AC.REPORT_DIR + 'acumen'):
        os.makedirs(AC.REPORT_DIR + 'acumen')
    # .items() instead of the Python-2-only .iteritems() (works on 2 and 3).
    for uri, datum in all_configs['data'].items():
        try:
            columns = _data_detail_columns(uri, datum)
            used_by = _data_detail_used_by(uri, datum, all_configs)
            content = (data_template
                       .replace('{title}', get_title(datum))
                       .replace('{description}', get_description(datum))
                       .replace('{used_by}', used_by)
                       .replace('{columns}', columns)
                       .replace('{config}', json.dumps(datum, sort_keys=True, indent=4, separators=(',', ': '))))
            with open(AC.REPORT_DIR + 'acumen/' + uri + '.html', 'w') as f:
                f.write(content)
        except Exception:
            import traceback
            print(traceback.format_exc())
    # Metric detail page per config.
    with open(AC.REPORT_DIR + 'acumen_metrics.html') as f:
        template = f.read()
    if not os.path.exists(AC.REPORT_DIR + 'acumen'):
        os.makedirs(AC.REPORT_DIR + 'acumen')
    for uri, datum in all_configs['metrics'].items():
        try:
            content = (template
                       .replace('{title}', get_title(datum))
                       .replace('{description}', get_description(datum))
                       .replace('{config}', json.dumps(datum, sort_keys=True, indent=4, separators=(',', ': '))))
            with open(AC.REPORT_DIR + 'acumen/' + uri + '.html', 'w') as f:
                f.write(content)
        except Exception:
            import traceback
            print(traceback.format_exc())
if __name__ == "__main__":
    import sys
    # Bug fix: the original guard was `len(sys.argv) < 1`, which can never
    # be true (argv always contains at least the script name), so the usage
    # text was dead code and the report always ran. Show usage only when
    # unexpected extra arguments are supplied. Parenthesized print() works
    # on both Python 2 and 3 for a single argument.
    if len(sys.argv) > 1:
        print('Usage:')
        print('python -m acumen.report')
    else:
        process_report()
| {
"repo_name": "jfalkner/acumen",
"path": "acumen/report.py",
"copies": "1",
"size": "9266",
"license": "apache-2.0",
"hash": -7317640222883566000,
"line_mean": 37.1316872428,
"line_max": 183,
"alpha_frac": 0.539607166,
"autogenerated": false,
"ratio": 3.778955954323002,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48185631203230017,
"avg_score": null,
"num_lines": null
} |
# acu.py
# script that scrapes the ACU votes on "http://acuratings.conservative.org/acu-federal-legislative-ratings/"
# last time validated: Oct 16, 2015
# Owner: Yubo Tian
from bs4 import BeautifulSoup
import urllib.request
import psycopg2
import re
from optparse import OptionParser
import sys
BASE_URL = "http://acuratings.conservative.org/Vote-Description/"
ERR = -1;
# function that takes in a vote_url, return the formatted vote info
def process_vote(vote_url):
    """Fetch one ACU vote page and return its parsed fields, or ERR.

    Returns a dict with vote_date (ISO), vote_number, vote_description and
    vote_title, or ERR when the page does not describe a roll-call vote.
    """
    page = BeautifulSoup(urllib.request.urlopen(vote_url).read(), "lxml")
    title = page.findAll('h1')[1].text.strip()
    if len(title) <= 1:
        return ERR
    roll_call_pos = title.lower().find('roll call')
    if roll_call_pos == -1:
        return ERR
    # The first number after "roll call" is the vote number.
    vote_number = int(re.search(r'\d+', title[roll_call_pos:]).group())
    description = page.find('h3').text.strip()
    # Locate a "<Month> <day>, <year>"-style date inside the description.
    date_match = re.search(r'\s(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)(\S)*(\W)*\d*(\W)*\d\d\d\d', description)
    vote_date = date_convert(date_match.group())
    print(vote_date + '\t' + title)
    return {'vote_date': vote_date, 'vote_number': vote_number,
            'vote_description': description, 'vote_title': title}
def date_convert(date_str):
    """Convert a scraped ' May 23, 2015'-style date to ISO 'YYYY-MM-DD'."""
    # First run of digits is the day; zero-pad to two characters.
    day = '%02d' % int(re.search(r'\d+', date_str).group())
    month = month_convert(date_str[1:4])
    year = date_str[-4:]
    return '-'.join((year, month, day))
def month_convert(month):
    """Map a three-letter month abbreviation (any case) to '01'..'12'.

    Raises KeyError for unrecognized abbreviations.
    """
    table = {
        'jan': '01', 'feb': '02', 'mar': '03', 'apr': '04',
        'may': '05', 'jun': '06', 'jul': '07', 'aug': '08',
        'sep': '09', 'oct': '10', 'nov': '11', 'dec': '12',
    }
    return table[month.lower()]
def main(argv):
    """Scrape ACU vote pages for each requested year and cross-reference
    them against the local `votes` table, writing results to a file.

    Args:
        argv: command-line arguments (without the script name), expected
            to carry -o <output file> and -y <comma-separated years>.
    """
    # parse command line input
    usage = "usage: %prog -o <filename> -y <year1,year2,yearN>"
    parser = OptionParser(usage=usage)
    parser.add_option("-o", action="store", type="string", dest="output_filename", help="set output filename")
    parser.add_option("-y", type="string", dest="years")  # default action is store
    (options, args) = parser.parse_args(argv)
    if not options.years or not options.output_filename:
        parser.error("Incorrect number of arguments")
    target_years = options.years.split(',')
    # set up python and psql connection
    try:
        conn = psycopg2.connect("dbname = 'politics'")
    except psycopg2.Error:
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit.
        print('Unable to set up connection, exiting')
        return
    with open(options.output_filename, "w") as text_file:
        for year in target_years:
            print('\t\t========= ACU Result of year ' + str(year) + ' ========= \n', file=text_file)
            print("\nProcessing ACU votes of year " + str(year) + '......')
            year_url = BASE_URL + "?year1=" + str(year)
            index = 1
            # increment from 1 until vote url is empty
            # alternatively, for each year, go to the url to grab the total # of votes
            while True:
                curr_vote_url = year_url + "&chamber=12&issue=" + str(index) + "&s="
                vote_info = process_vote(curr_vote_url)
                if vote_info == ERR:
                    break
                # use vote_number, vote_date in vote_info to query db, output to file
                curs = conn.cursor()
                # Parameterized query — the original built the SQL by string
                # concatenation, which is injection-prone and quote-fragile.
                query = "SELECT id, date, subject FROM votes WHERE number = %s AND date = %s;"
                curs.execute(query, (vote_info['vote_number'], vote_info['vote_date']))
                for row in curs:
                    print(row, file=text_file)
                print('{' + '\n'.join("'%s' : '%s'" % (key, val) for (key, val) in vote_info.items()) + '}' + '\n', file=text_file)
                index = index + 1
if __name__ == "__main__":
    # CLI entry point: forward everything after the script name to main().
    main(sys.argv[1:])
    print("\nbye python script\n")
"repo_name": "bwalenz/us-congress-data",
"path": "scripts/SIG-scripts/acu.py",
"copies": "1",
"size": "3657",
"license": "apache-2.0",
"hash": 5297706230622604000,
"line_mean": 27.8031496063,
"line_max": 146,
"alpha_frac": 0.6253759912,
"autogenerated": false,
"ratio": 2.76,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38853759911999997,
"avg_score": null,
"num_lines": null
} |
def divide_by_seven(current, total):
    """Add *current* into the running *total* and reduce it modulo 7.

    Returns the (current, total) pair so callers can chain the steps of
    the day-of-week calculation.

    Bug fix: the original only reduced when ``total > 7``, so a total of
    exactly 7 survived and later indexed past the 7-element weekday table
    (e.g. day_of_the_week(2016, 5, 1) raised IndexError). Since an
    intermediate total of 7 behaved identically to 0 in all later steps,
    reducing with an unconditional mod preserves every non-crashing
    result while fixing the crash.
    """
    total = (total + current) % 7
    return current, total
def century_item(current, total):
    """Century item: twice the complement of (century mod 4), folded mod 7."""
    item = 2 * (3 - current % 4)
    return divide_by_seven(item, total)
def year_item(current, total):
    """Year item: dozens + remainder + fours-within-remainder, folded mod 7."""
    dozens, overplus = divmod(current, 12)
    return divide_by_seven(dozens + overplus + overplus // 4, total)
def month_item(current, total):
    """Month item lookup (1=Jan .. 12=Dec), folded mod 7.

    A month outside 1-12 passes through unchanged, matching the original
    if/elif chain which left `current` untouched in that case.
    """
    table = {1: 0, 2: 3, 3: 3, 4: 6, 5: 1, 6: 4,
             7: 6, 8: 2, 9: 5, 10: 0, 11: 3, 12: 5}
    return divide_by_seven(table.get(current, current), total)
def day_item(year, month, current, total):
    """Day item: the day of month, minus one in Jan/Feb of a leap year
    (a result of 0 wraps to 7, which is congruent to 0 mod 7).

    Bug fix: the original only handled the day == 1 special case and left
    every other January/February day in a leap year undecremented, which
    shifted the final answer one weekday late (e.g. Jan 2 2016 came out
    'Sun' instead of the correct 'Sat'). Verified against Jan 1, Jan 2
    and Feb 29 of 2016.
    """
    if (month == 1 or month == 2) and \
            year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
        current = current - 1
        if current == 0:
            current = 7
    return divide_by_seven(current, total)
def day_of_the_week(y, m, d):
    """Return 'Sun'..'Sat' for the Gregorian date (y, m, d) using the
    century/year/month/day item method (Lewis-Carroll-style mental
    arithmetic): each step folds its item into a running total mod 7.
    """
    total = 0
    _, total = century_item(int(str(y)[:2]), total)
    _, total = year_item(y % 100, total)
    _, total = month_item(m, total)
    _, total = day_item(y, m, d, total)
    weekdays = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
    return weekdays[total]
#print(day_of_the_week(1845, 5, 23))
#print(day_of_the_week(1974, 9, 7))
#print(day_of_the_week(1977,10, 16))
#print(day_of_the_week(2016, 5, 23))
#print(day_of_the_week(2016, 5, 24))
| {
"repo_name": "niklasnson/TDP015",
"path": "inlämning_4/day_of_the_week.py",
"copies": "1",
"size": "1880",
"license": "mit",
"hash": -3957070365534817300,
"line_mean": 26.2463768116,
"line_max": 66,
"alpha_frac": 0.5622340426,
"autogenerated": false,
"ratio": 3.2302405498281788,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4292474592428179,
"avg_score": null,
"num_lines": null
} |
"""A Custom Estimator implementing linear regression for MNIST using Keras.
For reference:
* https://www.tensorflow.org/extend/estimators.
* https://www.tensorflow.org/get_started/mnist/beginners.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
print (tf.__version__) # tested with v 1.2
import tensorflow.contrib.keras as K
# Model builder
from tensorflow.python.estimator import model_fn as model_fn_lib
# Input function
from tensorflow.python.estimator.inputs import numpy_io
# MNIST
from tensorflow.examples.tutorials.mnist import input_data
# Run an experiment
from tensorflow.contrib.learn.python.learn import learn_runner
# Enable TensorFlow logs
tf.logging.set_verbosity(tf.logging.INFO)
# Define the model, using Keras
def model_fn(features, labels, mode, params):
    """Model function for linear regressor.

    Builds a single Keras Dense layer over the 784 MNIST pixels and
    returns the EstimatorSpec appropriate for PREDICT/TRAIN/EVAL.
    """
    # 784 pixels -> 10 class logits via one fully connected layer.
    logits = K.layers.Dense(10, input_dim=784)(features['x'])
    predictions = {
        'classes': tf.argmax(input=logits, axis=1),
        'probabilities': tf.nn.softmax(logits),
    }
    # Inference mode needs no labels or loss.
    if mode == tf.estimator.ModeKeys.PREDICT:
        return model_fn_lib.EstimatorSpec(mode=mode, predictions=predictions)
    loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)
    if mode == tf.estimator.ModeKeys.TRAIN:
        # Adam step that also advances the global step counter.
        step_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.train.get_global_step(),
            learning_rate=params['learning_rate'],
            optimizer='Adam')
        return model_fn_lib.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            loss=loss,
            train_op=step_op)
    # Evaluation: accuracy of argmax(logits) against one-hot labels.
    metrics = {
        'accuracy': tf.metrics.accuracy(
            tf.argmax(input=logits, axis=1),
            tf.argmax(input=labels, axis=1))
    }
    return model_fn_lib.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        loss=loss,
        eval_metric_ops=metrics)
# Import the MNIST dataset (downloads to /tmp/MNIST/ on first run).
mnist = input_data.read_data_sets('/tmp/MNIST/', one_hot=True)
x_train = mnist.train.images
y_train = mnist.train.labels
x_test = mnist.test.images
y_test = mnist.test.labels

# parameters
LEARNING_RATE = 1e-4
BATCH_SIZE = 128
STEPS = 10000
model_params = {'learning_rate': LEARNING_RATE}

# Input functions: shuffled + endlessly repeating for training,
# single-pass and unshuffled for evaluation.
x_train_dict = {'x': x_train}
train_input_fn = numpy_io.numpy_input_fn(
    x_train_dict, y_train, batch_size=BATCH_SIZE,
    shuffle=True, num_epochs=None, queue_capacity=1000, num_threads=1)

x_test_dict = {'x': x_test}
test_input_fn = numpy_io.numpy_input_fn(
    x_test_dict, y_test, batch_size=BATCH_SIZE, shuffle=False, num_epochs=1)


# create experiment
def experiment_fn(run_config, hparams):
    # create estimator
    del hparams  # unused arg
    estimator = tf.estimator.Estimator(model_fn=model_fn,
                                       params=model_params,
                                       config=run_config)
    return tf.contrib.learn.Experiment(
        estimator,
        train_input_fn=train_input_fn,
        eval_input_fn=test_input_fn,
        train_steps=STEPS
    )


# run experiment (checkpoints and summaries go to /tmp/beginners_mnist)
learn_runner.run(
    experiment_fn,
    run_config=tf.contrib.learn.RunConfig(model_dir='/tmp/beginners_mnist'))
| {
"repo_name": "mari-linhares/tensorflow-workshop",
"path": "code_samples/MNIST_Estimators/keras/beginners.py",
"copies": "1",
"size": "3207",
"license": "apache-2.0",
"hash": 1474506712587315200,
"line_mean": 26.1779661017,
"line_max": 77,
"alpha_frac": 0.6894293732,
"autogenerated": false,
"ratio": 3.3722397476340693,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45616691208340693,
"avg_score": null,
"num_lines": null
} |
"""A custom grid plane with a lot more flexibility than
GridPlane. This also only works for non-unstructured/non-polygonal
datasets.
"""
# Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance
from traitsui.api import View, Group, Item
# Local imports
from mayavi.components import custom_grid_plane
from mayavi.modules.contour_grid_plane import \
ContourGridPlane, Contour, Actor
######################################################################
# `CustomGridPlane` class.
######################################################################
class CustomGridPlane(ContourGridPlane):
grid_plane = Instance(custom_grid_plane.Component,
allow_none=False, record=True)
# Overriding the ContourGridPlane's default view.
view = View(Group(Item(name='grid_plane', style='custom'),
show_labels=False,
label='GridPlane'),
Group(Group(Item(name='enable_contours')),
Group(Item(name='contour', style='custom',
enabled_when='object.enable_contours'),
show_labels=False,
),
label='Contour',
),
Group(Item(name='actor', style='custom'),
label='Actor',
show_labels=False)
)
######################################################################
# `Module` interface
######################################################################
def setup_pipeline(self):
# Note that we don't call the parent class method here. This
# is intentional to avoid problems with the execution of the
# VTK pipeline.
# Create the components.
self.grid_plane = custom_grid_plane.CustomGridPlane()
self.contour = Contour(auto_contours=True, number_of_contours=10)
self.actor = Actor()
self.enable_contours = False
self.actor.property.point_size = 2
| {
"repo_name": "liulion/mayavi",
"path": "mayavi/modules/custom_grid_plane.py",
"copies": "3",
"size": "2178",
"license": "bsd-3-clause",
"hash": 9023820482341209000,
"line_mean": 37.2105263158,
"line_max": 74,
"alpha_frac": 0.5142332415,
"autogenerated": false,
"ratio": 4.663811563169165,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6678044804669165,
"avg_score": null,
"num_lines": null
} |
"""A custom ILAMB confrontation for net ecosystem productivity (nep)."""
import os
import numpy as np
from ILAMB.Confrontation import Confrontation
from ILAMB.Variable import Variable
from ILAMB.ilamblib import MakeComparable
class ConfNEP(Confrontation):
    """Confront ``nep`` model outputs with ``nee`` observations.

    Net ecosystem productivity (``nep``) is a CMIP5 standard output
    provided by the MsTMIP models, and is the inverse of net ecosystem
    exchange (``nee``), for which benchmark datasets are provided in
    ILAMB.
    """

    def __init__(self, **keywords):
        # No configuration beyond the base Confrontation is needed.
        super(ConfNEP, self).__init__(**keywords)

    def stageData(self, m):
        """Return the (obs, mod) variable pair for model *m*, both
        sign-flipped so the nee benchmark and the nep model output are
        directly comparable."""
        obs = Variable(filename=self.source,
                       variable_name=self.variable)
        obs.data *= -1.0  # Reverse sign of benchmark data (nee -> -nee).
        mod = m.extractTimeSeries(self.variable,
                                  alt_vars=self.alternate_vars)
        mod.data *= -1.0  # Reverse sign of the model outputs to match.
        obs, mod = MakeComparable(obs, mod, clip_ref=True,
                                  logstring="[%s][%s]" %
                                  (self.longname, m.name))
        return obs, mod
| {
"repo_name": "permamodel/ILAMB-experiments",
"path": "2/ConfNEP.py",
"copies": "1",
"size": "1208",
"license": "mit",
"hash": 7404815312859305000,
"line_mean": 33.5142857143,
"line_max": 72,
"alpha_frac": 0.6117549669,
"autogenerated": false,
"ratio": 3.8594249201277955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9971179887027795,
"avg_score": 0,
"num_lines": 35
} |
''' A custom Importer making use of the import hook capability
https://www.python.org/dev/peps/pep-0302/
Its purpose is to convert would-be Python module that use non-standard
syntax into a correct form prior to importing them.
'''
# imp is deprecated but I wasn't (yet) able to figure out how to use
# its replacement, importlib, to accomplish all that is needed here.
import imp
import re
import sys
MAIN = False
from_experimental = re.compile("(^from\s+__experimental__\s+import\s+)")
class ExperimentalImporter(object):
    '''According to PEP 302, an importer only needs two methods:
    find_module and load_module.
    '''

    def find_module(self, name, path=None):
        '''We don't need anything special here, so we just use the standard
        module finder which, if successful,
        returns a 3-element tuple (file, pathname, description).
        See https://docs.python.org/3/library/imp.html for details
        '''
        # NOTE(review): `path` is accepted per the finder protocol but is
        # not forwarded to imp.find_module — submodules inside packages may
        # therefore not be found the standard way; confirm intended.
        self.module_info = imp.find_module(name)
        return self

    def load_module(self, name):
        '''Load a module, given information returned by find_module().
        '''
        # According to PEP 302, the following is required
        # if reload() is to work properly
        if name in sys.modules:
            return sys.modules[name]
        path = self.module_info[1]  # see find_module docstring above
        module = None
        if path is not None:  # path=None is the case for some stdlib modules
            with open(path) as source_file:
                module = self.convert_experimental(name, source_file.read())
        if module is None:
            # No experimental marker found: load through the normal machinery.
            module = imp.load_module(name, *self.module_info)
        return module

    def convert_experimental(self, name, source):
        '''Used to convert the source code, and create a new module
        if one of the lines is of the form

        ^from __experimental__ import converter1 [, converter2, ...]

        (where ^ indicates the beginning of a line)
        otherwise returns None and lets the normal import take place.
        Note that this special code must be all on one physical line --
        no continuation allowed by using parentheses or the
        special \ end of line character.

        "converters" are modules which must contain a function

        transform_source_code(source)

        which returns a tranformed source.
        '''
        global MAIN
        lines = source.split('\n')
        # for/else: `break` on the first marker line, `else` means no marker.
        for linenumber, line in enumerate(lines):
            if from_experimental.match(line):
                break
        else:
            return None  # normal importer will handle this
        # we started with: "from __experimental__ import converter1 [,...]"
        line = from_experimental.sub(' ', line)
        # we now have: "converter1 [,...]"
        line = line.split("#")[0]  # remove any end of line comments
        converters = line.replace(' ', '').split(',')
        # and now: ["converter1", ...]
        # drop the "fake" import from the source code
        del lines[linenumber]
        source = '\n'.join(lines)
        # Apply each converter's source-to-source transform in order.
        for converter in converters:
            mod_name = __import__(converter)
            source = mod_name.transform_source_code(source)
        module = imp.new_module(name)
        # From PEP 302: Note that the module object must be in sys.modules
        # before the loader executes the module code.
        # This is crucial because the module code may
        # (directly or indirectly) import itself;
        # adding it to sys.modules beforehand prevents unbounded
        # recursion in the worst case and multiple loading in the best.
        sys.modules[name] = module
        if MAIN:  # see below: set when launched as a script runner
            module.__name__ = "__main__"
            MAIN = False
        exec(source, module.__dict__)
        return module
# Install the custom importer. NOTE(review): this *replaces* sys.meta_path
# rather than prepending to it — on Python 3 that discards the default
# finders; confirm whether prepending was intended.
sys.meta_path = [ExperimentalImporter()]

if __name__ == '__main__':
    # Bug fix: the original guard was `len(sys.argv) >= 1`, which is always
    # true (argv[0] is the script name), so running with no argument
    # crashed with IndexError on sys.argv[1].
    if len(sys.argv) > 1:
        # this program was started by
        #    $ python import_experimental.py some_script
        # and we will want some_script.__name__ == "__main__"
        MAIN = True
        __import__(sys.argv[1])
| {
"repo_name": "aroberge/python_experiments",
"path": "version4/import_experimental.py",
"copies": "1",
"size": "4194",
"license": "cc0-1.0",
"hash": 3537951259025348600,
"line_mean": 33.95,
"line_max": 78,
"alpha_frac": 0.6099189318,
"autogenerated": false,
"ratio": 4.319258496395468,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5429177428195469,
"avg_score": null,
"num_lines": null
} |
"""A custom JSON encoder with support for Decimal and SqlAlchemy models."""
import json
import decimal as d
from db import OrmBase
from sqlalchemy.inspection import inspect
from datetime import datetime
class TswStatsEncoder(json.JSONEncoder):
    """A custom JSON encoder with support for Decimal and SqlAlchemy models."""

    def default(self, obj):
        """Dispatch on type; defer to the base encoder for anything else."""
        if isinstance(obj, d.Decimal):
            return self.encodeDecimal(obj)
        if isinstance(obj, datetime):
            return obj.isoformat()
        if isinstance(obj, OrmBase):
            return self.encodeModel(obj)
        return json.JSONEncoder.default(self, obj)

    def encodeDecimal(self, obj):
        """Whole-valued Decimals become ints; others a 2-decimal string."""
        whole = int(obj)
        return whole if whole == obj else '{:.2f}'.format(obj)

    def encodeModel(self, obj):
        """Serialize a SqlAlchemy model as {'__class__': name, col: value}."""
        state = inspect(obj)
        encoded = {'__class__': type(obj).__name__}
        for column_name in state.mapper.column_attrs.keys():
            encoded[column_name] = state.attrs[column_name].value
        return encoded
| {
"repo_name": "proegssilb/tsw-stats",
"path": "jsonEncoder.py",
"copies": "1",
"size": "1203",
"license": "apache-2.0",
"hash": 5593440801891939000,
"line_mean": 31.5135135135,
"line_max": 79,
"alpha_frac": 0.6184538653,
"autogenerated": false,
"ratio": 4.177083333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 37
} |
"""A custom labler class.
Custom labler for checking and removing less used categorical
class and also ensure to data coverage is effective.
Default settings for data coverage is 80%.
"""
import pandas as pd
class CUST_CATEGORY_LABELER():
    """Custom Mapper Function.

    Based on pd.Series.value_counts, a labler is prepared
    to cover one of following details

    1. cover top 80% of groups(DEFAULT) (or)
    2. top 500 groups

    A special `transform_analysis` function is provided to
    understand how value_counts are spread out

    Example:
        >>> # Test Data
        >>> ss = pd.Series(np.arange(5000) // 5)
        >>> ss = ss.map(lambda x: str(x))
        >>>
        >>> # creating labler
        >>> labler = CUST_CATEGORY_LABELER()
        >>> labler.fit(funder)
        >>>
        >>> # testing
        >>> _ = labler.check_group_coverage(90)
        90 percentage of GROUPS coverage mean, 1691(in number) groups
        >>>
        >>> _ = labler.check_data_coverage(90)
        90 percentage of DATA coverage mean, 666 (in number) groups
    """

    def __init__(self):
        """Defaults."""
        self.DB = None                    # label -> percentage map of kept groups
        self.GROUP_COVERAGE = 500         # default cap on number of groups
        self.DATA_COVERAGE_LIMIT = 80     # default percentage of data to cover
        self.DATA = None                  # the fitted pd.Series
        self.DATA_VC = None               # value_counts as percentages

    def fit(self, col_data):
        """Fit the data to class.

        Args:
            col_data (pd.Series): categorical column to be labelled.

        Returns:
            None on success; an error string for non-Series input (kept
            as a returned string for backward compatibility).
        """
        # isinstance instead of the original exact type() comparison so
        # Series subclasses are accepted too.
        if not isinstance(col_data, pd.Series):
            return 'Error: input data should be - pd.core.series.Series'
        self.DATA = col_data
        # by default values counts are sorted
        self.DATA_VC = self.DATA.value_counts()
        # converting them to percentages
        self.DATA_VC /= (self.DATA.shape[0] / 100)

    def check_data_coverage(self, data_coverage=None):
        """Check the data coverage.

        Args:
            data_coverage(float): Range is (0.0, 100.0)

        Returns:
            Number of groups needed to cover *data_coverage* percent of
            rows (or an error string for out-of-range input).
        """
        if data_coverage is None:
            # default coverage is 80(%)
            data_coverage = self.DATA_COVERAGE_LIMIT
        if data_coverage < 1:
            return 'InputError: provide inputs between (0.0 and 100.00]'
        counter = 1
        cum_group_percentage = 0
        # Walk groups from most to least frequent until coverage is met.
        for group_name, group_percentage in list(self.DATA_VC.items()):
            counter += 1
            cum_group_percentage += group_percentage
            if cum_group_percentage > data_coverage:
                break
        tmp = '%s percentage of DATA coverage mean, %s (in number) groups'
        tmp %= (data_coverage, counter)
        print(tmp)
        return counter

    def check_group_coverage(self, groups_coverage=80):
        """Convert a percentage of groups into a group count.

        Args:
            groups_coverage(int/float): Range between (0 - 100);
                percentage(%) of the groups to be covered. Falsy input
                falls back to the default GROUP_COVERAGE cap.

        Returns:
            The number of groups (or an error string for bad input).
        """
        groups_count = 0
        if groups_coverage:
            if 0 < groups_coverage < 100:
                groups_count = self.DATA_VC.shape[0] * groups_coverage / 100
            else:
                return 'InputError: input number to be in between (0.0 - 100.]'
        else:
            groups_count = self.GROUP_COVERAGE
            print('Using default groups_coverage !')
        tmp = '%s percentage of GROUPS coverage mean, %s(in number) groups'
        tmp %= (groups_coverage, groups_count)
        print(tmp)
        return groups_count

    def transform_analysis(self, data_coverage=None, groups_coverage=None):
        """Post transform data view.

        Prints one line per group (rank, cumulative %, name, group %)
        until the requested data or group coverage is reached.

        Args:
            * data_coverage(int): Range between (0 - 100)
                percentage(%) of the amount data to be covered.
            * groups_coverage(int/float):
                Limit the amount groups(variety) coverage. All input can be
                provided as fraction or a specific count with in limit.

        Example:
            >>> labler = CUST_CATEGORY_LABELER()
            >>> labler.fit(RAW_X.funder)
            >>>
            >>> # to checking report for covering 85.50% data
            >>> labler.transform_analysis(data_coverage=85.50)
        """
        counter = 0
        cum_group_percentage = 0
        if data_coverage is None and groups_coverage is None:
            return 'No Inputs provided'
        if data_coverage or groups_coverage is None:
            # prefer coverage of data; consider all groups
            groups_coverage = self.DATA_VC.shape[0]
        else:
            # coverage of groups; consider all data
            data_coverage = 100
            groups_coverage = self.check_group_coverage(groups_coverage)
        for group_name, group_percentage in list(self.DATA_VC.items()):
            counter += 1
            cum_group_percentage += group_percentage
            res = "%d, %.2f, %s, %.2f" % (counter, cum_group_percentage,
                                          group_name, group_percentage)
            print(res)
            # hard limit counter - as print used in above.
            if counter > 1000:
                break
            # soft limit counters
            if (cum_group_percentage > data_coverage):
                break
            if (counter > groups_coverage):
                break

    def transform(self, groups_coverage=None):
        """Map the fitted series onto its top groups.

        Default transformation is based on coverage: keep enough of the
        most frequent groups to cover DATA_COVERAGE_LIMIT percent of
        rows; everything else becomes the literal label 'someother'.

        Args:
            groups_coverage(int): explicit number of groups to keep.

        Returns:
            pd.Series with rare categories replaced by 'someother'.
        """
        if not groups_coverage:
            groups_coverage = self.check_data_coverage()
        # dictionary of the kept groups (label -> percentage)
        self.DB = dict(self.DATA_VC.head(groups_coverage))
        ss = dict(self.DATA_VC.head(groups_coverage))

        def mapper(x):
            if x in ss:
                return x
            else:
                return 'someother'
        return self.DATA.apply(mapper)

    def fit_transform(self, col_data):
        """Fit data and then transform."""
        self.fit(col_data)
        return self.transform()

    def etransform(self, data):
        """For external pd.Series transformations: apply the labels
        learnt from the fitted series to another series *data*."""
        groups_coverage = self.check_data_coverage()
        self.DB = dict(self.DATA_VC.head(groups_coverage))
        ss = dict(self.DATA_VC.head(groups_coverage))

        def mapper(x):
            if x in ss:
                return x
            else:
                return 'someother'
        return data.apply(mapper)
| {
"repo_name": "msampathkumar/datadriven_pumpit",
"path": "scripts/sam_custom_labeler.py",
"copies": "1",
"size": "6494",
"license": "apache-2.0",
"hash": 6862054132002705000,
"line_mean": 31.3084577114,
"line_max": 79,
"alpha_frac": 0.5566676933,
"autogenerated": false,
"ratio": 4.238903394255875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5295571087555875,
"avg_score": null,
"num_lines": null
} |
""" a custom layer for 'argmax', maybe we should implement this in standard way.
more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/argmax.html
"""
from .register import register
def import_fluid():
    """Lazily import and return paddle.fluid, so merely importing this
    module does not require paddle to be installed."""
    import paddle.fluid as fluid
    return fluid
def argmax_shape(input_shape, out_max_val=False, top_k=1, axis=-1):
    """Calculate the output shape of an ArgMax layer from its input shape.

    Args:
        input_shape (list of num): shape of the input blob.
        out_max_val (bool): caffe ArgMax parameter; when True the output
            carries both indices and values, doubling the last dimension.
        top_k (int): caffe ArgMax parameter.
        axis (int): caffe ArgMax parameter; only the last axis is supported.

    Returns:
        list of num: the output shape.
    """
    shape = list(input_shape)
    if axis < 0:
        axis += len(shape)
    msg = ('only can be applied on the last dimension[axis:%d, %s] now,'
           'make sure you have set axis param in xxx.prototxt file'
           % (axis, str(shape)))
    assert axis + 1 == len(shape), msg
    shape[-1] = top_k
    if out_max_val is True:
        # Indices and values are concatenated along the last dimension.
        shape[-1] *= 2
    return shape
def argmax_layer(input, name, out_max_val=False, top_k=1, axis=-1):
    """Build a layer of type 'ArgMax' using fluid.

    Args:
        input (variable): input fluid variable for this layer.
        name (str): name for this layer.
        out_max_val (bool): caffe ArgMax parameter; when True the output
            concatenates top-k indices and values along *axis*.
        top_k (int): caffe ArgMax parameter.
        axis (int): caffe ArgMax parameter.

    Returns:
        variable: the output variable for this layer.
    """
    fluid = import_fluid()
    if axis < 0:
        axis += len(input.shape)
    if out_max_val is True:
        scores, indices = fluid.layers.topk(input=input, k=top_k)
        # Cast indices to the score dtype so they can be concatenated.
        indices = fluid.layers.cast(indices, dtype=scores.dtype)
        return fluid.layers.concat([indices, scores], axis=axis, name=name)
    _, indices = fluid.layers.topk(input=input, k=top_k, name=name)
    return indices
# Register the converter so the translator can map caffe 'ArgMax' layers.
register(kind='ArgMax', shape=argmax_shape, layer=argmax_layer)
| {
"repo_name": "lcy-seso/models",
"path": "fluid/image_classification/caffe2fluid/kaffe/custom_layers/argmax.py",
"copies": "3",
"size": "2319",
"license": "apache-2.0",
"hash": -8575711416947760000,
"line_mean": 30.7671232877,
"line_max": 92,
"alpha_frac": 0.6295817163,
"autogenerated": false,
"ratio": 3.6065318818040435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5736113598104045,
"avg_score": null,
"num_lines": null
} |
""" A custom layer for 'axpy' which receives 3 tensors and output 1 tensor.
the function performed is:(the mupltiplication and add are elementewise)
output = inputs[0] * inputs[1] + inputs[2]
"""
from .register import register
def axpy_shape(input_shapes):
    """Calculate the output shape of an Axpy layer: it equals the shape
    of the second input (x), and the third input (y) must match it.

    Args:
        input_shapes (list of tuples): the three input shapes
            (alpha, x, y).

    Returns:
        the output shape (same object as input_shapes[1]).
    """
    assert len(input_shapes) == 3, "not valid input shape for axpy layer"
    alpha_shape, x_shape, y_shape = input_shapes
    assert len(alpha_shape) == len(x_shape), 'should have same dims'
    assert y_shape == x_shape, \
        "shape not consistent for axpy[%s <--> %s]" \
        % (str(x_shape), str(y_shape))
    return x_shape
def axpy_layer(inputs, name):
    """Build a layer of type 'Axpy' using fluid.

    Performs the elementwise computation output = alpha * x + y.

    Args:
        inputs (list of variables): the three fluid variables
            (alpha, x, y) for this layer.
        name (str): name for this layer.

    Returns:
        variable: the output variable for this layer.
    """
    import paddle.fluid as fluid
    assert len(inputs) == 3, "invalid inputs for axpy[%s]" % (name)
    alpha, x, y = inputs
    scaled = fluid.layers.elementwise_mul(x, alpha, axis=0)
    return fluid.layers.elementwise_add(scaled, y, name=name)
# Register the converter so the translator can map caffe 'Axpy' layers.
register(kind='Axpy', shape=axpy_shape, layer=axpy_layer)
| {
"repo_name": "qingqing01/models",
"path": "fluid/image_classification/caffe2fluid/kaffe/custom_layers/axpy.py",
"copies": "3",
"size": "1545",
"license": "apache-2.0",
"hash": 6581878940241726000,
"line_mean": 29.2941176471,
"line_max": 81,
"alpha_frac": 0.6459546926,
"autogenerated": false,
"ratio": 3.661137440758294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5807092133358295,
"avg_score": null,
"num_lines": null
} |
""" a custom layer for 'crop', maybe we should implement this in standard way.
more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/crop.html
"""
from .register import register
def crop_shape(input_shape, shape=None):
    """Calculate the output shape of a Crop layer.

    Args:
        input_shape (num | list of num): either a list of two input
            shapes (the second is the reference and the output shape),
            or a single tensor-like with a .shape attribute when an
            explicit *shape* is provided.
        shape (list of integer): the explicit output shape, used when
            input_shape is a single input.

    Returns:
        list of num: the output shape.

    Raises:
        Exception: when neither form of input is supplied.
    """
    if isinstance(input_shape, list):
        assert len(input_shape) == 2, "the number of crop's inputs must be 2"
        return input_shape[1]
    elif shape is not None:
        assert len(shape) == len(
            input_shape.shape), "input_shape is diff with output_shape"
        return shape
    else:
        # Py3-compatible raise; the original used the Python-2-only
        # `raise Exception, "..."` statement form (and had an
        # unreachable `return None` after it).
        raise Exception("crop_shape input error")
def crop_layer(input, name, shape=None, axis=2, offset=None):
    """ build a layer of type 'Crop' using fluid

    Args:
        @input (variables | list of variables): input fluid variable for this layer
        @shape (list of integer): the shape of output
        @name (str): name for this layer
        @axis (integer): parameter from caffe's Crop layer
        @offset (Variable|list/tuple of integer|None): parameter from caffe's Crop layer

    Returns:
        output (variable): output variable for this layer

    Raises:
        Exception: when neither a second input nor 'shape' is given
    """
    input_shape = None
    output_shape = None
    input_tensor = None
    if isinstance(input, list):
        # two bottoms: crop input[0] to the shape of input[1]
        assert len(input) == 2, "the number of crop's inputs must be 2"
        input_shape = input[0].shape
        output_shape = input[1].shape
        input_tensor = input[0]
    elif shape is not None:
        assert len(shape) == len(
            input.shape), "input_shape is diff with output_shape"
        input_shape = input.shape
        output_shape = shape
        input_tensor = input
    else:
        # py3-compatible raise (original used py2 'raise Exception, msg')
        raise Exception("crop_layer input error")

    assert len(output_shape) == len(
        input_shape), "input_shape is diff with output_shape"
    if axis < 0:
        axis += len(input_shape)
    if offset is not None:
        assert (len(input_shape) - axis
                ) == len(offset), "invalid offset[%s] in crop layer" % (
                    str(offset))
        # dims before 'axis' are never cropped, so pad offsets with zeros
        offset = [0] * axis + offset

    import paddle.fluid as fluid
    output = fluid.layers.crop(
        input_tensor, shape=output_shape, offsets=offset, name=name)
    return output
# register the shape and layer builders for the caffe 'Crop' layer kind
register(kind='Crop', shape=crop_shape, layer=crop_layer)
| {
"repo_name": "kuke/models",
"path": "fluid/PaddleCV/caffe2fluid/kaffe/custom_layers/crop.py",
"copies": "1",
"size": "2632",
"license": "apache-2.0",
"hash": 2906157286032259000,
"line_mean": 33.1818181818,
"line_max": 98,
"alpha_frac": 0.6276595745,
"autogenerated": false,
"ratio": 4.012195121951219,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0021799276809188798,
"num_lines": 77
} |
""" a custom layer for 'crop', maybe we should implement this in standard way.
more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/reduction.html
"""
from .register import register
def reduction_shape(input_shape, axis=0):
    """ calculate the output shape of this layer using input shape

    Reduction collapses all axes from 'axis' onward, so only the leading
    dims survive in the output shape.

    Args:
        @input_shape (list of num): a list of number which represents the input shape
        @axis (int): parameter from caffe's reduction layer

    Returns:
        @output_shape (list of num): a list of numbers represent the output shape
    """
    if axis < 0:
        # negative axis counts back from one past the last dim
        axis += len(input_shape) + 1
    assert axis <= len(input_shape), 'invalid axis[%d] error' % (axis)
    return input_shape[:axis]
def reduction_layer(input, name, axis=0, operation=1, coeff=1.0):
    """ build a layer of type 'Reduction' using fluid

    Args:
        @input (variable): input fluid variable for this layer
        @name (str): name for this layer
        @axis (int): parameter from caffe's reduction layer
        @operation (int): parameter from caffe's reduction layer
                (1: SUM, 2: ASUM, 3: SUMSQ, 4: MEAN)
        @coeff (float): parameter from caffe's reduction layer

    Returns:
        output (variable): output variable for this layer
    """
    assert operation >= 1 and operation <= 4, "reduction reduction [%s] error" % (
        operation)
    input_len = len(input.shape)
    if axis < 0:
        axis += input_len + 1
    # dims to reduce over are dim[axis:]
    # (py3 fix: range() is lazy in py3 and slicing it yields a range object,
    #  while fluid expects a list of ints)
    dim = list(range(input_len))
    import paddle.fluid as fluid
    if operation == 1:  ## operation = SUM
        output = fluid.layers.reduce_sum(
            input, dim=dim[axis:], keep_dim=False, name=name)
    elif operation == 2:  ## operation = ASUM
        absout = fluid.layers.abs(input)
        output = fluid.layers.reduce_sum(
            absout, dim=dim[axis:], keep_dim=False, name=name)
    elif operation == 3:  ## operation = SUMSQ
        powout = fluid.layers.pow(x=input, factor=2.0)
        output = fluid.layers.reduce_sum(
            powout, dim=dim[axis:], keep_dim=False, name=name)
    else:  ## operation = MEAN
        output = fluid.layers.reduce_mean(
            input, dim=dim[axis:], keep_dim=False, name=name)

    # caffe's Reduction scales the result by 'coeff'
    mulout = fluid.layers.scale(x=output, scale=coeff)
    return mulout
# register the shape and layer builders for the caffe 'Reduction' layer kind
register(kind='Reduction', shape=reduction_shape, layer=reduction_layer)
| {
"repo_name": "kuke/models",
"path": "fluid/PaddleCV/caffe2fluid/kaffe/custom_layers/reduction.py",
"copies": "1",
"size": "2316",
"license": "apache-2.0",
"hash": 1107174109217678200,
"line_mean": 33.5671641791,
"line_max": 95,
"alpha_frac": 0.6377374784,
"autogenerated": false,
"ratio": 3.8092105263157894,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9936140729117671,
"avg_score": 0.0021614551196238144,
"num_lines": 67
} |
""" A custom layer for 'detectionout' used in 'SSD' model to produce outputs
Note: Since Paddle's implementation of 'detectionout' applied 'flatten' and 'softmax' ops on the input of 'conf',
while Caffe's implementation does not. Hence, you should adjust the generated 'ssd.py' to remove the 'softmax' and 'flatten' ops applied on the 'conf' input.
"""
from .register import register
def detectionoutput_shape(input_shape):
    """ the output shape of this layer is dynamic and not determined by 'input_shape'

    Args:
        @input_shape (list of int): input shape
    Returns:
        @output_shape (list of num): a list of numbers represent the output shape
    """
    # 6 values per detection row; the number of detections (-1) is only
    # known at run time
    return [-1, 6]
def detectionoutput_layer(inputs,
                          name,
                          background_label=0,
                          share_location=True,
                          nms_param=None,
                          keep_top_k=100,
                          confidence_threshold=0.1):
    """ build a layer of type 'detectionout' using fluid

    Args:
        @inputs (list of variables): input fluid variables for this layer,
                expected order is [mbox_loc, mbox_conf_flatten, mbox_priorbox]
        @name (str): name for this layer
    Returns:
        output (variable): output variable for this layer
    """
    import paddle.fluid as fluid
    if nms_param is None:
        nms_param = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0}

    mbox_conf_flatten = inputs[1]
    mbox_priorbox = inputs[2]
    # caffe packs prior boxes and their variances into one blob; split them
    mbox_priorbox_list = fluid.layers.split(mbox_priorbox, 2, dim=1)
    pb = mbox_priorbox_list[0]
    pbv = mbox_priorbox_list[1]
    pb = fluid.layers.reshape(x=pb, shape=[-1, 4])
    pbv = fluid.layers.reshape(x=pbv, shape=[-1, 4])
    mbox_loc = inputs[0]
    mbox_loc = fluid.layers.reshape(
        x=mbox_loc, shape=[-1, mbox_conf_flatten.shape[1], 4])

    # fill in any missing nms parameters with the defaults
    # (py3 fix: dict.has_key was removed; use 'in'. Also dropped the unused
    #  'fields' local from the original.)
    default = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0}
    for f in default.keys():
        if f not in nms_param:
            nms_param[f] = default[f]

    nmsed_outs = fluid.layers.detection_output(
        scores=mbox_conf_flatten,
        loc=mbox_loc,
        prior_box=pb,
        prior_box_var=pbv,
        background_label=background_label,
        nms_threshold=nms_param["nms_threshold"],
        nms_top_k=nms_param["top_k"],
        keep_top_k=keep_top_k,
        score_threshold=confidence_threshold,
        nms_eta=nms_param["eta"])
    return nmsed_outs
# register the shape and layer builders for the caffe 'DetectionOutput' kind
register(
    kind='DetectionOutput',
    shape=detectionoutput_shape,
    layer=detectionoutput_layer)
| {
"repo_name": "qingqing01/models",
"path": "fluid/image_classification/caffe2fluid/kaffe/custom_layers/detection_out.py",
"copies": "2",
"size": "2576",
"license": "apache-2.0",
"hash": 4663929233976728000,
"line_mean": 31.6075949367,
"line_max": 146,
"alpha_frac": 0.6036490683,
"autogenerated": false,
"ratio": 3.5,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5103649068299999,
"avg_score": null,
"num_lines": null
} |
""" a custom layer for 'flatten', maybe we should implement this in standard way.
more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/flatten.html
"""
from .register import register
def flatten_shape(input_shape, axis=1, end_axis=-1):
    """ calculate the output shape of this layer using input shape

    Args:
        @input_shape (list of num): a list of number which represents the input shape
        @axis (int): parameter from caffe's Flatten layer (first axis to flatten)
        @end_axis (int): parameter from caffe's Flatten layer (last axis to
                flatten, inclusive as in caffe; -1 means the last axis)

    Returns:
        @output_shape (list of num): a list of numbers represent the output shape
    """
    input_shape = list(input_shape)
    start_axis = axis
    if start_axis < 0:
        start_axis += len(input_shape)
    # caffe's end_axis is inclusive; convert it to an exclusive slice bound.
    # (the original treated a non-negative end_axis as exclusive and also
    #  sliced the tail with [end_axis:-1], silently dropping the last dim)
    if end_axis < 0:
        end_axis += len(input_shape) + 1
    else:
        end_axis += 1
    assert start_axis <= end_axis, 'invalid axis[%d] or end_axis[%d] params'\
            % (start_axis, end_axis)
    # product of the flattened dims (py3 fix: bare 'reduce' no longer exists)
    flat_sz = 1
    for dim in input_shape[start_axis:end_axis]:
        flat_sz *= dim
    output_shape = input_shape[0:start_axis]
    output_shape += [flat_sz]
    output_shape += input_shape[end_axis:]
    return output_shape
def flatten_layer(input, name, axis=1, end_axis=-1):
    """ build a layer of type 'Flatten' using fluid

    Args:
        @input (variable): input fluid variable for this layer
        @name (str): name for this layer
        @axis (int): parameter from caffe's Flatten layer
        @end_axis (int): parameter from caffe's Flatten layer
    Returns:
        output (variable): output variable for this layer
    """
    import paddle.fluid as fluid

    dims = list(input.shape)
    if dims[0] == -1:
        # dynamic batch: compute the target shape with a dummy batch of 1,
        # then restore the dynamic marker on the batch dim
        dims[0] = 1
        target = flatten_shape(dims, axis=axis, end_axis=end_axis)
        target[0] = -1
    else:
        target = flatten_shape(dims, axis=axis, end_axis=end_axis)
    return fluid.layers.reshape(input, shape=target, name=name)
# register the shape and layer builders for the caffe 'Flatten' layer kind
register(kind='Flatten', shape=flatten_shape, layer=flatten_layer)
| {
"repo_name": "kuke/models",
"path": "fluid/PaddleCV/caffe2fluid/kaffe/custom_layers/flatten.py",
"copies": "3",
"size": "2107",
"license": "apache-2.0",
"hash": 4530649652735819000,
"line_mean": 30.9242424242,
"line_max": 93,
"alpha_frac": 0.6473659231,
"autogenerated": false,
"ratio": 3.6140651801029158,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5761431103202915,
"avg_score": null,
"num_lines": null
} |
""" A custom layer for 'normalize' op
"""
from .register import register
def normalize_shape(input_shape,
                    across_spatial=True,
                    scale_filler=True,
                    eps=1e-10):
    """ calculate the output shape of this layer using input shapes

    Normalization is elementwise, so the input shape is returned unchanged.

    Args:
        @input_shape (list of tuples): input shape
    Returns:
        @output_shape (list of num): a list of numbers represent the output shape
    """
    return input_shape
def normalize_layer(input,
                    name,
                    across_spatial=True,
                    scale_filler=True,
                    channel_shared=False,
                    eps=1e-10):
    """ build a layer of type 'normalize' using fluid

    Args:
        @inputs (list of variables): input fluid variables for this layer
        @name (str): name for this layer
    Returns:
        output (variable): output variable for this layer
    """
    import paddle.fluid as fluid

    assert across_spatial == False, "Only support across_spatial == False for Normalize[%s]" % (
        name)
    prefix = name.split('.')[0]
    # l2 norm along channel
    normed = fluid.layers.l2_normalize(input, axis=1)
    if channel_shared:
        scale_shape = [1]
    else:
        scale_shape = [input.shape[1]]
    scale_param = fluid.layers.create_parameter(
        shape=scale_shape,
        dtype=input.dtype,
        name=name,
        attr=fluid.ParamAttr(name=prefix + '_scale'))
    # broadcast the learned scale over all channels or per-channel
    return fluid.layers.elementwise_mul(
        x=normed, y=scale_param, axis=-1 if channel_shared else 1)
# register the shape and layer builders for the caffe 'Normalize' layer kind
register(kind='Normalize', shape=normalize_shape, layer=normalize_layer)
| {
"repo_name": "qingqing01/models",
"path": "fluid/image_classification/caffe2fluid/kaffe/custom_layers/normalize.py",
"copies": "3",
"size": "1679",
"license": "apache-2.0",
"hash": -4277120959281076000,
"line_mean": 28.9821428571,
"line_max": 96,
"alpha_frac": 0.6086956522,
"autogenerated": false,
"ratio": 4.055555555555555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000585958547361615,
"num_lines": 56
} |
""" A custom layer for 'Permute' which is equivalent to transpose in paddle
"""
from .register import register
def permute_shape(input_shape, order):
    """ calculate the output shape of this layer using input shapes

    Args:
        @input_shape (list of numbers): input shape
        @order (list of int): permutation of the input axes

    Returns:
        @output_shape (list of num): a list of numbers represent the output shape
    """
    output_shape = []
    for ii in order:
        # the original message referenced an undefined 'name' and raised
        # NameError instead of AssertionError on bad input
        assert ii < len(input_shape), "invalid order for permute[%s]" % (
            str(order))
        output_shape.append(input_shape[ii])
    return output_shape
def permute_layer(input, name, order):
    """ build a layer of type 'permute' using fluid

    Args:
        @input (input variable): input fluid variables for this layer
        @name (str): name for this layer
        @order (list of int): order to permute the dims
    Returns:
        output (variable): output variable for this layer
    """
    import paddle.fluid as fluid

    # caffe's Permute maps directly onto fluid's transpose
    return fluid.layers.transpose(input, order, name=name)
# register the shape and layer builders for the caffe 'Permute' layer kind
register(kind='Permute', shape=permute_shape, layer=permute_layer)
| {
"repo_name": "qingqing01/models",
"path": "fluid/image_classification/caffe2fluid/kaffe/custom_layers/permute.py",
"copies": "3",
"size": "1112",
"license": "apache-2.0",
"hash": 8200731848080964000,
"line_mean": 26.8,
"line_max": 81,
"alpha_frac": 0.6618705036,
"autogenerated": false,
"ratio": 4.043636363636364,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6205506867236363,
"avg_score": null,
"num_lines": null
} |
""" a custom layer for 'power', maybe we should implement this in standard way.
more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/power.html
"""
from .register import register
def power_shape(input_shape, shape=None):
    """ calculate the output shape of this layer using input shape

    Power is an elementwise op, so the shape passes through unchanged.

    Args:
        @input_shape (list of num): a list of number which represents the input shape
    Returns:
        @output_shape (list of num): a list of numbers represent the output shape
    """
    return input_shape
def power_layer(input, name, power=1.0, scale=1.0, shift=0.0):
    """ build a layer of type 'Power' using fluid

    Args:
        @input (variables): input fluid variable for this layer
        @name (str): name for this layer
        @power (float): parameter from caffe's Power layer
        @scale (float): parameter from caffe's Power layer
        @shift (float): parameter from caffe's Power layer
    Returns:
        output (variable): output variable for this layer
    """
    import paddle.fluid as fluid

    # caffe's Power computes (scale * x + shift) ** power
    shifted = fluid.layers.scale(
        input, scale=scale, bias=shift, bias_after_scale=True)
    return fluid.layers.pow(shifted, factor=power)
# register the shape and layer builders for the caffe 'Power' layer kind
register(kind='Power', shape=power_shape, layer=power_layer)
| {
"repo_name": "kuke/models",
"path": "fluid/PaddleCV/caffe2fluid/kaffe/custom_layers/power.py",
"copies": "1",
"size": "1292",
"license": "apache-2.0",
"hash": 6213474327867211000,
"line_mean": 31.3,
"line_max": 91,
"alpha_frac": 0.6803405573,
"autogenerated": false,
"ratio": 3.9510703363914375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0022525821287316585,
"num_lines": 40
} |
""" A custom layer for 'priorbox' which is used in ssd to generate prior box info
Since the order of prior box is different between caffe and paddle,
we use 'slice' and 'concate' ops to align them.
"""
from .register import register
def priorbox_shape(input_shapes, min_size, max_size=None, aspect_ratio=None):
    """ calculate the output shape of this layer using input shapes

    Args:
        @input_shapes (list of tuples): a list of input shapes
                (feature map shape first, then image shape)
        @min_size (list): parameter from caffe's PriorBox layer
        @max_size (list|None): parameter from caffe's PriorBox layer
        @aspect_ratio (list|None): parameter from caffe's PriorBox layer

    Returns:
        @output_shape (list of num): a list of numbers represent the output shape
    """
    # the original message referenced an undefined 'name' and raised
    # NameError instead of AssertionError; report the shapes instead
    assert len(input_shapes) == 2, "invalid inputs for Priorbox[%s]" % (
        str(input_shapes))
    fc_shape = input_shapes[0]
    # one box per location, plus one for max_size and two per aspect ratio
    N = 1
    if max_size is not None:
        N += 1
    if aspect_ratio is not None:
        N += 2 * len(aspect_ratio)
    N_bbx = fc_shape[2] * fc_shape[3] * N
    output_shape = [1, 2, 4 * N_bbx]
    return output_shape
def priorbox_layer(inputs,
                   name,
                   min_size,
                   max_size=None,
                   aspect_ratio=None,
                   variance=[0.1, 0.1, 0.2, 0.2],
                   flip=False,
                   clip=False,
                   step=0.0,
                   offset=0.5):
    """ build a layer of type 'Priorbox' using fluid

    Args:
        @inputs (list of variables): input fluid variables for this layer,
                expected order is [feature map, image]
        @name (str): name for this layer
        @min_size/@max_size/@aspect_ratio/@variance/@flip/@clip/@step/@offset:
                parameters from caffe's PriorBox layer, forwarded to
                fluid.layers.prior_box
    Returns:
        output (variable): output variable for this layer, boxes and
                variances concatenated along axis 1
    """
    import paddle.fluid as fluid
    assert len(inputs) == 2, "invalid inputs for Priorbox[%s]" % (name)
    input = inputs[0]
    image = inputs[1]
    # a scalar step applies to both H and W directions
    steps = tuple(step) if type(step) is list or type(step) is tuple else (step,
                                                                           step)
    box, variance_ = fluid.layers.prior_box(
        input,
        image,
        min_size,
        max_size,
        aspect_ratio,
        variance,
        flip,
        clip,
        steps,
        offset,
        min_max_aspect_ratios_order=True)
    # NOTE(review): the block below is dead code kept from an older layout
    # workaround (it still uses py2 'xrange'); min_max_aspect_ratios_order=True
    # above is what aligns the box order with caffe now.
    """
    #adjust layout when the output is not consistent with caffe's
    feat_shape = list(input.shape)
    H = feat_shape[2]
    W = feat_shape[3]
    box_tmp = fluid.layers.reshape(box, [H, W, -1, 4])
    nb_prior_bbx = int(box_tmp.shape[2])
    tensor_list = fluid.layers.split(box_tmp, nb_prior_bbx, 2)

    #TODO:
    #   current implementation for this layer is not efficient
    #   and we should fix this bug in future when Paddle support the same prior-box layout with Caffe
    index_list = [0]
    index_list = index_list * nb_prior_bbx
    index_offset = 0
    if max_size is not None:
        index_list[1] = -1
        index_offset = 1
    for ii in xrange(2 * len(aspect_ratio)):
        index_list[ii + 1 + index_offset] = ii + 1
    tensor_list_gathered = [tensor_list[ii] for ii in index_list]
    caffe_prior_bbx = fluid.layers.concat(tensor_list_gathered, axis=2)
    box = fluid.layers.reshape(caffe_prior_bbx, [1, 1, -1])
    """
    # flatten boxes and variances, then stack them like caffe's PriorBox blob
    box = fluid.layers.reshape(box, [1, 1, -1])
    variance_ = fluid.layers.reshape(variance_, [1, 1, -1])
    output = fluid.layers.concat([box, variance_], axis=1)
    return output
# register the shape and layer builders for the caffe 'PriorBox' layer kind
register(kind='PriorBox', shape=priorbox_shape, layer=priorbox_layer)
| {
"repo_name": "kuke/models",
"path": "fluid/PaddleCV/caffe2fluid/kaffe/custom_layers/priorbox.py",
"copies": "1",
"size": "3272",
"license": "apache-2.0",
"hash": 4222775013600517000,
"line_mean": 30.7669902913,
"line_max": 101,
"alpha_frac": 0.5770171149,
"autogenerated": false,
"ratio": 3.5642701525054465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4641287267405446,
"avg_score": null,
"num_lines": null
} |
""" a custom layer for 'reshape', maybe we should implement this in standard way.
more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/reshape.html
"""
from .register import register
def import_fluid():
    """Import and return the paddle.fluid module (deferred import)."""
    import paddle.fluid as fluid
    return fluid
def reshape_shape(input_sp, shape, axis=0, num_axes=-1):
    """ calculate the output shape of this layer using input shape

    Args:
        @input_sp (list of num): a list of number which represents the input shape
        @shape (dict): parameter from caffe's Reshape layer; shape['dim'] is
                the list of new dims (0 copies the input dim, -1 is inferred)
        @axis (int): parameter from caffe's Reshape layer
        @num_axes(int): parameter from caffe's Reshape layer

    Returns:
        @output_shape (list of num): a list of numbers represent the output shape
    """

    def count(num_list):
        # product of the dims (py3 fix: the bare builtin 'reduce' is gone)
        total = 1
        for num in num_list:
            total *= num
        return total

    input_shape = list(input_sp)
    input_count = count(input_shape)
    input_num_axes = len(input_shape)
    input_start_axis = axis
    start_axis = input_start_axis if input_start_axis >= 0 \
            else input_num_axes + input_start_axis + 1
    assert start_axis >= 0, "[Reshape]axis %d out of range" % (input_start_axis)
    assert start_axis <= input_num_axes, "[Reshape]axis %d out of range for %d-D input data"\
            % (input_start_axis, input_num_axes)
    assert num_axes >= -1, "[Reshape]num_axes must be >= 0, or -1 for all"
    end_axis = input_num_axes if num_axes == -1 else start_axis + num_axes
    assert end_axis <= input_num_axes, "end_axis[%d] = axis[%d] + num_axes[%d] is out of range"\
            % (end_axis, start_axis, num_axes)
    num_axes_replaced = end_axis - start_axis
    num_axes_retained = input_num_axes - num_axes_replaced
    num_new_axes = len(shape['dim'])
    # dims before 'start_axis', then the new dims, then dims after 'end_axis'
    output_shape = []
    for i in range(start_axis):
        output_shape.append(input_shape[i])
    for i in range(num_new_axes):
        output_shape.append(shape['dim'][i])
    for i in range(end_axis, input_num_axes):
        output_shape.append(input_shape[i])
    assert len(output_shape) == num_axes_retained + num_new_axes,\
            "[Reshape]invalid dims of output shape[%s]" % (str(output_shape))

    inferred_axis = -1
    copy_axes = []
    constant_count = 1
    for i in range(num_new_axes):
        top_dim = shape['dim'][i]
        if top_dim == 0:
            # 0 means: copy the corresponding input dim
            copy_axes.append(i)
            copy_axis_index = start_axis + i
            output_shape[copy_axis_index] = input_shape[copy_axis_index]
        elif top_dim == -1:
            assert inferred_axis == -1, "[Reshape]new shape contains multiple -1 dims"
            inferred_axis = i
        else:
            constant_count *= top_dim

    if inferred_axis >= 0:
        # infer the -1 dim so the total element count is preserved
        explicit_count = constant_count
        l = input_shape[0:start_axis]
        if len(l) > 0:
            explicit_count *= count(l)
        l = input_shape[end_axis:]
        if len(l) > 0:
            explicit_count *= count(l)
        for i in range(len(copy_axes)):
            explicit_count *= output_shape[start_axis + copy_axes[i]]
        assert input_count % explicit_count == 0, "[Reshape]bottom count[%d] "\
                "must be divisible by product of the specified dimensions[%d] "\
                % (input_count, explicit_count)
        # integer division (py3 fix: '/' would produce a float dim here)
        output_shape[start_axis + inferred_axis] = input_count // explicit_count

    output_count = count(output_shape)
    assert output_count == input_count, "[Reshape]output count[%d] must match input count[%d]" % (
        output_count, input_count)
    return output_shape
def reshape_layer(input, name, shape, axis=0, num_axes=-1):
    """ build a layer of type 'Reshape' using fluid

    Args:
        @input (variable): input fluid variable for this layer
        @name (str): name for this layer
        @shape (object): parameter from caffe's Reshape layer
        @axis (int): parameter from caffe's Reshape layer
        @num_axes(int): parameter from caffe's Reshape layer

    Returns:
        output (variable): output variable for this layer
    """
    fluid = import_fluid()

    dims = list(input.shape)
    if dims[0] == -1:
        # dynamic batch: compute the target shape with a dummy batch of 1,
        # then restore the dynamic marker on the batch dim
        dims[0] = 1
        target = reshape_shape(dims, shape, axis, num_axes)
        target[0] = -1
    else:
        target = reshape_shape(dims, shape, axis, num_axes)
    return fluid.layers.reshape(input, shape=target, name=name)
# register the shape and layer builders for the caffe 'Reshape' layer kind
register(kind='Reshape', shape=reshape_shape, layer=reshape_layer)
| {
"repo_name": "lcy-seso/models",
"path": "fluid/image_classification/caffe2fluid/kaffe/custom_layers/reshape.py",
"copies": "3",
"size": "4532",
"license": "apache-2.0",
"hash": -8512078178968749000,
"line_mean": 33.0751879699,
"line_max": 98,
"alpha_frac": 0.6171668138,
"autogenerated": false,
"ratio": 3.5600942655145325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0019083506243866215,
"num_lines": 133
} |
""" a custom layer for 'ROIPooling', maybe we should implement this in standard way.
more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/ROIPooling.html
"""
from .register import register
def roipooling_shape(input_shapes, pooled_h, pooled_w, spatial_scale):
    """ calculate the output shape of this layer using input shape

    Args:
        @input_shapes (list of shapes): [feature map shape, rois shape]
        @pooled_h (int): parameter from caffe's ROIPooling layer
        @pooled_w (int): parameter from caffe's ROIPooling layer
        @spatial_scale (float): parameter from caffe's ROIPooling layer (unused
                for the shape computation)

    Returns:
        @output_shape (list of num): a list of numbers represent the output shape
    """
    assert len(input_shapes) == 2, "not valid input shape for roipooling layer"
    base_fea_shape = input_shapes[0]
    rois_shape = input_shapes[1]
    # copy so the caller's shape list is not mutated in place
    # (the original aliased and overwrote input_shapes[0])
    output_shape = list(base_fea_shape)
    output_shape[0] = rois_shape[0]
    output_shape[2] = pooled_h
    output_shape[3] = pooled_w
    return output_shape
def roipooling_layer(inputs, name, pooled_h, pooled_w, spatial_scale):
    """ build a layer of type 'ROIPooling' using fluid

    Args:
        @inputs (list of variables): [feature map, rois] fluid variables
        @name (str): name for this layer
        @pooled_h (int): pooled output height, from caffe's ROIPooling layer
        @pooled_w (int): pooled output width, from caffe's ROIPooling layer
        @spatial_scale (float): parameter from caffe's ROIPooling layer

    Returns:
        output (variable): output variable for this layer
    """
    import paddle.fluid as fluid
    assert len(inputs) == 2, "not valid input shape for roipooling layer"
    base_fea = inputs[0]
    # keep only columns 1..4 of the rois input
    # (presumably caffe rois are [batch_idx, x1, y1, x2, y2] — TODO confirm)
    rois = inputs[1][:, 1:5]
    rois_fea = fluid.layers.roi_pool(base_fea, rois, pooled_h, pooled_w,
                                     spatial_scale)
    return rois_fea
# register the shape and layer builders for the caffe 'ROIPooling' layer kind
register(kind='ROIPooling', shape=roipooling_shape, layer=roipooling_layer)
| {
"repo_name": "kuke/models",
"path": "fluid/PaddleCV/caffe2fluid/kaffe/custom_layers/roipooling.py",
"copies": "3",
"size": "1957",
"license": "apache-2.0",
"hash": -1320765194333184500,
"line_mean": 35.9245283019,
"line_max": 96,
"alpha_frac": 0.6673479816,
"autogenerated": false,
"ratio": 3.6579439252336448,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5825291906833645,
"avg_score": null,
"num_lines": null
} |
""" a custom layer for 'select' which is used to replace standard 'Slice' layer
for converting layer with multiple different output tensors
"""
from .register import register
def select_shape(input_shape, slice_point, axis=1):
    """ calculate the output shape of this layer using input shape

    Args:
        @input_shape (list of num): a list of number which represents the input shape
        @slice_point (list): parameter from caffe's Slice layer
        @axis (int): parameter from caffe's Slice layer

    Returns:
        @output_shape (list of num): a list of numbers represent the output shape
    """
    result = list(input_shape)
    start = slice_point[0]
    # with a single slice point, select everything up to the end of 'axis'
    end = slice_point[1] if len(slice_point) == 2 else result[axis]
    assert end > start, "invalid slice_point with [start:%d, end:%d]"\
            % (start, end)
    result[axis] = end - start
    return result
def select_layer(input, name, slice_point, axis=1):
    """ build a layer of type 'Slice' using fluid

    Args:
        @input (variable): input fluid variable for this layer
        @name (str): name for this layer
        @slice_point (list): parameter from caffe's Slice layer
        @axis (int): parameter from caffe's Slice layer

    Returns:
        output (variable): output variable for this layer
    """
    import paddle.fluid as fluid

    dims = list(input.shape)
    start = slice_point[0]
    end = slice_point[1] if len(slice_point) == 2 else dims[axis]

    # split 'axis' into up to three sections: [0:start], [start:end], [end:],
    # then keep only the middle one
    sections = []
    if start > 0:
        sections.append(start)
    pos = len(sections)
    sections.append(end - start)
    if end != dims[axis]:
        sections.append(dims[axis] - end)

    pieces = fluid.layers.split(input, sections, dim=axis, name=name)
    return pieces[pos]
# register the shape and layer builders for the custom 'Select' layer kind
register(kind='Select', shape=select_shape, layer=select_layer)
| {
"repo_name": "kuke/models",
"path": "fluid/PaddleCV/caffe2fluid/kaffe/custom_layers/select.py",
"copies": "3",
"size": "1966",
"license": "apache-2.0",
"hash": 6209454951546619000,
"line_mean": 28.3432835821,
"line_max": 85,
"alpha_frac": 0.6388606307,
"autogenerated": false,
"ratio": 3.8777120315581852,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6016572662258185,
"avg_score": null,
"num_lines": null
} |
"""A custom list that manages index/position information for its children.
``orderinglist`` is a custom list collection implementation for mapped
relations that keeps an arbitrary "position" attribute on contained objects in
sync with each object's position in the Python list.
The collection acts just like a normal Python ``list``, with the added
behavior that as you manipulate the list (via ``insert``, ``pop``, assignment,
deletion, what have you), each of the objects it contains is updated as needed
to reflect its position. This is very useful for managing ordered relations
which have a user-defined, serialized order::
>>> from sqlalchemy import MetaData, Table, Column, Integer, String, ForeignKey
>>> from sqlalchemy.orm import mapper, relation
>>> from sqlalchemy.ext.orderinglist import ordering_list
A simple model of users and their "top 10" things::
>>> metadata = MetaData()
>>> users = Table('users', metadata,
... Column('id', Integer, primary_key=True))
>>> blurbs = Table('user_top_ten_list', metadata,
... Column('id', Integer, primary_key=True),
... Column('user_id', Integer, ForeignKey('users.id')),
... Column('position', Integer),
... Column('blurb', String(80)))
>>> class User(object):
... pass
...
>>> class Blurb(object):
... def __init__(self, blurb):
... self.blurb = blurb
...
>>> mapper(User, users, properties={
... 'topten': relation(Blurb, collection_class=ordering_list('position'),
... order_by=[blurbs.c.position])})
<Mapper ...>
>>> mapper(Blurb, blurbs)
<Mapper ...>
Acts just like a regular list::
>>> u = User()
>>> u.topten.append(Blurb('Number one!'))
>>> u.topten.append(Blurb('Number two!'))
But the ``.position`` attribute is set automatically behind the scenes::
>>> assert [blurb.position for blurb in u.topten] == [0, 1]
The objects will be renumbered automatically after any list-changing operation,
for example an ``insert()``::
>>> u.topten.insert(1, Blurb('I am the new Number Two.'))
>>> assert [blurb.position for blurb in u.topten] == [0, 1, 2]
>>> assert u.topten[1].blurb == 'I am the new Number Two.'
>>> assert u.topten[1].position == 1
Numbering and serialization are both highly configurable. See the docstrings
in this module and the main SQLAlchemy documentation for more information and
examples.
The :class:`~sqlalchemy.ext.orderinglist.ordering_list` factory function is the
ORM-compatible constructor for `OrderingList` instances.
"""
from sqlalchemy.orm.collections import collection
from sqlalchemy import util
__all__ = [ 'ordering_list' ]
def ordering_list(attr, count_from=None, **kw):
    """Prepares an OrderingList factory for use in mapper definitions.

    Returns an object suitable for use as an argument to a Mapper relation's
    ``collection_class`` option. Arguments are:

    attr
      Name of the mapped attribute to use for storage and retrieval of
      ordering information

    count_from (optional)
      Set up an integer-based ordering, starting at ``count_from``. For
      example, ``ordering_list('pos', count_from=1)`` would create a 1-based
      list in SQL, storing the value in the 'pos' column. Ignored if
      ``ordering_func`` is supplied.

    Any remaining keyword arguments are passed along to the ``OrderingList``
    constructor.
    """
    opts = _unsugar_count_from(count_from=count_from, **kw)

    def factory():
        return OrderingList(attr, **opts)

    return factory
# Ordering utility functions
def count_from_0(index, collection):
    """Numbering function: consecutive integers starting at 0.

    ``index`` is the element's position in the list; ``collection`` is the
    list itself (unused here, part of the numbering-function signature).
    """
    return index
def count_from_1(index, collection):
    """Numbering function: consecutive integers starting at 1.

    ``index`` is the element's position in the list; ``collection`` is the
    list itself (unused here, part of the numbering-function signature).
    """
    return index + 1
def count_from_n_factory(start):
    """Numbering function factory: consecutive integers from an arbitrary start."""

    def f(index, collection):
        return start + index

    # give the function a descriptive name when 'start' formats as an int
    try:
        f.__name__ = 'count_from_%i' % start
    except TypeError:
        pass
    return f
def _unsugar_count_from(**kw):
"""Builds counting functions from keywrod arguments.
Keyword argument filter, prepares a simple ``ordering_func`` from a
``count_from`` argument, otherwise passes ``ordering_func`` on unchanged.
"""
count_from = kw.pop('count_from', None)
if kw.get('ordering_func', None) is None and count_from is not None:
if count_from == 0:
kw['ordering_func'] = count_from_0
elif count_from == 1:
kw['ordering_func'] = count_from_1
else:
kw['ordering_func'] = count_from_n_factory(count_from)
return kw
class OrderingList(list):
    """A custom list that manages position information for its children.

    See the module and __init__ documentation for more details. The
    ``ordering_list`` factory function is used to configure ``OrderingList``
    collections in ``mapper`` relation definitions.
    """

    def __init__(self, ordering_attr=None, ordering_func=None,
                 reorder_on_append=False):
        """A custom list that manages position information for its children.

        ``OrderingList`` is a ``collection_class`` list implementation that
        syncs position in a Python list with a position attribute on the
        mapped objects.

        This implementation relies on the list starting in the proper order,
        so be **sure** to put an ``order_by`` on your relation.

        ordering_attr
            Name of the attribute that stores the object's order in the
            relation.

        ordering_func
            Optional. A function that maps the position in the Python list to
            a value to store in the ``ordering_attr``. Values returned are
            usually (but need not be!) integers.

            An ``ordering_func`` is called with two positional parameters: the
            index of the element in the list, and the list itself.

            If omitted, Python list indexes are used for the attribute values.
            Two basic pre-built numbering functions are provided in this
            module: ``count_from_0`` and ``count_from_1``. For more exotic
            examples like stepped numbering, alphabetical and Fibonacci
            numbering, see the unit tests.

        reorder_on_append
            Default False. When appending an object with an existing
            (non-None) ordering value, that value will be left untouched
            unless ``reorder_on_append`` is true. This is an optimization to
            avoid a variety of dangerous unexpected database writes.

            SQLAlchemy will add instances to the list via append() when your
            object loads. If for some reason the result set from the database
            skips a step in the ordering (say, row '1' is missing but you get
            '2', '3', and '4'), reorder_on_append=True would immediately
            renumber the items to '1', '2', '3'. If you have multiple
            sessions making changes, any of whom happen to load this
            collection even in passing, all of the sessions would try to
            "clean up" the numbering in their commits, possibly causing all
            but one to fail with a concurrent modification error. Spooky
            action at a distance.

            Recommend leaving this with the default of False, and just call
            ``reorder()`` if you're doing ``append()`` operations with
            previously ordered instances or when doing some housekeeping after
            manual sql operations.
        """
        self.ordering_attr = ordering_attr
        if ordering_func is None:
            ordering_func = count_from_0
        self.ordering_func = ordering_func
        self.reorder_on_append = reorder_on_append

    # More complex serialization schemes (multi column, e.g.) are possible by
    # subclassing and reimplementing these two methods.
    def _get_order_value(self, entity):
        # Read the position attribute from a mapped object.
        return getattr(entity, self.ordering_attr)

    def _set_order_value(self, entity, value):
        # Write the position attribute on a mapped object.
        setattr(entity, self.ordering_attr, value)

    def reorder(self):
        """Synchronize ordering for the entire collection.

        Sweeps through the list and ensures that each object has accurate
        ordering information set.
        """
        for index, entity in enumerate(self):
            self._order_entity(index, entity, True)

    # As of 0.5, _reorder is no longer semi-private
    _reorder = reorder

    def _order_entity(self, index, entity, reorder=True):
        # Assign the computed position to one entity, touching the attribute
        # only when the stored value actually differs (avoids needless dirty
        # flags on the mapped object).
        have = self._get_order_value(entity)

        # Don't disturb existing ordering if reorder is False
        if have is not None and not reorder:
            return

        should_be = self.ordering_func(index, self)
        if have != should_be:
            self._set_order_value(entity, should_be)

    def append(self, entity):
        # Append, then number the new tail element; whether an existing
        # position value is overwritten depends on reorder_on_append.
        super(OrderingList, self).append(entity)
        self._order_entity(len(self) - 1, entity, self.reorder_on_append)

    def _raw_append(self, entity):
        """Append without any ordering behavior."""
        super(OrderingList, self).append(entity)
    # Registered as a collection appender with the ORM's instrumentation.
    _raw_append = collection.adds(1)(_raw_append)

    def insert(self, index, entity):
        super(OrderingList, self).insert(index, entity)
        self._reorder()

    def remove(self, entity):
        super(OrderingList, self).remove(entity)
        self._reorder()

    def pop(self, index=-1):
        entity = super(OrderingList, self).pop(index)
        self._reorder()
        return entity

    def __setitem__(self, index, entity):
        if isinstance(index, slice):
            # Normalize negative/omitted slice bounds, then assign item by
            # item so each element gets renumbered through this method.
            step = index.step or 1
            start = index.start or 0
            if start < 0:
                start += len(self)
            stop = index.stop or len(self)
            if stop < 0:
                stop += len(self)

            for i in xrange(start, stop, step):
                self.__setitem__(i, entity[i])
        else:
            self._order_entity(index, entity, True)
            super(OrderingList, self).__setitem__(index, entity)

    def __delitem__(self, index):
        super(OrderingList, self).__delitem__(index)
        self._reorder()

    # Py2K
    def __setslice__(self, start, end, values):
        super(OrderingList, self).__setslice__(start, end, values)
        self._reorder()

    def __delslice__(self, start, end):
        super(OrderingList, self).__delslice__(start, end)
        self._reorder()
    # end Py2K

    # Copy list's docstrings onto the overriding methods that lack their own.
    # NOTE: runs at class-creation time; ``util`` and ``func.func_name`` are
    # Python 2 / SQLAlchemy internals imported at the top of the file.
    for func_name, func in locals().items():
        if (util.callable(func) and func.func_name == func_name and
            not func.__doc__ and hasattr(list, func_name)):
            func.__doc__ = getattr(list, func_name).__doc__
    del func_name, func
# Run the module's doctests when executed directly; ELLIPSIS lets examples
# elide uninteresting output with "...".
if __name__ == '__main__':
    import doctest
    doctest.testmod(optionflags=doctest.ELLIPSIS)
| {
"repo_name": "obeattie/sqlalchemy",
"path": "lib/sqlalchemy/ext/orderinglist.py",
"copies": "1",
"size": "10847",
"license": "mit",
"hash": 2410058560223590400,
"line_mean": 35.8945578231,
"line_max": 81,
"alpha_frac": 0.6411911128,
"autogenerated": false,
"ratio": 4.149579188982402,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5290770301782403,
"avg_score": null,
"num_lines": null
} |
"""A custom list that manages index/position information for its children.
``orderinglist`` is a custom list collection implementation for mapped relations
that keeps an arbitrary "position" attribute on contained objects in sync with
each object's position in the Python list.
The collection acts just like a normal Python ``list``, with the added
behavior that as you manipulate the list (via ``insert``, ``pop``, assignment,
deletion, what have you), each of the objects it contains is updated as needed
to reflect its position. This is very useful for managing ordered relations
which have a user-defined, serialized order::
from sqlalchemy.ext.orderinglist import ordering_list
users = Table('users', metadata,
Column('id', Integer, primary_key=True))
blurbs = Table('user_top_ten_list', metadata,
Column('id', Integer, primary_key=True),
Column('user_id', Integer, ForeignKey('users.id')),
Column('position', Integer),
Column('blurb', String(80)))
class User(object): pass
class Blurb(object):
def __init__(self, blurb):
self.blurb = blurb
mapper(User, users, properties={
'topten': relation(Blurb, collection_class=ordering_list('position'),
order_by=[blurbs.c.position])
})
mapper(Blurb, blurbs)
u = User()
u.topten.append(Blurb('Number one!'))
u.topten.append(Blurb('Number two!'))
# Like magic.
assert [blurb.position for blurb in u.topten] == [0, 1]
# The objects will be renumbered automatically after any list-changing
# operation, for example an insert:
u.topten.insert(1, Blurb('I am the new Number Two.'))
assert [blurb.position for blurb in u.topten] == [0, 1, 2]
assert u.topten[1].blurb == 'I am the new Number Two.'
assert u.topten[1].position == 1
Numbering and serialization are both highly configurable. See the docstrings
in this module and the main SQLAlchemy documentation for more information and
examples.
The [sqlalchemy.ext.orderinglist#ordering_list] function is the ORM-compatible
constructor for OrderingList instances.
"""
__all__ = [ 'ordering_list' ]
def ordering_list(attr, count_from=None, **kw):
    """Prepare an ``OrderingList`` factory for mapper definitions.

    Returns a zero-argument callable usable as a Mapper relation's
    ``collection_class`` option.

    attr
        Name of the mapped attribute used for storing and retrieving
        ordering information.
    count_from (optional)
        Integer-based ordering starting at ``count_from``; ignored when an
        explicit ``ordering_func`` is supplied.

    Any additional keyword arguments are passed to the ``OrderingList``
    constructor.
    """
    options = _unsugar_count_from(count_from=count_from, **kw)

    def make_collection():
        return OrderingList(attr, **options)

    return make_collection
# Ordering utility functions
def count_from_0(index, collection):
    """Numbering function yielding consecutive integers from 0."""
    return index
def count_from_1(index, collection):
    """Numbering function yielding consecutive integers from 1."""
    return index + 1
def count_from_n_factory(start):
    """Build a numbering function of consecutive integers from *start*."""
    def counter(index, collection):
        return index + start

    # Best-effort descriptive name; non-integer starts keep the default.
    try:
        counter.__name__ = 'count_from_%i' % start
    except TypeError:
        pass
    return counter
def _unsugar_count_from(**kw):
"""Builds counting functions from keywrod arguments.
Keyword argument filter, prepares a simple ``ordering_func`` from a
``count_from`` argument, otherwise passes ``ordering_func`` on unchanged.
"""
count_from = kw.pop('count_from', None)
if kw.get('ordering_func', None) is None and count_from is not None:
if count_from == 0:
kw['ordering_func'] = count_from_0
elif count_from == 1:
kw['ordering_func'] = count_from_1
else:
kw['ordering_func'] = count_from_n_factory(count_from)
return kw
class OrderingList(list):
    """A custom list that manages position information for its children.

    See the module and __init__ documentation for more details. The
    ``ordering_list`` function is used to configure ``OrderingList``
    collections in ``mapper`` relation definitions.
    """

    def __init__(self, ordering_attr=None, ordering_func=None,
                 reorder_on_append=False):
        """A custom list that manages position information for its children.

        ``OrderingList`` is a ``collection_class`` list implementation that
        syncs position in a Python list with a position attribute on the
        mapped objects.

        This implementation relies on the list starting in the proper order,
        so be **sure** to put an ``order_by`` on your relation.

        ordering_attr
            Name of the attribute that stores the object's order in the
            relation.

        ordering_func
            Optional. A function that maps the position in the Python list to
            a value to store in the ``ordering_attr``. Values returned are
            usually (but need not be!) integers. Called with two positional
            parameters: the index of the element in the list, and the list
            itself. If omitted, Python list indexes are used (count_from_0).

        reorder_on_append
            Default False. When appending an object with an existing
            (non-None) ordering value, that value is left untouched unless
            ``reorder_on_append`` is true. Leaving this False avoids
            surprising database writes when multiple sessions load the same
            collection; call ``_reorder()`` explicitly when housekeeping is
            needed.
        """
        self.ordering_attr = ordering_attr
        if ordering_func is None:
            ordering_func = count_from_0
        self.ordering_func = ordering_func
        self.reorder_on_append = reorder_on_append

    # More complex serialization schemes (multi column, e.g.) are possible by
    # subclassing and reimplementing these two methods.
    def _get_order_value(self, entity):
        return getattr(entity, self.ordering_attr)

    def _set_order_value(self, entity, value):
        setattr(entity, self.ordering_attr, value)

    def _reorder(self):
        """Sweep through the list and ensure that each object has accurate
        ordering information set."""
        for index, entity in enumerate(self):
            self._order_entity(index, entity, True)

    def _order_entity(self, index, entity, reorder=True):
        have = self._get_order_value(entity)

        # Don't disturb existing ordering if reorder is False
        if have is not None and not reorder:
            return

        should_be = self.ordering_func(index, self)
        # Fixed: original used the Python-2-only `<>` operator here, which is
        # a syntax error on Python 3; `!=` is equivalent on both versions.
        if have != should_be:
            self._set_order_value(entity, should_be)

    def append(self, entity):
        super(OrderingList, self).append(entity)
        self._order_entity(len(self) - 1, entity, self.reorder_on_append)

    def _raw_append(self, entity):
        """Append without any ordering behavior."""
        super(OrderingList, self).append(entity)

    def insert(self, index, entity):
        # Delegates to slice assignment; renumbering happens in the slice
        # handlers (__setslice__ on Python 2).
        self[index:index] = [entity]

    def remove(self, entity):
        super(OrderingList, self).remove(entity)
        self._reorder()

    def pop(self, index=-1):
        entity = super(OrderingList, self).pop(index)
        self._reorder()
        return entity

    def __setitem__(self, index, entity):
        if isinstance(index, slice):
            # Assign item by item so each element is renumbered through the
            # scalar branch below.
            for i in range(index.start or 0, index.stop or 0, index.step or 1):
                self.__setitem__(i, entity[i])
        else:
            self._order_entity(index, entity, True)
            super(OrderingList, self).__setitem__(index, entity)

    def __delitem__(self, index):
        super(OrderingList, self).__delitem__(index)
        self._reorder()

    # Python 2 slice protocol; unused on Python 3 but harmless.
    def __setslice__(self, start, end, values):
        super(OrderingList, self).__setslice__(start, end, values)
        self._reorder()

    def __delslice__(self, start, end):
        super(OrderingList, self).__delslice__(start, end)
        self._reorder()
| {
"repo_name": "carlgao/lenga",
"path": "images/lenny64-peon/usr/share/python-support/python-sqlalchemy/sqlalchemy/ext/orderinglist.py",
"copies": "5",
"size": "9530",
"license": "mit",
"hash": -3468236574518892500,
"line_mean": 36.96812749,
"line_max": 80,
"alpha_frac": 0.6495278069,
"autogenerated": false,
"ratio": 4.1982378854625555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007349714516745319,
"num_lines": 251
} |
"""A custom pygments lexer for IPython code cells.
Informs The pygments highlighting library of the quirks of IPython's superset
of Python -- magic commands, !shell commands, etc.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Third-party imports
from pygments.lexers import PythonLexer, BashLexer
from pygments.lexer import bygroups, using
from pygments.token import Keyword, Operator, Text
#-----------------------------------------------------------------------------
# Class declarations
#-----------------------------------------------------------------------------
class IPythonLexer(PythonLexer):
    """
    Pygments Lexer for use with IPython code. Inherits from
    PythonLexer and adds information about IPython specific
    keywords (i.e. magic commands, shell commands, etc.)
    """

    # Basic properties used by pygments to identify/select this lexer.
    name = 'IPython'
    aliases = ['ip', 'ipython']
    filenames = ['*.ipy']

    # Highlighting information: prepend IPython-specific rules (magics and
    # !shell escapes) to PythonLexer's 'root' state so they win over the
    # plain-Python rules.
    tokens = PythonLexer.tokens.copy()
    tokens['root'] = [
        # NOTE(review): '(\.*)' matches only a run of literal dots; '(.*)'
        # (the rest of the line) may have been intended -- confirm.
        (r'(\%+)(\w+)\s+(\.*)(\n)', bygroups(Operator, Keyword,
                                             using(BashLexer), Text)),
        (r'(\%+)(\w+)\b', bygroups(Operator, Keyword)),
        (r'^(!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
    ] + tokens['root']
| {
"repo_name": "noslenfa/tdjangorest",
"path": "uw/lib/python2.7/site-packages/IPython/nbconvert/utils/lexers.py",
"copies": "2",
"size": "1775",
"license": "apache-2.0",
"hash": 4327979201780903400,
"line_mean": 37.5869565217,
"line_max": 78,
"alpha_frac": 0.4766197183,
"autogenerated": false,
"ratio": 5.461538461538462,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6938158179838462,
"avg_score": null,
"num_lines": null
} |
# A custom Python file to containing utility (helper)
# [mostly independent] functions that are sometimes
# used within the app
import json
# JSON dump object
def dump(obj):
    """Serialize an object's attribute dictionary to a JSON string."""
    return json.dumps(vars(obj))
# parses attributes of a query row object to a dict
def row2dict(row):
    """Map a SQLAlchemy-style query row to a dict of column name -> str value."""
    return {column.name: str(getattr(row, column.name))
            for column in row.__table__.columns}
# verifies if 'filename' has a valid (required) extension
def allowed_file(filename, allw_ext):
    """Return True if *filename* has an extension listed in *allw_ext*."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in allw_ext
# check if given string is a mix of alphanumeric characters
def contains_alnum(string):
    """Return True if *string* contains both alphabetic characters and digits.

    Replaces the original counting loop with two short-circuiting ``any()``
    scans -- same result, stops as soon as each kind of character is found.
    Empty strings and strings with only one kind of character return False.
    """
    return (any(c.isalpha() for c in string) and
            any(c.isdigit() for c in string))
# generates a random string that defaults to 8 characters
def get_random_string(n=8):
    """Return a cryptographically random alphanumeric string of length *n*."""
    import random
    import string
    alphabet = string.ascii_letters + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(n))
import db_ops
# send given notification message to given user
def notify(user, msg):
    """Stub: deliver notification *msg* to *user*.

    Not yet implemented -- the intended DB insert is sketched in the
    commented template below.
    """
    pass
    ####TODO: Update parameters as required
    #db_ops.insert_val(db_ops.[[db_notification_model]], dict([[db_notification_owner_id_attrib]]=user.[[notification_owner_id_attrib]], [[db_notification_message_attrib]]=msg))
# given [conversation] message from given 'sender' to given 'recipient'
def send_msg(sender, recp, msg):
    """Stub: send conversation message *msg* from *sender* to *recp*.

    Not yet implemented -- the intended DB insert is sketched in the
    commented template below.
    """
    pass
    ####TODO: Update parameters as required
    #db_ops.insert_val(db_ops.[[db_message_model]], \ dict([[db_message_sender_id_attrib]]=sender.[[sender_id_attrib]], [[db_message_recipient_id__attrib]] = recp.[[recipient_id_attrib]], \[[db_message_text_attrib]]=msg))
| {
"repo_name": "takwas/flask_app_template",
"path": "template_app/utils.py",
"copies": "1",
"size": "1799",
"license": "mit",
"hash": -5845515224720204000,
"line_mean": 22.9866666667,
"line_max": 218,
"alpha_frac": 0.6903835464,
"autogenerated": false,
"ratio": 3.3314814814814815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.925689929057148,
"avg_score": 0.05299314746200038,
"num_lines": 75
} |
'''A custom rq worker class to add start & stop SNS messages to all jobs'''
import logging
import os
import re
from rq.worker import HerokuWorker
from harvester.sns_message import publish_to_harvesting
logger = logging.getLogger(__name__)
# need tuple of tuple pairs, regex string to msg template
# the regex needs to match the function called
# and parse out the collection id
# Fixed: patterns are now raw strings -- the originals relied on invalid
# escape sequences like "\(" and "\d" in plain strings, which emit
# DeprecationWarning on Python 3.6+ and will become errors. Byte content
# is unchanged.
message_match_list = (
    (r"sync_couch_collection_to_solr\(collection_key='(?P<cid>\d+)'\)",
     "{status}: Sync from Couchdb to Solr {env} on "
     ":worker: {worker} for CID: {cid}"),
    (r"run_ingest.main.*/collection/(?P<cid>\d+)/",
     "{status}: Metadata Harvest to Couchdb {env} on "
     ":worker: {worker} for CID: {cid}"),
    (r"image_harvest.main\(collection_key=.*'(?P<cid>\d+)'",
     "{status}: Image Harvest {env} on "
     ":worker: {worker} for CID: {cid}"),
    (r"delete_solr_collection\(collection_key='(?P<cid>\d+)'\)",
     "{status}: Delete from Solr {env} on "
     ":worker: {worker} for CID: {cid}"),
    (r"s3stash.stash_collection.main\(registry_id=(?P<cid>\d+)",
     "{status}: Nuxeo Deep Harvest on "
     ":worker: {env} {worker} for CID: {cid}"),
    (r"delete_collection\((?P<cid>\d+)\)",
     "{status}: Delete CouchDB {env} on "
     ":worker: {worker} for CID: {cid}"),
    (r"couchdb_sync_db_by_collection.main\(url_api_collection="
     r"'https://registry.cdlib.org/api/v1/collection/(?P<cid>\d+)/'",
     "{status}: Sync CouchDB to production on "
     ":worker: {env} {worker} for CID: {cid}"),
    ("<fn--name> -- parse out collection id as cid ",
     "replacement template for message- needs cid env variables"))

# Matches credential tuples in job descriptions so they can be redacted.
re_object_auth = re.compile(r"object_auth=(\('\w+', '\S+'\))")
def create_execute_job_message(status, worker, job):
    """Create a formatted message for the job.

    Searches for a match to function, then fills in values. Returns a
    ``(subject, message)`` pair; credentials in the message are redacted.
    """
    env = os.environ.get('DATA_BRANCH')
    fallback = "{status}: {env} {worker} {job}".format(
        status=status, env=env, worker=worker, job=job.description)
    subject = fallback
    message = fallback
    for regex, msg_template in message_match_list:
        match = re.search(regex, job.description)
        if not match:
            continue
        cid = match.groupdict().get('cid', '?')
        subject = msg_template.format(
            status=status, env=env, worker=worker, cid=cid)
        message = subject + '\n' + job.description
        break
    return subject, re_object_auth.sub('object_auth=<REDACTED>', message)
def exception_to_sns(job, *exc_info):
    """rq exception handler: report a failed job to the SNS message queue.

    Logs the failure and publishes a FAILED subject/message pair built from
    the job description and the exception value (``exc_info[1]``).
    """
    subject = 'FAILED: job {}'.format(job.description)
    message = 'ERROR: job {} failed\n{}'.format(job.description, exc_info[1])
    # Use the module-level logger (defined at top of file) rather than the
    # root logger, for consistency with the rest of this module.
    logger.error(message)
    publish_to_harvesting(subject, message)
class SNSWorker(HerokuWorker):
    """rq worker that publishes start/completion SNS messages for each job."""

    def execute_job(self, job, queue):
        """Spawns a work horse to perform the actual work and passes it a job.

        The worker will wait for the work horse and make sure it executes
        within the given timeout bounds, or will end the work horse with
        SIGALRM. Publishes a "Started" SNS message before forking and a
        "Completed" message after the work horse is monitored to completion.
        """
        # self.key looks like '<prefix>:<name>.<suffix>'; extract the bare
        # worker name for the notification text.
        worker_name = (self.key.rsplit(':', 1)[1]).rsplit('.', 1)[0]
        subject, msg = create_execute_job_message("Started", worker_name, job)
        logging.info(msg)
        publish_to_harvesting(subject, msg)
        self.set_state('busy')
        self.fork_work_horse(job, queue)
        self.monitor_work_horse(job)
        subject, msg = create_execute_job_message("Completed", worker_name,
                                                  job)
        logging.info(msg)
        publish_to_harvesting(subject, msg)
        self.set_state('idle')
| {
"repo_name": "barbarahui/harvester",
"path": "harvester/rq_worker_sns_msgs.py",
"copies": "3",
"size": "3794",
"license": "bsd-3-clause",
"hash": -715553217585658400,
"line_mean": 40.2391304348,
"line_max": 78,
"alpha_frac": 0.6122825514,
"autogenerated": false,
"ratio": 3.526022304832714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5638304856232713,
"avg_score": null,
"num_lines": null
} |
"""A custom utils library used across docker scripts."""
import argparse
import dns
import inspect
import json
import os
import time
def standard_arg_parser(desc):
    """Return an ArgumentParser preconfigured with the options shared by the
    docker helper scripts: --image, --bin, --dns, --uid and config_path.

    Note: the default uid is computed once, when the parser is built.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description=desc)

    # NOTE(review): the short option is '-i-' (with a trailing dash); '-i'
    # may have been intended -- confirm before changing, as fixing it would
    # alter the command-line interface.
    parser.add_argument(
        '-i-', '--image',
        action='store',
        default='onedata/worker',
        help='docker image to use for the container',
        dest='image')

    parser.add_argument(
        '-b', '--bin',
        action='store',
        default=os.getcwd(),
        help='path to the code repository (precompiled)',
        dest='bin')

    parser.add_argument(
        '-d', '--dns',
        action='store',
        default='auto',
        help='IP address of DNS or "none" - if no dns should be started or \
"auto" - if it should be started automatically',
        dest='dns')

    parser.add_argument(
        '-u', '--uid',
        action='store',
        default=generate_uid(),
        help='uid that will be concatenated to docker names',
        dest='uid')

    parser.add_argument(
        'config_path',
        action='store',
        help='path to json configuration file')

    return parser
def merge(d, merged):
    """Merge dict *merged* into dict *d* in place, summing values on
    common keys."""
    for key, value in merged.items():
        if key in d:
            d[key] = d[key] + value
        else:
            d[key] = value
def set_up_dns(config, uid):
    """Set up DNS configuration values, starting the server if needed.

    Returns a ``(dns_servers, dns_output)`` pair: empty for 'none', a started
    server for 'auto', otherwise the given IP address as-is.
    """
    if config == 'none':
        return [], {}
    if config != 'auto':
        return [config], {}
    dns_config = dns.up(uid)
    return [dns_config['dns']], dns_config
def get_file_dir(file_path):
    """Return the absolute path of the directory containing *file_path*."""
    resolved = os.path.realpath(file_path)
    return os.path.dirname(resolved)
def get_script_dir():
    """Return the absolute path of the directory containing the caller's
    script (resolved via the caller's stack frame)."""
    caller_frame = inspect.stack()[1]
    caller_module = inspect.getmodule(caller_frame[0])
    return os.path.dirname(os.path.realpath(caller_module.__file__))
def parse_json_file(path):
    """Parse the JSON file at *path* and return the decoded object."""
    with open(path, 'r') as handle:
        return json.load(handle)
def format_hostname(node_name, uid):
    """Format a docker hostname from a node name and uid.

    node_name can be in format 'somename@' or 'somename'.
    """
    name = node_name.partition('@')[0]
    return '{0}.{1}.dev.docker'.format(name, uid)
def format_nodename(node_name, uid):
    """Format a full node name ('name@hostname') for a docker based on node
    name and uid, so different components are resolvable through DNS.

    node_name can be in format 'somename@' or 'somename'.
    """
    name = node_name.partition('@')[0]
    # Inlined format_hostname: hostname is '<name>.<uid>.dev.docker'.
    return '{0}@{1}.{2}.dev.docker'.format(name, name, uid)
def format_dockername(node_name, uid):
    """Format a docker container name ('name_uid') from node name and uid.

    node_name can be in format 'somename@' or 'somename'.
    """
    name = node_name.partition('@')[0]
    return name + '_' + uid
def generate_uid():
    """Return a uid (current Unix time, whole seconds, as a string) that can
    be used to group dockers in DNS."""
    now = time.time()
    return str(int(now))
| {
"repo_name": "xorver/oneprovider_ccm",
"path": "bamboos/docker/environment/common.py",
"copies": "1",
"size": "3352",
"license": "mit",
"hash": -7336870093287537000,
"line_mean": 26.7024793388,
"line_max": 77,
"alpha_frac": 0.616646778,
"autogenerated": false,
"ratio": 3.8796296296296298,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.999627640762963,
"avg_score": 0,
"num_lines": 121
} |
"""AcyclicReachability.py
Bit-parallel algorithm for testing which vertices can reach which other
vertices in a DAG.
Usage:
R = Reachability(G)
...
R.reachable(source,destination)
returns a boolean value: True if G contains a path from the source vertex
to the destination vertex, and False otherwise. The initialization of R
performs a linear number of bitvector operations, after which each reachability
test takes constant time to perform.
D. Eppstein, April 2009.
"""
from .partial_order import TopologicalOrder
class Reachability:
    def __init__(self, G):
        """Initialize a reachability data structure for the given DAG."""
        self.key = {}
        self.canReach = []
        order = TopologicalOrder(G)
        order.reverse()
        # Process vertices in reverse topological order so every successor's
        # bitvector already exists when its predecessor is handled.
        for v in order:
            k = self.key[v] = len(self.canReach)
            bits = 1 << k  # every vertex reaches itself
            for w in G[v]:
                bits |= self.canReach[self.key[w]]
            self.canReach.append(bits)

    def reachable(self, source, destination):
        """Test whether the DAG has a path from source to destination."""
        destination_bit = 1 << self.key[destination]
        return (destination_bit & self.canReach[self.key[source]]) != 0
| {
"repo_name": "jfinkels/PADS",
"path": "pads/acyclic_reachability.py",
"copies": "1",
"size": "1191",
"license": "mit",
"hash": 2496563003467608000,
"line_mean": 29.5384615385,
"line_max": 79,
"alpha_frac": 0.6389588581,
"autogenerated": false,
"ratio": 3.866883116883117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 39
} |
#AD 440
#Cloud Practicum
#
#File contains methods for all api routing calls.
#This includes methods for GET,PUT,POST,DELETE
#
#
from flask import Flask
from flask import make_response
from flask import request
from flask import jsonify
from azure_components import static
from azure_components.api_methods import *
from flask.ext.cors import CORS
# Flask application instance for the image API.
app = Flask(__name__)

# Enable Cross-Origin Resource Sharing for all domains on all routes.
CORS(app)
# route for get images call, GET METHOD
@app.route("/getImages", methods=['GET'])
def getImages():
    """Return a JSON page of images for the caller identified by the
    username/token/secret headers; supports optional timestamp paging,
    tag filtering and a 'prev' direction flag."""
    # required auth headers
    username = request.headers.get('username')
    token = request.headers.get('token')
    secret = request.headers.get('secret')
    # reject the request if any mandatory header is missing
    if not all((username, token, secret)):
        # returns json error for null header parameters
        rtn_error = jsonify(request=static.app_json.api_parameters_error_json)
        return rtn_error
    # timestamp is an optional paging cursor
    timestamp = request.headers.get('timestamp')
    if timestamp is None:
        # 0 means "first page of results" -- no cursor needed
        timestamp = 0
    tags = request.headers.get('tags')
    # optional comma-separated tag filter
    if tags is None:
        tags_new = []
    else:
        tags_new = tags.split(',')
    prev = request.headers.get('prev')
    # paging direction defaults to forward
    if prev is None:
        prev = 'false'
    # delegate to the api layer and wrap the result as JSON
    rtn_json = getImagesJSON(timestamp, prev, tags_new, username, token, secret)
    rtn_body = jsonify(request=rtn_json)
    return rtn_body
# route for uploading images, POST METHOD
@app.route("/uploadImage", methods=['POST'])
def uploadImage():
    """Store an uploaded image blob (request body) under *filename* for the
    authenticated user, with optional comma-separated tags."""
    # required auth/content headers; the image bytes arrive as the raw body
    username = request.headers.get('username')
    blob = request.data
    filename = request.headers.get('filename')
    token = request.headers.get('token')
    secret = request.headers.get('secret')
    # reject the request if any mandatory parameter is missing
    if not all((username, blob, filename, token, secret)):
        # returns json error for null header parameters
        rtn_error = jsonify(request=static.app_json.api_parameters_error_json)
        return rtn_error
    tags = request.headers.get('tags')
    # optional comma-separated tag list
    if tags is None:
        tags_new = []
    else:
        tags_new = tags.split(',')
    # delegate to the api layer and wrap the result as JSON
    rtn_json = uploadImageJSON(username, blob, filename, token, secret, tags_new)
    rtn_body = jsonify(request=rtn_json)
    return rtn_body
# route for deleting images, DELETE METHOD
@app.route("/deleteImage", methods=['DELETE'])
def deleteImage():
    """Delete the image identified by the blobURL header."""
    # required headers
    blobURL = request.headers.get('blobURL')
    token = request.headers.get('token')
    secret = request.headers.get('secret')
    # reject the request if any mandatory header is missing
    if not all((blobURL, token, secret)):
        # returns json error for null header parameters
        rtn_error = jsonify(request=static.app_json.api_parameters_error_json)
        return rtn_error
    # delegate to the api layer and wrap the result as JSON
    rtn_json = deleteImageJSON(blobURL, token, secret)
    rtn_body = jsonify(request=rtn_json)
    return rtn_body
# route for updating tags, PUT METHOD
@app.route("/updateTags", methods=['PUT'])
def updateTags():
    """Replace the tag list of the image identified by the blobURL header."""
    # required headers
    blobURL = request.headers.get('blobURL')
    token = request.headers.get('token')
    secret = request.headers.get('secret')
    # reject the request if any mandatory header is missing
    if not all((blobURL, token, secret)):
        # returns json error for null header parameters
        rtn_error = jsonify(request=static.app_json.api_parameters_error_json)
        return rtn_error
    tags = request.headers.get('tags')
    # optional comma-separated tag list; absent means "clear tags"
    if tags is None:
        tags_new = []
    else:
        tags_new = tags.split(',')
    # delegate to the api layer and wrap the result as JSON
    rtn_json = updateTagsJSON(blobURL, tags_new, token, secret)
    rtn_body = jsonify(request=rtn_json)
    return rtn_body
# Script entry point: initializes the Flask app and runs the built-in server.
if __name__ == "__main__":
    # NOTE(review): debug mode enables the interactive debugger and reloader;
    # it must be disabled for any production deployment.
    app.debug=True
    app.run()
| {
"repo_name": "rjhunter8285/nsc-cloudproject-s22016",
"path": "api/FlaskApp/FlaskApp/__init__.py",
"copies": "2",
"size": "4858",
"license": "apache-2.0",
"hash": 7035977020739480000,
"line_mean": 31.3866666667,
"line_max": 81,
"alpha_frac": 0.6770275834,
"autogenerated": false,
"ratio": 3.927243330638642,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5604270914038643,
"avg_score": null,
"num_lines": null
} |
"""ADA Claim Form Fields
all dates formatted '%m/%d/%Y'
HEADER INFORMATION
1. Type of Transaction
Statement of Actual Services
Request for Predetermination
2. Predetermination/Preauthorization Number
PRIMARY PAYER INFORMATION
3. Name & Address
OTHER COVERAGE
4. Other Dental or Medical Coverage? (No, Yes)
5. Subscriber Name
6. Date of Birth
7. Gender
8. Subscriber ID
9. Plan/Group Number
10. Relationship (Self, Spouse, Dependent, Other)
11. Other Carrier Name & Address
PRIMARY SUBSCRIBER
12. Name & Address
13. Date of Birth
14. Gender
15. Subscriber ID
16. Plan/Group Number
17. Employer Name
PATIENT INFORMATION
18. Relationship (Self, Spouse, Dependent, Other)
19. Student Status (Full/Part Time)
20. Name & Address
21. Date of Birth (MM/DD/CCYY)
22. Gender
23. Patient ID/Account # (assigned by dentist)
RECORD OF SERVICES
24. Procedure Date
25. Area of Oral Cavity
26. Tooth System
27. Tooth Number(s)
28. Tooth Surface(s)
29. Procedure
30. Description
31. Fee
32. Other Fee(s)
33. Total Fee
34. Place an X on each missing tooth
35. Remarks
AUTHORIZATIONS
36. Patient/Guardian Signature
37. Assignment of Benefits
ANCILLARY INFORMATION
38. Place of Treatment (Office, Hospital, ECF, Other)
39. Number of enclosures
40. Is Treatment for Orthodontics? (No, Yes)
41. Date Appliance Placed
42. Months of Treatment Remaining
43. Replacement of Prosthesis? (No, Yes)
44. Date Prior Placement
45. Treatment Resulting From (Occupational illness, Auto accident, Other accident)
46. Date of Accident
47. Auto Accident State
BILLING DENTIST OR ENTITY
48. Name & Address
49. NPI
50. License Number
51. SSN or TIN
52. Phone Number
52A. Additional Provider ID
TREATING DENTIST
53. Signature
54. NPI
55. License Number
56. Address
56A. Treating Provider Specialty
57. Phone Number
58. Additional Provider ID
Whew!
"""
import forms
import hello
import model
import web
class update_claim:
def POST(self, claimid):
claimid = int(claimid)
journal = model.get_journal_entry(claimid)
claim = model.get_claim(claimid)
txs = model.get_tx_for_claim(claimid)
pt = model.get_pt(journal.patientid)
# validate form input
inp = web.input()
form = forms.claim()
if not form.validates(inp):
plan = model.get_plan(claim.planid)
return hello.render.claim(pt, claim, form, plan, txs)
# update the claim
model.db.update('claim', where='journalid=%d' % journal.id,
notes=form.notes.get_value())
# update the journal
model.db.update('journal', where='id=%d' % journal.id,
summary=form.summary.get_value())
# now go through and update the treatment
deltains = 0.0
deltapt = 0.0
for tx in txs:
fee = inp['fee%d' % tx.id]
if fee:
fee = float(fee)
else:
fee = 0.0
allowed = inp['allowed%d' % tx.id]
if allowed:
allowed = float(allowed)
else:
allowed = None
inspaid = inp['inspaid%d' % tx.id]
if inspaid:
inspaid = float(inspaid)
else:
inspaid = None
ptpaid = inp['ptpaid%d' % tx.id]
if ptpaid:
ptpaid = float(ptpaid)
else:
ptpaid = None
deltains += (inspaid or 0.0) - float(tx.inspaid or 0.0)
deltapt += (ptpaid or 0.0) - float(tx.ptpaid or 0.0)
model.db.update('tx', where='id=%d' % tx.id,
fee=fee,
allowed=allowed,
inspaid=inspaid,
ptpaid=ptpaid)
if deltains >= 0.01:
model.new_payment_for_pt(pt, 'insurance payment', -deltains)
if deltapt >= 0.01:
model.new_payment_for_pt(pt, 'patient payment', -deltapt)
raise web.seeother('/journal/%d' % journal.id)
| {
"repo_name": "homey1337/efnpractice",
"path": "claim.py",
"copies": "1",
"size": "4050",
"license": "bsd-2-clause",
"hash": -1701860580304280800,
"line_mean": 22.4104046243,
"line_max": 82,
"alpha_frac": 0.6125925926,
"autogenerated": false,
"ratio": 3.32239540607055,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.443498799867055,
"avg_score": null,
"num_lines": null
} |
""" A daemon thread manager that can be controlled from the privledge shell
"""
from privledge import block
from privledge import settings
from privledge import utils
from privledge import messaging
from privledge.ledger import Ledger
import json
import socket
# --- Shared daemon state (module level) ---
# The Ledger this node is currently a member of; None when not joined.
ledger = None
# Known peers of the current ledger; presumably keyed by address -- TODO confirm.
peers = dict()
# Results of the most recent discovery scan: ledger hash -> set of addresses.
disc_ledgers = dict()
# Peers found during discovery; populated elsewhere -- TODO confirm producer.
disc_peers = set()
# Our private key when this node created/owns the ledger root; None otherwise.
privkey = None
# Background thread handles, created and torn down by ledger_listeners().
_udp_thread = None
_udp_hb_thread = None
_tcp_thread = None
def joined():
    """Return True when this daemon currently belongs to a ledger."""
    return ledger is not None
def is_root():
    """Return True when our private key is the root key of the ledger."""
    # Guard clauses: without a ledger, a root block and a private key
    # there is nothing to compare.
    if ledger is None or ledger.root is None or privkey is None:
        return False
    # Root membership: the genesis block carries our encoded public key.
    return ledger.root.message == utils.encode_key(privkey)
# Create a ledger rooted at a freshly supplied key pair.
def create_ledger(key):
    """Create a brand-new ledger rooted at *key* and start the listeners.

    The encoded public half of *key* becomes the signed genesis (root)
    block; the private half is retained so this node can act as the
    ledger's root authority.
    """
    global ledger, privkey
    # Genesis block: a signed key block carrying our encoded public key.
    genesis = block.Block(block.BlockType.key, None, utils.encode_key(key))
    genesis.sign(key)
    ledger = Ledger()
    ledger.append(genesis)
    privkey = key
    # Bring up the UDP/TCP listeners and heartbeat now that we are live.
    ledger_listeners(True)
def ledger_listeners(start):
    """Start (start=True) or stop (start=False) the daemon's network threads.

    Manages three module-level daemon threads: the persistent UDP listener,
    the TCP listener, and the UDP heartbeat. Stopping is idempotent: absent
    threads are skipped and all handles end up None.
    """
    global _udp_thread, _udp_hb_thread, _tcp_thread
    if start:
        # Spawn UDP persistent listener thread
        _udp_thread = messaging.UDPListener(settings.BIND_IP, settings.BIND_PORT)
        _udp_thread.start()
        # Spawn TCP listener thread
        _tcp_thread = messaging.TCPListener(settings.BIND_IP, settings.BIND_PORT)
        _tcp_thread.start()
        # Spawn UDP heartbeat thread
        _udp_hb_thread = messaging.UDPHeartbeat()
        _udp_hb_thread.start()
    else:
        # The identical stop/join/clear sequence was previously copy-pasted
        # three times; it is factored into _stop_listener below.
        _udp_thread = _stop_listener(_udp_thread, "UDP Listening")
        _tcp_thread = _stop_listener(_tcp_thread, "TCP Listening")
        _udp_hb_thread = _stop_listener(_udp_hb_thread, "Heartbeat")


def _stop_listener(thread, label):
    """Signal *thread* to stop, join it, and return None; no-op when None."""
    if thread is not None:
        utils.log_message("Killing {0} Thread...".format(label))
        thread.stop.set()
        thread.join()
    return None
# Join a ledger with a specified public key
def join_ledger(public_key_hash, member):
    """Join the ledger identified by *public_key_hash* via peer *member*.

    Sends a TCP join request to *member*, verifies that the returned public
    key hashes to the requested value, then syncs blocks and peers and
    starts the local listeners. Errors are logged, not raised.
    """
    global ledger
    # Check to make sure we aren't part of a ledger yet
    if joined():
        print("You are already a member of a ledger")
        return
    utils.log_message("Spawning TCP Connection Thread to {0}".format(member))
    # Build the join request and run it on a worker thread, blocking
    # until the response arrives (thread.message holds the raw reply).
    join_message = messaging.Message(settings.MSG_TYPE_JOIN, public_key_hash).prep_tcp()
    thread = messaging.TCPMessageThread(member, join_message)
    thread.start()
    thread.join()
    # If the message is a success, import the key
    try:
        message = json.loads(thread.message, object_hook=utils.message_decoder)
        if message.msg_type == settings.MSG_TYPE_SUCCESS:
            # Verify the returned key really is the ledger we asked for
            # by re-hashing its encoded form.
            key = utils.get_key(message.msg)
            key_hash = utils.gen_hash(utils.encode_key(key))
            if public_key_hash == key_hash:
                # Hooray! We have a match
                utils.log_message("Joined ledger {}".format(public_key_hash), utils.Level.FORCE)
                # Sync Ledger
                messaging.block_sync(member)
                # Request peers
                messaging.peer_sync(member)
                # Start Listeners
                ledger_listeners(True)
            else:
                raise ValueError('Public key returned does not match requested hash: {0}'.format(key_hash))
        else:
            raise ValueError('Response was not as expected: {0}'.format(message.msg_type))
    except (ValueError, TypeError) as e:
        # Covers malformed JSON, a non-success reply, and the hash mismatch above.
        utils.log_message("Not a valid response from {0}: {1}".format(member, e))
def leave_ledger():
    """Tear down the listeners and drop ledger membership.

    Returns a human-readable status string for the shell.
    """
    global ledger, _udp_thread, _tcp_thread
    # Stop all network threads first, whether or not we hold a ledger.
    ledger_listeners(False)
    if ledger is None:
        return "Not a member of a ledger, cannot leave"
    status = "Left ledger {0}".format(ledger.id)
    ledger = None
    return status
def discover(ip='<broadcast>', port=settings.BIND_PORT, timeout=settings.DISCOVERY_TIMEOUT):
    """Broadcast a discovery query and collect responding ledgers.

    Returns a dict mapping ledger id (public key hash) to the set of
    (ip, port) addresses that advertised it. Responses from our own
    address are filtered out. Listens for *timeout* seconds.
    """
    utils.log_message("Starting Discovery for {} seconds".format(timeout))
    results = dict()
    # Get our IP address - I don't like this hack but it works
    # https://stackoverflow.com/questions/24196932/how-can-i-get-the-ip-address-of-eth0-in-python/24196955
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    s.connect((ip, port))
    ip_self = s.getsockname()[0]
    s.close()
    # Send out the discovery query as a broadcast datagram.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    message = repr(messaging.Message(settings.MSG_TYPE_DISCOVER))
    s.sendto(message.encode(), (ip, port))
    try:
        # Listen for responses until *timeout* seconds elapse
        # (recvfrom then raises socket.timeout, an OSError subclass).
        s.settimeout(timeout)
        while True:
            data, address = s.recvfrom(4096)
            try:
                message = json.loads(data.decode(), object_hook=utils.message_decoder)
                if message.msg_type == settings.MSG_TYPE_SUCCESS:
                    utils.log_message("Discovered ledger {0} at {1}".format(message.msg, address), utils.Level.MEDIUM)
                    # Received response
                    # Is the response our own ledger? Skip it.
                    if address[0] == ip_self:
                        continue
                    # Group responding addresses by ledger hash.
                    if message.msg not in results:
                        results[message.msg] = set()
                    results[message.msg].add(address)
            except Exception:
                # Narrowed from a bare `except:`: a malformed or non-JSON
                # datagram is logged and skipped, but KeyboardInterrupt and
                # SystemExit are no longer swallowed.
                utils.log_message("Malformed response from {0}: {1}".format(data, address))
    except OSError as e:
        # socket.timeout lands here and ends the listening loop.
        utils.log_message("Exception: {0}".format(e))
    finally:
        s.close()
    return results
| {
"repo_name": "elBradford/privledge",
"path": "privledge/daemon.py",
"copies": "1",
"size": "6320",
"license": "mit",
"hash": 8597350922429139000,
"line_mean": 29.8292682927,
"line_max": 118,
"alpha_frac": 0.6132911392,
"autogenerated": false,
"ratio": 3.8210399032648126,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9926704074839421,
"avg_score": 0.0015253935250782431,
"num_lines": 205
} |
""" Adafactor Optimizer
Lifted from https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py
Original header/copyright below.
"""
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import math
class Adafactor(torch.optim.Optimizer):
"""Implements Adafactor algorithm.
This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`
(see https://arxiv.org/abs/1804.04235)
Note that this optimizer internally adjusts the learning rate depending on the
*scale_parameter*, *relative_step* and *warmup_init* options.
To use a manual (external) learning rate schedule you should set `scale_parameter=False` and
`relative_step=False`.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining parameter groups
lr (float, optional): external learning rate (default: None)
eps (tuple[float, float]): regularization constants for square gradient
and parameter scale respectively (default: (1e-30, 1e-3))
clip_threshold (float): threshold of root mean square of final gradient update (default: 1.0)
decay_rate (float): coefficient used to compute running averages of square gradient (default: -0.8)
beta1 (float): coefficient used for computing running averages of gradient (default: None)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
scale_parameter (bool): if True, learning rate is scaled by root mean square of parameter (default: True)
relative_step (bool): if True, time-dependent learning rate is computed
instead of external learning rate (default: True)
warmup_init (bool): time-dependent learning rate computation depends on
whether warm-up initialization is being used (default: False)
"""
def __init__(self, params, lr=None, eps=1e-30, eps_scale=1e-3, clip_threshold=1.0,
decay_rate=-0.8, betas=None, weight_decay=0.0, scale_parameter=True, warmup_init=False):
relative_step = lr is None
if warmup_init and not relative_step:
raise ValueError('warmup_init requires relative_step=True')
beta1 = None if betas is None else betas[0] # make it compat with standard betas arg
defaults = dict(lr=lr, eps=eps, eps_scale=eps_scale, clip_threshold=clip_threshold, decay_rate=decay_rate,
beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter,
relative_step=relative_step, warmup_init=warmup_init)
super(Adafactor, self).__init__(params, defaults)
    @staticmethod
    def _get_lr(param_group, param_state):
        """Compute the learning rate for this step.

        In relative-step mode the lr is time-dependent and (optionally)
        scaled by the parameter RMS; the result is also written back into
        param_group['lr'] as a side effect. With an external lr
        (relative_step=False) the group's configured lr is returned as-is.
        """
        if param_group['relative_step']:
            # Warm-up: lr cap grows linearly (1e-6 * step); otherwise fixed at 1e-2.
            min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2
            # Inverse-sqrt decay, clipped by the cap above.
            lr_t = min(min_step, 1.0 / math.sqrt(param_state['step']))
            param_scale = 1.0
            if param_group['scale_parameter']:
                # Scale by the parameter's RMS, floored at eps_scale.
                param_scale = max(param_group['eps_scale'], param_state['RMS'])
            param_group['lr'] = lr_t * param_scale
        return param_group['lr']
@staticmethod
def _get_options(param_group, param_shape):
factored = len(param_shape) >= 2
use_first_moment = param_group['beta1'] is not None
return factored, use_first_moment
@staticmethod
def _rms(tensor):
return tensor.norm(2) / (tensor.numel() ** 0.5)
    def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col):
        """Reconstruct 1/sqrt(second moment) from the factored row/col stats.

        The row factor is normalized by its mean so the outer product of the
        two rank-1 factors preserves the overall scale; the returned tensor
        is ready to be multiplied elementwise with the gradient.
        """
        # rsqrt_ is safe to do in-place: the division already produced a temporary.
        r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)
        c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
        return torch.mul(r_factor, c_factor)
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                # All internal math is done in fp32 even for half-precision grads.
                if grad.dtype in {torch.float16, torch.bfloat16}:
                    grad = grad.float()
                if grad.is_sparse:
                    raise RuntimeError('Adafactor does not support sparse gradients.')
                state = self.state[p]
                grad_shape = grad.shape
                factored, use_first_moment = self._get_options(group, grad_shape)
                # State Initialization
                if len(state) == 0:
                    state['step'] = 0
                    if use_first_moment:
                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(grad)
                    if factored:
                        # Factored second moment: one row and one column vector
                        # instead of a full-size accumulator (the memory saving).
                        state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1]).to(grad)
                        state['exp_avg_sq_col'] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)
                    else:
                        state['exp_avg_sq'] = torch.zeros_like(grad)
                    state['RMS'] = 0
                else:
                    # Restored/persisted state: move onto the grad's device/dtype.
                    if use_first_moment:
                        state['exp_avg'] = state['exp_avg'].to(grad)
                    if factored:
                        state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad)
                        state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad)
                    else:
                        state['exp_avg_sq'] = state['exp_avg_sq'].to(grad)
                p_data_fp32 = p.data
                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    p_data_fp32 = p_data_fp32.float()
                state['step'] += 1
                # Parameter RMS feeds the scale_parameter lr adjustment in _get_lr.
                state['RMS'] = self._rms(p_data_fp32)
                lr_t = self._get_lr(group, state)
                # Time-dependent second-moment decay (decay_rate is negative).
                beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])
                update = grad ** 2 + group['eps']
                # NOTE(review): the active add_(scalar, tensor) calls below use a
                # deprecated overload, apparently kept for pre-1.6 PyTorch
                # compatibility (the 1.6+ forms are in the adjacent comments);
                # confirm minimum supported torch version before modernizing.
                if factored:
                    exp_avg_sq_row = state['exp_avg_sq_row']
                    exp_avg_sq_col = state['exp_avg_sq_col']
                    exp_avg_sq_row.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-1))
                    exp_avg_sq_col.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-2))
                    #exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t) # pytorch 1.6+
                    #exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t)
                    # Approximation of exponential moving average of square of gradient
                    update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
                    update.mul_(grad)
                else:
                    exp_avg_sq = state['exp_avg_sq']
                    exp_avg_sq.mul_(beta2t).add_(1.0 - beta2t, update)
                    #exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t) # pytorch 1.6+
                    update = exp_avg_sq.rsqrt().mul_(grad)
                # Clip the update so its RMS does not exceed clip_threshold.
                update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0))
                update.mul_(lr_t)
                if use_first_moment:
                    exp_avg = state['exp_avg']
                    exp_avg.mul_(group["beta1"]).add_(1 - group["beta1"], update)
                    #exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1']) # pytorch 1.6+
                    update = exp_avg
                if group['weight_decay'] != 0:
                    # Decoupled weight decay, scaled by the effective lr.
                    p_data_fp32.add_(-group["weight_decay"] * lr_t, p_data_fp32)
                    #p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * lr_t) # pytorch 1.6+
                p_data_fp32.add_(-update)
                # Copy the fp32 result back into half-precision storage.
                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    p.data.copy_(p_data_fp32)
return loss | {
"repo_name": "rwightman/pytorch-image-models",
"path": "timm/optim/adafactor.py",
"copies": "1",
"size": "8126",
"license": "apache-2.0",
"hash": 1195391183907307300,
"line_mean": 45.7068965517,
"line_max": 114,
"alpha_frac": 0.5617770121,
"autogenerated": false,
"ratio": 3.737810487580497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47995874996804966,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.