| column | type | range |
|---|---|---|
| hexsha | string | length 40–40 |
| size | int64 | 2–1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4–245 |
| max_stars_repo_name | string | length 6–130 |
| max_stars_repo_head_hexsha | string | length 40–40 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count | int64 | 1–191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24–24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24–24, nullable |
| max_issues_repo_path | string | length 4–245 |
| max_issues_repo_name | string | length 6–130 |
| max_issues_repo_head_hexsha | string | length 40–40 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count | int64 | 1–67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24–24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24–24, nullable |
| max_forks_repo_path | string | length 4–245 |
| max_forks_repo_name | string | length 6–130 |
| max_forks_repo_head_hexsha | string | length 40–40 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count | int64 | 1–105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24–24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24–24, nullable |
| content | string | length 2–1.02M |
| avg_line_length | float64 | 1–417k |
| max_line_length | int64 | 1–987k |
| alphanum_fraction | float64 | 0–1 |
| content_no_comment | string | length 0–1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
hexsha: 1c42ef0d6619bccee045b8194559a4e329ad636f | size: 2,325 | ext: py | lang: Python
repo: gyrospectre/securitybot @ 90db2ae532667c48ca080108b895c2e1fe16b1e8 | path: securitybot/auth/auth.py | licenses: ["Apache-2.0"]
stars: 3 (2020-10-09T04:46:15.000Z to 2021-12-30T10:12:37.000Z) | issues: null | forks: 1 (2020-08-11T19:28:13.000Z to 2020-08-11T19:28:13.000Z)
'''
An authentication object for doing 2FA on Slack users.
'''
__author__ = 'Alex Bertsch, Antoine Cardon'
__email__ = 'abertsch@dropbox.com, antoine.cardon@algolia.com'
import pytz
from datetime import datetime, timedelta
from abc import ABCMeta, abstractmethod
from enum import Enum, unique
@unique
class AuthStates(Enum):
NONE = 1
PENDING = 2
AUTHORIZED = 3
DENIED = 4
class BaseAuthClient(object, metaclass=ABCMeta):
'''
When designing Auth subclasses, try to make sure that the authorization
attempt is as non-blocking as possible.
'''
@abstractmethod
def __init__(self, reauth_time, auth_attrib):
'''
Initialise default values for global config
'''
self.reauth_time = reauth_time
self.auth_attrib = auth_attrib
self.auth_time = timedelta(seconds=self.reauth_time)
def _auth_attribute(self, user):
# Return the attribute of a User object that
# will be used to match to the auth platform.
if self.auth_attrib == 'username':
return user['name']
elif user.get_email() and self.auth_attrib == 'email':
return user.get_email()
elif user.get_displayname() and self.auth_attrib == 'displayname':
return user.get_displayname()
return False
@abstractmethod
def can_auth(self) -> bool:
'''
Returns:
(bool) Whether 2FA is available.
'''
raise NotImplementedError()
@abstractmethod
def auth(self, reason: str = None) -> None:
'''
Begins an authorization request, which should be non-blocking.
Args:
reason (str): Optional reason string that may be provided
'''
raise NotImplementedError()
    def _recently_authed(self, user) -> bool:
return (
(datetime.now(tz=pytz.utc) - user._last_auth_time) <
timedelta(seconds=self.reauth_time)
)
@abstractmethod
def auth_status(self) -> int:
'''
Returns:
            (enum) The current auth status, one of AuthStates.
'''
raise NotImplementedError()
@abstractmethod
def reset(self) -> None:
'''
Resets auth status.
'''
raise NotImplementedError()
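A minimal concrete subclass sketch (hypothetical; not part of securitybot) showing how the abstract interface above can be satisfied, here with a toy client that approves every request without blocking:

class AlwaysApproveAuthClient(BaseAuthClient):
    '''Illustrative only: authorizes every request immediately.'''
    def __init__(self, reauth_time=3600, auth_attrib='username'):
        super().__init__(reauth_time, auth_attrib)
        self._state = AuthStates.NONE
    def can_auth(self) -> bool:
        # The 2FA backend is always "available" in this toy client.
        return True
    def auth(self, reason: str = None) -> None:
        # Non-blocking: resolve the request instantly instead of polling.
        self._state = AuthStates.AUTHORIZED
    def auth_status(self) -> int:
        return self._state
    def reset(self) -> None:
        self._state = AuthStates.NONE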
avg_line_length: 25.833333 | max_line_length: 75 | alphanum_fraction: 0.615484
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c42efec5fd44741cee47e37a2eb70dd452d2d50 | size: 622 | ext: py | lang: Python
repo: owaishanif786/python @ 212d626e10bebf161ee123459dc5f0384d9540ac | path: pcap3103/inhertiance_lab/employee.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
class Employee:
def __init__(self, name, title, email_address, phone_number=''):
self.name = name
self.title = title
self.email_address = email_address
self.phone_number = phone_number
def email_signature(self, include_phone=False):
signature = f'{self.name}\n{self.title}\n{self.email_address}\n'
        if include_phone:
signature += f'{self.phone_number}\n'
print(signature)
if __name__ == '__main__':
emp = Employee('lil', 'lilWolf', 'lil@example.com', '030012345')
emp.email_signature()
emp.email_signature(True)
avg_line_length: 34.555556 | max_line_length: 72 | alphanum_fraction: 0.633441
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c42eff9feef990ed123d1f30a221e445dc6734a | size: 1,000 | ext: py | lang: Python
repo: siddharth-143/Python @ 293f4643a3a13e3b82d23fd8922db54dbb0f12bc | path: OpenCV2/Feature_Detection_and_Description/Corner_Detection_with_Shi-Tomasi_coner_method.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
"""
Corner Detection with Shi-Tomasi Coner Detection method
"""
# Python progran to illustrate
# corner detection with
# Shi-Tomasi detection method
# organizing imports
import cv2
import numpy as np
import matplotlib.pyplot as plt
# path to input image specified and
# image is loaded with imread command
img = cv2.imread("../images/1.jpeg")
# convert image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Shi-Tomasi corner detection function
# we are detecting only the 100 best corners here
# you can change the number to get desired result
corner = cv2.goodFeaturesToTrack(gray, 100, 0.01, 10)
# convert corner values to integers
# so that we will be able to draw circles on them
# (np.int0 was an alias of np.intp and was removed in NumPy 2.0)
corner = corner.astype(np.intp)
# draw red color circles on all corners
for i in corner:
x, y = i.ravel()
cv2.circle(img, (x, y), 3, (255, 0, 0), -1)
# resulting image
plt.imshow(img)
plt.show()
# de-allocate any associated memory usage
if cv2.waitKey(0) & 0xff == 27:
cv2.destroyAllWindows()
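For reference, the three numeric arguments to cv2.goodFeaturesToTrack above are maxCorners, qualityLevel and minDistance. A self-contained sketch with keyword arguments (synthetic image; parameter comments follow OpenCV's documentation) makes the call explicit:

import cv2
import numpy as np

canvas = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(canvas, (20, 20), (80, 80), 255, -1)  # a filled square has 4 corners
pts = cv2.goodFeaturesToTrack(
    canvas,
    maxCorners=100,     # keep at most the 100 strongest corners
    qualityLevel=0.01,  # reject corners weaker than 1% of the best score
    minDistance=10,     # minimum spacing (pixels) between accepted corners
)
print(None if pts is None else len(pts))  # expect 4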
avg_line_length: 24.390244 | max_line_length: 59 | alphanum_fraction: 0.729
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c42f07b5d8264b529a6f72cf2bb73a8a179756e | size: 1,152 | ext: py | lang: Python
repo: Mlitwin98/twitter-clone @ 4fbe754a4693c39ac4e9623f51ca42a7facecd2e | path: twitter-clone/twitter/migrations/0004_auto_20201003_1527.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
# Generated by Django 3.1 on 2020-10-03 13:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('twitter', '0003_auto_20201001_1902'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='backgroundPic',
field=models.ImageField(blank=True, null=True, upload_to='banners'),
),
migrations.AlterField(
model_name='profile',
name='profilePic',
field=models.ImageField(blank=True, null=True, upload_to='pics'),
),
migrations.CreateModel(
name='Follow',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('follower_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='following', to='twitter.profile')),
('following_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='followers', to='twitter.profile')),
],
),
]
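For context, a models.py consistent with this migration might look like the sketch below (the field definitions are taken from the operations above; the model layout itself is an assumption, not code from the repo):

from django.db import models

class Profile(models.Model):
    # Fields as altered by this migration
    backgroundPic = models.ImageField(blank=True, null=True, upload_to='banners')
    profilePic = models.ImageField(blank=True, null=True, upload_to='pics')

class Follow(models.Model):
    # Self-referential follower/following relation created by this migration
    follower_id = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='following')
    following_id = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='followers')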
avg_line_length: 34.909091 | max_line_length: 145 | alphanum_fraction: 0.614583
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c42f1ef23137c020f34bcdfca0a32d2cb13fcd5 | size: 22,575 | ext: py | lang: Python
repo: tsutterley/captoolkit @ 314c4d34f49012c25286478c943b0ab13c893c62 | path: captoolkit/fittopo.py | licenses: ["Apache-2.0"]
stars: 37 (2019-09-27T00:36:16.000Z to 2022-01-31T01:51:19.000Z) | issues: 3 (2020-02-27T21:22:50.000Z to 2020-10-14T01:31:26.000Z) | forks: 15 (2019-09-24T08:06:49.000Z to 2021-11-03T14:44:19.000Z)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Surface topography detrending of satellite and airborne altimetry
Program computes surface elevation residuals, containing only the temporal
component, by removing the static topography.
Depending on the number of observations in each solution, one of three models
is used to solve for the topography: (1) bi-quadratic, (2) bilinear, and (3)
the average.
User specifies a grid resolution, search radius and the number of
relocations that should be used to detrend the observations. Inside each
search area the model is centered (relocated) on the centroid of the data,
given the provided number of allowed relocations.
Given the possible overlap between solutions, the solution with the smallest
RMS is kept and data of poorer quality are overwritten.
Notes:
For missions in reference-track configuration, dx = dy = 250 m and a
search radius of 350 m are appropriate, and fewer than n=3 relocations are
usually needed to center the data (depends on the search radius).
This program can be run in parallel to process several files at the same
time (tiles or missions, etc.).
A good threshold ("-m" option) for switching from the biquadratic to the
bilinear model is around 10-15 points.
Example:
python fittopo.py /path/to/files/*.h5 -v lon lat t_year h_cor \
-d 1 1 -r 1 -q 3 -i 5 -z 5 -m 15 -k 1 -t 2012 -j 3031 -n 2
Credits:
captoolkit - JPL Cryosphere Altimetry Processing Toolkit
Johan Nilsson (johan.nilsson@jpl.nasa.gov)
Fernando Paolo (paolofer@jpl.nasa.gov)
Alex Gardner (alex.s.gardner@jpl.nasa.gov)
Jet Propulsion Laboratory, California Institute of Technology
"""
import warnings
warnings.filterwarnings("ignore")
import os
import h5py
import pyproj
import argparse
import numpy as np
import statsmodels.api as sm
from datetime import datetime
from scipy.spatial import cKDTree
from statsmodels.robust.scale import mad
# Default grid spacing in x and y (km)
DXY = [1, 1]
# Default min and max search radius (km)
RADIUS = [1]
# Default min obs within search radius to compute solution
MINOBS = 10
# Default number of iterations for solution
NITER = 5
# Default ref time for solution: 'year' | 'fixed'=full mean t | 'variable'=cap mean t
TREF = 'fixed'
# Default projection EPSG for solution (AnIS=3031, GrIS=3413)
PROJ = 3031
# Default data columns (lon,lat,time,height,error,id)
COLS = ['lon', 'lat', 't_sec', 'h_cor', 'h_rms']
# Default expression to transform time variable
EXPR = None
# Default order of the surface fit model
ORDER = 2
# Default number of obs. to switch to the mean solution
MLIM = 10
# Default njobs for parallel processing of *tiles*
NJOBS = 1
# Maximum slope allowed in the solution; larger values are clamped to SLOPE
SLOPE = 1.0
# Output description of solution
description = ('Compute surface elevation residuals '
'from satellite/airborne altimetry.')
# Define command-line arguments
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
'files', metavar='file', type=str, nargs='+',
help='file(s) to process (HDF5)')
parser.add_argument(
'-d', metavar=('dx','dy'), dest='dxy', type=float, nargs=2,
help=('spatial resolution for grid-solution (deg or km)'),
default=DXY,)
parser.add_argument(
'-r', metavar=('radius'), dest='radius', type=float, nargs=1,
help=('min and max search radius (km)'),
default=RADIUS,)
parser.add_argument(
'-q', metavar=('n_reloc'), dest='nreloc', type=int, nargs=1,
help=('number of relocations for search radius'),
default=[0],)
parser.add_argument(
'-i', metavar='n_iter', dest='niter', type=int, nargs=1,
help=('maximum number of iterations for model solution'),
default=[NITER],)
parser.add_argument(
'-z', metavar='min_obs', dest='minobs', type=int, nargs=1,
help=('minimum obs to compute solution'),
default=[MINOBS],)
parser.add_argument(
'-m', metavar=('mod_lim'), dest='mlim', type=int, nargs=1,
help=('minimum obs for higher order models'),
default=[MLIM],)
parser.add_argument(
'-k', metavar=('mod_order'), dest='order', type=int, nargs=1,
help=('order of the surface fit model: 1=lin or 2=quad'),
default=[ORDER],)
parser.add_argument(
'-t', metavar=('ref_time'), dest='tref', type=str, nargs=1,
help=('time to reference the solution to: year|fixed|variable'),
default=[TREF],)
parser.add_argument(
'-j', metavar=('epsg_num'), dest='proj', type=str, nargs=1,
help=('projection: EPSG number (AnIS=3031, GrIS=3413)'),
default=[str(PROJ)],)
parser.add_argument(
'-v', metavar=('x','y','t','h'), dest='vnames', type=str, nargs=4,
help=('name of lon/lat/t/h in the HDF5'),
default=COLS,)
parser.add_argument(
'-x', metavar=('expr'), dest='expr', type=str, nargs=1,
help="expression to apply to time (e.g. 't + 2000'), optional",
default=[EXPR],)
parser.add_argument(
'-n', metavar=('n_jobs'), dest='njobs', type=int, nargs=1,
help="for parallel processing of multiple tiles, optional",
default=[NJOBS],)
parser.add_argument(
'-s', metavar=('slope_lim'), dest='slplim', type=float, nargs=1,
help="slope limit for x/y direction (deg)",
default=[SLOPE],)
parser.add_argument(
'-p', dest='pshow', action='store_true',
help=('print diagnostic information to terminal'),
default=False)
args = parser.parse_args()
# Pass arguments
files = args.files # input file(s)
dx = args.dxy[0] * 1e3 # grid spacing in x (km -> m)
dy = args.dxy[1] * 1e3 # grid spacing in y (km -> m)
dmax = args.radius[0] * 1e3 # min search radius (km -> m)
nreloc = args.nreloc[0] # number of relocations
nlim = args.minobs[0] # min obs for solution
mlim = args.mlim[0] # min obs for parametric versus mean model
niter = args.niter[0] # number of iterations for solution
tref_ = args.tref[0] # ref time for solution (d.yr)
proj = args.proj[0] # EPSG number (GrIS=3413, AnIS=3031)
icol = args.vnames[:] # data input cols (x,y,t,h,err,id) [4]
expr = args.expr[0] # expression to transform time
njobs = args.njobs[0] # for parallel processing of tiles
order = args.order[0] # max order of the surface fit model
slplim = args.slplim[0] # max allowed surface slope in deg.
diag = args.pshow # print diagnostics to terminal
print('parameters:')
for p in list(vars(args).items()):
print(p)
def make_grid(xmin, xmax, ymin, ymax, dx, dy):
"""Construct output grid-coordinates."""
# Setup grid dimensions
Nn = int((np.abs(ymax - ymin)) / dy) + 1
Ne = int((np.abs(xmax - xmin)) / dx) + 1
# Initiate x/y vectors for grid
x_i = np.linspace(xmin, xmax, num=Ne)
y_i = np.linspace(ymin, ymax, num=Nn)
return np.meshgrid(x_i, y_i)
def transform_coord(proj1, proj2, x, y):
    """Transform coordinates from proj1 to proj2 (EPSG num)."""
    # Set full EPSG projection strings. Note: the '+init=EPSG:' syntax and
    # pyproj.transform() are deprecated in pyproj >= 2.2; newer code would
    # use pyproj.Transformer.from_crs(..., always_xy=True) instead.
    proj1 = pyproj.Proj("+init=EPSG:"+proj1)
    proj2 = pyproj.Proj("+init=EPSG:"+proj2)
    # Convert coordinates
    return pyproj.transform(proj1, proj2, x, y)
def mad_std(x, axis=None):
""" Robust standard deviation (using MAD). """
return 1.4826 * np.nanmedian(np.abs(x - np.nanmedian(x, axis)), axis)
def get_radius_idx(x, y, x0, y0, r, Tree, n_reloc=0,
min_months=24, max_reloc=3, time=None, height=None):
""" Get indices of all data points inside radius. """
# Query the Tree from the center of cell
idx = Tree.query_ball_point((x0, y0), r)
#print 'query #: 1 ( first search )'
if len(idx) < 2:
return idx
if time is not None:
n_reloc = max_reloc
if n_reloc < 1:
return idx
# Relocate center of search radius and query again
for k in range(n_reloc):
# Compute new search location => relocate initial center
x0_new, y0_new = np.median(x[idx]), np.median(y[idx])
# Compute relocation distance
reloc_dist = np.hypot(x0_new-x0, y0_new-y0)
# Do not allow total relocation to be larger than the search radius
if reloc_dist > r:
break
#print 'query #:', k+2, '( reloc #:', k+1, ')'
#print 'relocation dist:', reloc_dist
idx = Tree.query_ball_point((x0_new, y0_new), r)
# If max number of relocations reached, exit
if n_reloc == k+1:
break
# If time provided, keep relocating until time-coverage is sufficient
if time is not None:
t_b, x_b = binning(time[idx], height[idx], dx=1/12., window=1/12.)[:2]
print(('months #:', np.sum(~np.isnan(x_b))))
# If sufficient coverage, exit
if np.sum(~np.isnan(x_b)) >= min_months:
break
return idx
def rlsq(x, y, n=1):
""" Fit a robust polynomial of n:th deg."""
# Test solution
if len(x[~np.isnan(y)]) <= (n + 1):
if n == 0:
p = np.nan
s = np.nan
else:
p = np.zeros((1, n)) * np.nan
s = np.nan
return p, s
# Empty array
A = np.empty((0, len(x)))
# Create counter
i = 0
# Determine if we need centering
if n > 1:
# Center x-axis
x -= np.nanmean(x)
# Special case
if n == 0:
# Mean offset
A = np.ones(len(x))
else:
# Make design matrix
while i <= n:
# Stack coefficients
A = np.vstack((A, x ** i))
# Update counter
i += 1
# Test to see if we can solve the system
try:
# Robust least squares fit
fit = sm.RLM(y, A.T, missing='drop').fit(maxiter=5, tol=0.001)
# polynomial coefficients
p = fit.params
# RMS of the residuals
s = mad_std(fit.resid)
except:
# Set output to NaN
if n == 0:
p = np.nan
s = np.nan
else:
p = np.zeros((1, n)) * np.nan
s = np.nan
return p[::-1], s
def binning(x, y, xmin=None, xmax=None, dx=1 / 12.,
window=3 / 12., interp=False, median=False):
"""Time-series binning (w/overlapping windows).
Args:
x,y: time and value of time series.
xmin,xmax: time span of returned binned series.
dx: time step of binning.
window: size of binning window.
interp: interpolate binned values to original x points.
"""
if xmin is None:
xmin = np.nanmin(x)
if xmax is None:
xmax = np.nanmax(x)
steps = np.arange(xmin, xmax, dx) # time steps
bins = [(ti, ti + window) for ti in steps] # bin limits
N = len(bins)
yb = np.full(N, np.nan)
xb = np.full(N, np.nan)
eb = np.full(N, np.nan)
nb = np.full(N, np.nan)
sb = np.full(N, np.nan)
for i in range(N):
t1, t2 = bins[i]
idx, = np.where((x >= t1) & (x <= t2))
if len(idx) == 0:
xb[i] = 0.5 * (t1 + t2)
continue
ybv = y[idx]
if median:
yb[i] = np.nanmedian(ybv)
else:
yb[i] = np.nanmean(ybv)
xb[i] = 0.5 * (t1 + t2)
eb[i] = mad_std(ybv)
nb[i] = np.sum(~np.isnan(ybv))
sb[i] = np.sum(ybv)
if interp:
try:
yb = np.interp(x, xb, yb)
eb = np.interp(x, xb, eb)
sb = np.interp(x, xb, sb)
xb = x
except:
pass
return xb, yb, eb, nb, sb
# Main function for computing parameters
def main(ifile, n=''):
# Check for empty file
if os.stat(ifile).st_size == 0:
print('input file is empty!')
return
# Start timing of script
startTime = datetime.now()
print('loading data ...')
# Determine input file type
if not ifile.endswith(('.h5', '.H5', '.hdf', '.hdf5')):
print("Input file must be in hdf5-format")
return
# Input variables
xvar, yvar, tvar, zvar = icol
# Load all 1d variables needed
with h5py.File(ifile, 'r') as fi:
lon = fi[xvar][:]
lat = fi[yvar][:]
time = fi[tvar][:]
height = fi[zvar][:]
# EPSG number for lon/lat proj
projGeo = '4326'
# EPSG number for grid proj
projGrd = proj
print('converting lon/lat to x/y ...')
# Convert into stereographic coordinates
(x, y) = transform_coord(projGeo, projGrd, lon, lat)
# Get bbox from data
(xmin, xmax, ymin, ymax) = x.min(), x.max(), y.min(), y.max()
# Apply transformation to time
if expr: time = eval(expr.replace('t', 'time'))
# Overall (fixed) mean time
t_mean = np.round(np.nanmean(time), 2)
# Grid solution - defined by nodes
(Xi, Yi) = make_grid(xmin, xmax, ymin, ymax, dx, dy)
# Flatten prediction grid
xi = Xi.ravel()
yi = Yi.ravel()
# Zip data to vector
coord = list(zip(x.ravel(), y.ravel()))
# Construct cKDTree
print('building the k-d tree ...')
Tree = cKDTree(coord)
# Create output containers
dh_topo = np.full(height.shape, np.nan)
de_topo = np.full(height.shape, 999999.)
mi_topo = np.full(height.shape, np.nan)
hm_topo = np.full(height.shape, np.nan)
sx_topo = np.full(height.shape, np.nan)
sy_topo = np.full(height.shape, np.nan)
tr_topo = np.full(height.shape, np.nan)
# Set slope limit
slp_lim = np.tan(np.deg2rad(slplim))
# Enter prediction loop
print('predicting values ...')
for i in range(len(xi)):
x0, y0 = xi[i], yi[i]
# Get indexes of data within search radius or cell bbox
idx = get_radius_idx(
x, y, x0, y0, dmax, Tree, n_reloc=nreloc,
min_months=18, max_reloc=3, time=None, height=None)
# Length of data in search cap
nobs = len(x[idx])
# Check data density
if (nobs < nlim): continue
# Parameters for model-solution
xcap = x[idx]
ycap = y[idx]
tcap = time[idx]
hcap = height[idx]
# Copy original height vector
h_org = hcap.copy()
# Centroid node
xc = np.median(xcap)
yc = np.median(ycap)
# If reference time not given, use fixed or variable mean
if tref_ == 'fixed':
tref = t_mean
elif tref_ == 'variable':
tref = np.nanmean(tcap)
else:
            tref = float(tref_)  # np.float alias was removed in NumPy 1.24
# Design matrix elements
c0 = np.ones(len(xcap))
c1 = xcap - xc
c2 = ycap - yc
c3 = c1 * c2
c4 = c1 * c1
c5 = c2 * c2
c6 = tcap - tref
# Length before editing
nb = len(hcap)
# Determine model order
if order == 2 and nb >= mlim * 2:
# Biquadratic surface and linear trend
Acap = np.vstack((c0, c1, c2, c3, c4, c5, c6)).T
# Model identifier
mi = 1
# Set model order
elif nb >= mlim:
# Bilinear surface and linear trend
Acap = np.vstack((c0, c1, c2, c6)).T
# Model identifier
mi = 2
else:
# Model identifier
mi = 3
# Modelled topography
if mi == 1:
# Construct model object
linear_model = sm.RLM(hcap, Acap, M=sm.robust.norms.HuberT(), missing='drop')
# Fit the model to the data,
linear_model_fit = linear_model.fit(maxiter=niter, tol=0.001)
# Coefficients
Cm = linear_model_fit.params
# Biquadratic surface
h_model = np.dot(np.vstack((c0, c1, c2, c3, c4, c5)).T, Cm[[0, 1, 2, 3, 4, 5]])
# Compute along and across track slope
sx = np.sign(Cm[1]) * slp_lim if np.abs(Cm[1]) > slp_lim else Cm[1]
sy = np.sign(Cm[2]) * slp_lim if np.abs(Cm[2]) > slp_lim else Cm[2]
# Mean height
h_avg = Cm[0]
elif mi == 2:
# Construct model object
linear_model = sm.RLM(hcap, Acap, M=sm.robust.norms.HuberT(), missing='drop')
# Fit the model to the data,
linear_model_fit = linear_model.fit(maxiter=niter, tol=0.001)
# Coefficients
Cm = linear_model_fit.params
# Bilinear surface
h_model = np.dot(np.vstack((c0, c1, c2)).T, Cm[[0, 1, 2]])
# Compute along and across track slope
sx = np.sign(Cm[1]) * slp_lim if np.abs(Cm[1]) > slp_lim else Cm[1]
sy = np.sign(Cm[2]) * slp_lim if np.abs(Cm[2]) > slp_lim else Cm[2]
# Mean height
h_avg = Cm[0]
else:
# Mean surface from median
h_avg = np.median(hcap)
# Compute distance estimates from centroid
s_dx = (xcap - xc) + 1e-3
s_dy = (ycap - yc) + 1e-3
# Center surface height
dh_i = h_org - h_avg
            # Compute along- and across-track slopes from robust fits
            px, rms_x = rlsq(s_dx, dh_i, 1)
            py, rms_y = rlsq(s_dy, dh_i, 1)
            # Set along-track slope (zero if the fit failed)
            s_x = 0 if np.isnan(px[0]) else px[0]
            # Set across-track slope (zero if the fit failed)
            s_y = 0 if np.isnan(py[0]) else py[0]
# Compute along and across track slope
sx = np.sign(s_x) * slp_lim if np.abs(s_x) > slp_lim else s_x
sy = np.sign(s_y) * slp_lim if np.abs(s_y) > slp_lim else s_y
# Compute the surface height correction
h_model = h_avg + (sx * s_dx) + (sy * s_dy)
# Compute full slope
slope = np.arctan(np.sqrt(sx**2 + sy**2)) * (180 / np.pi)
# Compute residual
dh = h_org - h_model
# Number of observations
na = len(dh)
        # Robust (MAD-based) spread of the residuals
RMSE = mad_std(dh)
# Overwrite errors
iup = RMSE < de_topo[idx]
# Create temporary variables
dh_cap = dh_topo[idx].copy()
de_cap = de_topo[idx].copy()
hm_cap = hm_topo[idx].copy()
mi_cap = mi_topo[idx].copy()
tr_cap = tr_topo[idx].copy()
# Update variables
dh_cap[iup] = dh[iup]
de_cap[iup] = RMSE
hm_cap[iup] = h_avg
mi_cap[iup] = mi
tr_cap[iup] = tref
# Update with current solution
dh_topo[idx] = dh_cap
de_topo[idx] = de_cap
hm_topo[idx] = hm_cap
mi_topo[idx] = mi_cap
tr_topo[idx] = tr_cap
sx_topo[idx] = np.arctan(sx) * (180 / np.pi)
sy_topo[idx] = np.arctan(sy) * (180 / np.pi)
# Print progress (every N iterations)
if (i % 100) == 0 and diag is True:
# Print message every i:th solution
print(('%s %i %s %2i %s %i %s %03d %s %.3f %s %.3f' % \
('#',i,'/',len(xi),'Model:',mi,'Nobs:',nb,'Slope:',\
np.around(slope,3),'Residual:',np.around(mad_std(dh),3))))
# Print percentage of not filled
print(('Total NaNs (percent): %.2f' % \
(100 * float(len(dh_topo[np.isnan(dh_topo)])) / float(len(dh_topo)))))
# Print percentage of each model
one = np.sum(mi_topo == 1)
two = np.sum(mi_topo == 2)
tre = np.sum(mi_topo == 3)
N = float(len(mi_topo))
print(('Model types (percent): 1 = %.2f, 2 = %.2f, 3 = %.2f' % \
(100 * one/N, 100 * two/N, 100 * tre/N)))
# Append new columns to original file
with h5py.File(ifile, 'a') as fi:
# Check if we have variables in file
try:
# Save variables
fi['h_res'] = dh_topo
fi['h_mod'] = hm_topo
fi['e_res'] = de_topo
fi['m_deg'] = mi_topo
fi['t_ref'] = tr_topo
fi['slp_x'] = sx_topo
fi['slp_y'] = sy_topo
except:
# Update variables
fi['h_res'][:] = dh_topo
fi['h_mod'][:] = hm_topo
fi['e_res'][:] = de_topo
fi['m_deg'][:] = mi_topo
fi['t_ref'][:] = tr_topo
fi['slp_x'][:] = sx_topo
fi['slp_y'][:] = sy_topo
# Rename file
if ifile.find('TOPO') < 0:
os.rename(ifile, ifile.replace('.h5', '_TOPO.h5'))
# Print some statistics
print(('*' * 75))
print(('%s %s %.5f %s %.2f %s %.2f %s %.2f %s %.2f' % \
('Statistics',
'Mean:', np.nanmedian(dh_topo),
'Std.dev:', mad_std(dh_topo),
'Min:', np.nanmin(dh_topo),
'Max:', np.nanmax(dh_topo),
'RMSE:', np.nanmedian(de_topo[dh_topo!=999999]),)))
print(('*' * 75))
print('')
# Print execution time of algorithm
print(('Execution time: '+ str(datetime.now()-startTime)))
if njobs == 1:
print('running sequential code ...')
[main(f, n) for n,f in enumerate(files)]
else:
print(('running parallel code (%d jobs) ...' % njobs))
from joblib import Parallel, delayed
Parallel(n_jobs=njobs, verbose=5)(delayed(main)(f, n) for n, f in enumerate(files))
'''
from dask import compute, delayed
from distributed import Client, LocalCluster
cluster = LocalCluster(n_workers=8, threads_per_worker=None,
scheduler_port=8002, diagnostics_port=8003)
client = Client(cluster) # connect to cluster
print client
#values = [delayed(main)(f) for f in files]
#results = compute(*values, get=client.get)
values = [client.submit(main, f) for f in files]
results = client.gather(values)
'''
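Since overlapping solutions are ranked and overwritten by their MAD-based residual spread, it is worth seeing why mad_std is preferred over np.std here. A self-contained check (the function is copied verbatim from above; the data are synthetic):

import numpy as np

def mad_std(x, axis=None):
    """ Robust standard deviation (using MAD). """
    return 1.4826 * np.nanmedian(np.abs(x - np.nanmedian(x, axis)), axis)

rng = np.random.default_rng(0)
noise = rng.normal(loc=0.0, scale=2.0, size=10_000)
noise[:100] = 50.0                      # inject 1% gross outliers
print(round(float(mad_std(noise)), 2))  # stays near the true 2.0
print(round(float(np.std(noise)), 2))   # inflated by the outliers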
avg_line_length: 29.821664 | max_line_length: 92 | alphanum_fraction: 0.548439
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c42f38988b5a53094b719ae7fa6eb2c0afc8857 | size: 3,836 | ext: py | lang: Python
repo: lukaszlaszuk/insightconnect-plugins @ 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | path: plugins/opsgenie/unit_test/test_get_alert.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
import os
import sys
from parameterized import parameterized
sys.path.append(os.path.abspath("../"))
import logging
from unittest import TestCase, mock
from icon_opsgenie.actions.get_alert import GetAlert
from icon_opsgenie.actions.get_alert.schema import Output
from icon_opsgenie.connection.connection import Connection
from icon_opsgenie.connection.schema import Input
from insightconnect_plugin_runtime.exceptions import PluginException
from unit_test.mock import (
STUB_ALERT_ID,
mock_request_200,
mock_request_403,
mock_request_404,
mock_request_500,
mocked_request,
)
class TestGetAlert(TestCase):
def setUp(self) -> None:
self.connection = Connection()
self.connection.logger = logging.getLogger("connection logger")
self.connection.connect({Input.API_KEY: {"secretKey": "1234567e-123c-123c-123c-1234567e9xAd"}})
self.action = GetAlert()
self.action.connection = self.connection
self.action.logger = logging.getLogger("action logger")
self.params = {"identifier": STUB_ALERT_ID}
@mock.patch("requests.request", side_effect=mock_request_200)
def test_get_alert_when_status_ok(self, mock_get):
response = self.action.run(self.params)
expected_response = {
Output.DATA: {
"id": "70413a06-38d6-4c85-92b8-5ebc900d42e2",
"tinyId": "1791",
"alias": "event_573",
"message": "Our servers are in danger",
"status": "closed",
"acknowledged": False,
"isSeen": True,
"tags": ["OverwriteQuietHours", "Critical"],
"snoozed": True,
"snoozedUntil": "2017-04-03T20:32:35.143Z",
"count": 79,
"lastOccurredAt": "2017-04-03T20:05:50.894Z",
"createdAt": "2017-03-21T20:32:52.353Z",
"updatedAt": "2017-04-03T20:32:57.301Z",
"source": "Isengard",
"owner": "example@opsgenie.com",
"priority": "P5",
"responders": [
{"id": "4513b7ea-3b91-438f-b7e4-e3e54af9147c", "type": "team"},
{"id": "bb4d9938-c3c2-455d-aaab-727aa701c0d8", "type": "user"},
{"id": "aee8a0de-c80f-4515-a232-501c0bc9d715", "type": "escalation"},
{"id": "80564037-1984-4f38-b98e-8a1f662df552", "type": "schedule"},
],
"integration": {"id": "4513b7ea-3b91-438f-b7e4-e3e54af9147c", "name": "ExampleName", "type": "API"},
"report": {
"ackTime": 15702,
"closeTime": 60503,
"acknowledgedBy": "example@opsgenie.com",
"closedBy": "example@opsgenie.com",
},
"actions": ["Restart", "Ping"],
"entity": "EC2",
"description": "Example description",
"details": {"serverName": "ExampleName", "region": "ExampleRegion"},
},
Output.REQUESTID: "9ae63dd7-ed00-4c81-86f0-c4ffd33142c9",
Output.ELAPSED_TIME: 0.001,
}
self.assertEqual(response, expected_response)
@parameterized.expand(
[
(mock_request_403, PluginException.Preset.UNAUTHORIZED),
(mock_request_404, PluginException.Preset.NOT_FOUND),
(mock_request_500, PluginException.Preset.UNKNOWN),
],
)
def test_get_alert_when_status_error(self, mock_request, exception):
mocked_request(mock_request)
with self.assertRaises(PluginException) as context:
self.action.run(self.params)
self.assertEqual(
context.exception.cause,
PluginException.causes[exception],
)
avg_line_length: 38.36 | max_line_length: 116 | alphanum_fraction: 0.584724
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c42f3a0f4bbec1276b69d459965972429089a10 | size: 9,230 | ext: py | lang: Python
repo: arpancodes/pytorch @ a3bbaf227cc549c49245e310f11788b98eef30ee | path: .circleci/cimodel/data/pytorch_build_data.py | licenses: ["Intel"]
stars: null | issues: null | forks: null
from cimodel.lib.conf_tree import ConfigNode, X, XImportant
CONFIG_TREE_DATA = [
("xenial", [
("gcc", [
("5.4", [ # All this subtree rebases to master and then build
("3.6", [
("important", [X(True)]),
]),
]),
# TODO: bring back libtorch test
("7", [X("3.6")]),
]),
("cuda", [
("10.2", [
("3.6", [
# Build are needed for slow_gradcheck
('build_only', [X(True)]),
("slow_gradcheck", [
# If you update this slow gradcheck, you should
# also update docker_definitions.py to make sure
# the docker image match the config used here
(True, [
('shard_test', [XImportant(True)]),
]),
]),
# UNCOMMENT THE BELOW TO REENABLE LIBTORCH
# ("libtorch", [
# (True, [
# ('build_only', [X(True)]),
# ]),
# ]),
]),
]),
]),
]),
("bionic", [
("clang", [
("9", [
("3.6", [
("xla", [XImportant(True)]),
("vulkan", [XImportant(True)]),
]),
]),
]),
# @jithunnair-amd believes Jenkins builds are sufficient
# ("rocm", [
# ("3.9", [
# ("3.6", [
# ('build_only', [XImportant(True)]),
# ]),
# ]),
# ]),
]),
]
def get_major_pyver(dotted_version):
parts = dotted_version.split(".")
return "py" + parts[0]
class TreeConfigNode(ConfigNode):
def __init__(self, parent, node_name, subtree):
super(TreeConfigNode, self).__init__(parent, self.modify_label(node_name))
self.subtree = subtree
self.init2(node_name)
def modify_label(self, label):
return label
def init2(self, node_name):
pass
def get_children(self):
return [self.child_constructor()(self, k, v) for (k, v) in self.subtree]
class TopLevelNode(TreeConfigNode):
def __init__(self, node_name, subtree):
super(TopLevelNode, self).__init__(None, node_name, subtree)
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return DistroConfigNode
class DistroConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["distro_name"] = node_name
def child_constructor(self):
distro = self.find_prop("distro_name")
next_nodes = {
"xenial": XenialCompilerConfigNode,
"bionic": BionicCompilerConfigNode,
}
return next_nodes[distro]
class PyVerConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["pyver"] = node_name
self.props["abbreviated_pyver"] = get_major_pyver(node_name)
if node_name == "3.9":
self.props["abbreviated_pyver"] = "py3.9"
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return ExperimentalFeatureConfigNode
class ExperimentalFeatureConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["experimental_feature"] = node_name
def child_constructor(self):
experimental_feature = self.find_prop("experimental_feature")
next_nodes = {
"asan": AsanConfigNode,
"xla": XlaConfigNode,
"mlc": MLCConfigNode,
"vulkan": VulkanConfigNode,
"parallel_tbb": ParallelTBBConfigNode,
"noarch": NoarchConfigNode,
"parallel_native": ParallelNativeConfigNode,
"onnx": ONNXConfigNode,
"libtorch": LibTorchConfigNode,
"important": ImportantConfigNode,
"build_only": BuildOnlyConfigNode,
"shard_test": ShardTestConfigNode,
"cuda_gcc_override": CudaGccOverrideConfigNode,
"pure_torch": PureTorchConfigNode,
"slow_gradcheck": SlowGradcheckConfigNode,
}
return next_nodes[experimental_feature]
class SlowGradcheckConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["is_slow_gradcheck"] = True
def child_constructor(self):
return ExperimentalFeatureConfigNode
class PureTorchConfigNode(TreeConfigNode):
def modify_label(self, label):
return "PURE_TORCH=" + str(label)
def init2(self, node_name):
self.props["is_pure_torch"] = node_name
def child_constructor(self):
return ImportantConfigNode
class XlaConfigNode(TreeConfigNode):
def modify_label(self, label):
return "XLA=" + str(label)
def init2(self, node_name):
self.props["is_xla"] = node_name
def child_constructor(self):
return ImportantConfigNode
class MLCConfigNode(TreeConfigNode):
def modify_label(self, label):
return "MLC=" + str(label)
def init2(self, node_name):
self.props["is_mlc"] = node_name
def child_constructor(self):
return ImportantConfigNode
class AsanConfigNode(TreeConfigNode):
def modify_label(self, label):
return "Asan=" + str(label)
def init2(self, node_name):
self.props["is_asan"] = node_name
def child_constructor(self):
return ExperimentalFeatureConfigNode
class ONNXConfigNode(TreeConfigNode):
def modify_label(self, label):
return "Onnx=" + str(label)
def init2(self, node_name):
self.props["is_onnx"] = node_name
def child_constructor(self):
return ImportantConfigNode
class VulkanConfigNode(TreeConfigNode):
def modify_label(self, label):
return "Vulkan=" + str(label)
def init2(self, node_name):
self.props["is_vulkan"] = node_name
def child_constructor(self):
return ImportantConfigNode
class ParallelTBBConfigNode(TreeConfigNode):
def modify_label(self, label):
return "PARALLELTBB=" + str(label)
def init2(self, node_name):
self.props["parallel_backend"] = "paralleltbb"
def child_constructor(self):
return ImportantConfigNode
class NoarchConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["is_noarch"] = node_name
def child_constructor(self):
return ImportantConfigNode
class ParallelNativeConfigNode(TreeConfigNode):
def modify_label(self, label):
return "PARALLELNATIVE=" + str(label)
def init2(self, node_name):
self.props["parallel_backend"] = "parallelnative"
def child_constructor(self):
return ImportantConfigNode
class LibTorchConfigNode(TreeConfigNode):
def modify_label(self, label):
return "BUILD_TEST_LIBTORCH=" + str(label)
def init2(self, node_name):
self.props["is_libtorch"] = node_name
def child_constructor(self):
return ExperimentalFeatureConfigNode
class CudaGccOverrideConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["cuda_gcc_override"] = node_name
def child_constructor(self):
return ExperimentalFeatureConfigNode
class BuildOnlyConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["build_only"] = node_name
def child_constructor(self):
return ExperimentalFeatureConfigNode
class ShardTestConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["shard_test"] = node_name
def child_constructor(self):
return ImportantConfigNode
class ImportantConfigNode(TreeConfigNode):
def modify_label(self, label):
return "IMPORTANT=" + str(label)
def init2(self, node_name):
self.props["is_important"] = node_name
def get_children(self):
return []
class XenialCompilerConfigNode(TreeConfigNode):
def modify_label(self, label):
return label or "<unspecified>"
def init2(self, node_name):
self.props["compiler_name"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return XenialCompilerVersionConfigNode if self.props["compiler_name"] else PyVerConfigNode
class BionicCompilerConfigNode(TreeConfigNode):
def modify_label(self, label):
return label or "<unspecified>"
def init2(self, node_name):
self.props["compiler_name"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return BionicCompilerVersionConfigNode if self.props["compiler_name"] else PyVerConfigNode
class XenialCompilerVersionConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["compiler_version"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return PyVerConfigNode
class BionicCompilerVersionConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["compiler_version"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return PyVerConfigNode
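A dependency-free sketch of the recursion that get_children performs over these (name, children) pairs; the toy tree below merely mirrors the shape of CONFIG_TREE_DATA, and its values are illustrative:

TOY_TREE = [
    ("xenial", [
        ("gcc", [
            ("5.4", [("3.6", [])]),
            ("7", [("3.6", [])]),
        ]),
    ]),
]

def leaf_paths(subtree, prefix=()):
    # Depth-first walk yielding one label path per leaf configuration.
    for name, children in subtree:
        path = prefix + (str(name),)
        if children:
            yield from leaf_paths(children, path)
        else:
            yield path

for p in leaf_paths(TOY_TREE):
    print("/".join(p))  # xenial/gcc/5.4/3.6, then xenial/gcc/7/3.6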
avg_line_length: 27.801205 | max_line_length: 98 | alphanum_fraction: 0.617118
class BuildOnlyConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["build_only"] = node_name
def child_constructor(self):
return ExperimentalFeatureConfigNode
class ShardTestConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["shard_test"] = node_name
def child_constructor(self):
return ImportantConfigNode
class ImportantConfigNode(TreeConfigNode):
def modify_label(self, label):
return "IMPORTANT=" + str(label)
def init2(self, node_name):
self.props["is_important"] = node_name
def get_children(self):
return []
class XenialCompilerConfigNode(TreeConfigNode):
def modify_label(self, label):
return label or "<unspecified>"
def init2(self, node_name):
self.props["compiler_name"] = node_name
def child_constructor(self):
return XenialCompilerVersionConfigNode if self.props["compiler_name"] else PyVerConfigNode
class BionicCompilerConfigNode(TreeConfigNode):
def modify_label(self, label):
return label or "<unspecified>"
def init2(self, node_name):
self.props["compiler_name"] = node_name
def child_constructor(self):
return BionicCompilerVersionConfigNode if self.props["compiler_name"] else PyVerConfigNode
class XenialCompilerVersionConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["compiler_version"] = node_name
def child_constructor(self):
return PyVerConfigNode
class BionicCompilerVersionConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["compiler_version"] = node_name
def child_constructor(self):
return PyVerConfigNode
| true
| true
|
1c42f453dc61493c822c1f5baa4db34c6debf8e9
| 60,101
|
py
|
Python
|
mne/dipole.py
|
lokinou/mne-python
|
f4aa12bc9118d0739ca05c5ed5a4fba7ae71138b
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Single-dipole functions and classes."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
from copy import deepcopy
import functools
from functools import partial
import re
import numpy as np
from .cov import compute_whitener, _ensure_cov
from .io.constants import FIFF
from .io.pick import pick_types
from .io.proj import make_projector, _needs_eeg_average_ref_proj
from .bem import _fit_sphere
from .evoked import _read_evoked, _aspect_rev, _write_evokeds
from .fixes import pinvh
from ._freesurfer import read_freesurfer_lut, _get_aseg
from .transforms import _print_coord_trans, _coord_frame_name, apply_trans
from .viz.evoked import _plot_evoked
from ._freesurfer import head_to_mni, head_to_mri
from .forward._make_forward import (_get_trans, _setup_bem,
_prep_meg_channels, _prep_eeg_channels)
from .forward._compute_forward import (_compute_forwards_meeg,
_prep_field_computation)
from .surface import (transform_surface_to, _compute_nearest,
_points_outside_surface)
from .bem import _bem_find_surface, _bem_surf_name
from .source_space import _make_volume_source_space, SourceSpaces
from .parallel import parallel_func
from .utils import (logger, verbose, _time_mask, warn, _check_fname,
check_fname, _pl, fill_doc, _check_option, ShiftTimeMixin,
_svd_lwork, _repeated_svd, _get_blas_funcs, _validate_type,
_VerboseDep)
@fill_doc
class Dipole(_VerboseDep):
u"""Dipole class for sequential dipole fits.
.. note:: This class should usually not be instantiated directly,
instead :func:`mne.read_dipole` should be used.
Used to store positions, orientations, amplitudes, times, goodness of fit
of dipoles, typically obtained with Neuromag/xfit, mne_dipole_fit
or certain inverse solvers. Note that dipole position vectors are given in
the head coordinate frame.
Parameters
----------
times : array, shape (n_dipoles,)
The time instants at which each dipole was fitted (sec).
pos : array, shape (n_dipoles, 3)
The dipoles positions (m) in head coordinates.
amplitude : array, shape (n_dipoles,)
The amplitude of the dipoles (Am).
ori : array, shape (n_dipoles, 3)
The dipole orientations (normalized to unit length).
gof : array, shape (n_dipoles,)
The goodness of fit.
name : str | None
Name of the dipole.
conf : dict
Confidence limits in dipole orientation for "vol" in m^3 (volume),
"depth" in m (along the depth axis), "long" in m (longitudinal axis),
"trans" in m (transverse axis), "qlong" in Am, and "qtrans" in Am
(currents). The current confidence limit in the depth direction is
assumed to be zero (although it can be non-zero when a BEM is used).
.. versionadded:: 0.15
khi2 : array, shape (n_dipoles,)
The χ^2 values for the fits.
.. versionadded:: 0.15
nfree : array, shape (n_dipoles,)
The number of free parameters for each fit.
.. versionadded:: 0.15
%(verbose)s
See Also
--------
fit_dipole
DipoleFixed
read_dipole
Notes
-----
This class is for sequential dipole fits, where the position
changes as a function of time. For fixed dipole fits, where the
position is fixed as a function of time, use :class:`mne.DipoleFixed`.
"""
@verbose
def __init__(self, times, pos, amplitude, ori, gof,
name=None, conf=None, khi2=None, nfree=None,
*, verbose=None): # noqa: D102
self.times = np.array(times)
self.pos = np.array(pos)
self.amplitude = np.array(amplitude)
self.ori = np.array(ori)
self.gof = np.array(gof)
self.name = name
self.conf = dict()
if conf is not None:
for key, value in conf.items():
self.conf[key] = np.array(value)
self.khi2 = np.array(khi2) if khi2 is not None else None
self.nfree = np.array(nfree) if nfree is not None else None
def __repr__(self): # noqa: D105
s = "n_times : %s" % len(self.times)
s += ", tmin : %0.3f" % np.min(self.times)
s += ", tmax : %0.3f" % np.max(self.times)
return "<Dipole | %s>" % s
@verbose
def save(self, fname, overwrite=False, *, verbose=None):
"""Save dipole in a .dip or .bdip file.
Parameters
----------
fname : str
The name of the .dip or .bdip file.
%(overwrite)s
.. versionadded:: 0.20
%(verbose)s
Notes
-----
.. versionchanged:: 0.20
Support for writing bdip (Xfit binary) files.
"""
# obligatory fields
fname = _check_fname(fname, overwrite=overwrite)
if fname.endswith('.bdip'):
_write_dipole_bdip(fname, self)
else:
_write_dipole_text(fname, self)
@fill_doc
def crop(self, tmin=None, tmax=None, include_tmax=True):
"""Crop data to a given time interval.
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
%(include_tmax)s
Returns
-------
self : instance of Dipole
The cropped instance.
"""
sfreq = None
if len(self.times) > 1:
sfreq = 1. / np.median(np.diff(self.times))
mask = _time_mask(self.times, tmin, tmax, sfreq=sfreq,
include_tmax=include_tmax)
for attr in ('times', 'pos', 'gof', 'amplitude', 'ori',
'khi2', 'nfree'):
if getattr(self, attr) is not None:
setattr(self, attr, getattr(self, attr)[mask])
for key in self.conf.keys():
self.conf[key] = self.conf[key][mask]
return self
def copy(self):
"""Copy the Dipoles object.
Returns
-------
dip : instance of Dipole
The copied dipole instance.
"""
return deepcopy(self)
@verbose
def plot_locations(self, trans, subject, subjects_dir=None,
mode='orthoview', coord_frame='mri', idx='gof',
show_all=True, ax=None, block=False, show=True,
scale=5e-3, color=(1.0, 0.0, 0.0), fig=None,
verbose=None, title=None):
"""Plot dipole locations in 3d.
Parameters
----------
trans : dict
The mri to head trans.
subject : str
The subject name corresponding to FreeSurfer environment
variable SUBJECT.
%(subjects_dir)s
mode : str
Can be ``'arrow'``, ``'sphere'`` or ``'orthoview'``.
.. versionadded:: 0.14.0
coord_frame : str
Coordinate frame to use, 'head' or 'mri'. Defaults to 'mri'.
.. versionadded:: 0.14.0
idx : int | 'gof' | 'amplitude'
Index of the initially plotted dipole. Can also be 'gof' to plot
the dipole with highest goodness of fit value or 'amplitude' to
plot the dipole with the highest amplitude. The dipoles can also be
browsed through using up/down arrow keys or mouse scroll. Defaults
to 'gof'. Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
show_all : bool
Whether to always plot all the dipoles. If True (default), the
            active dipole is plotted as a red dot and its location determines
            the shown MRI slices. The non-active dipoles are plotted as
small blue dots. If False, only the active dipole is plotted.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
ax : instance of matplotlib Axes3D | None
Axes to plot into. If None (default), axes will be created.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
block : bool
Whether to halt program execution until the figure is closed.
Defaults to False. Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
show : bool
Show figure if True. Defaults to True.
Only used if mode equals 'orthoview'.
scale : float
The scale of the dipoles if ``mode`` is 'arrow' or 'sphere'.
color : tuple
The color of the dipoles if ``mode`` is 'arrow' or 'sphere'.
fig : instance of Figure3D | None
PyVista figure in which to plot the alignment.
If ``None``, creates a new 600x600 pixel figure with black
background.
.. versionadded:: 0.14.0
%(verbose)s
%(title_dipole_locs_fig)s
.. versionadded:: 0.21.0
Returns
-------
fig : instance of Figure3D or matplotlib.figure.Figure
The PyVista figure or matplotlib Figure.
Notes
-----
.. versionadded:: 0.9.0
"""
_check_option('mode', mode, [None, 'arrow', 'sphere', 'orthoview'])
from .viz import plot_dipole_locations
return plot_dipole_locations(
self, trans, subject, subjects_dir, mode, coord_frame, idx,
show_all, ax, block, show, scale=scale, color=color, fig=fig,
title=title)
@verbose
def to_mni(self, subject, trans, subjects_dir=None,
verbose=None):
"""Convert dipole location from head to MNI coordinates.
Parameters
----------
%(subject)s
%(trans_not_none)s
%(subjects_dir)s
%(verbose)s
Returns
-------
pos_mni : array, shape (n_pos, 3)
The MNI coordinates (in mm) of pos.
"""
mri_head_t, trans = _get_trans(trans)
return head_to_mni(self.pos, subject, mri_head_t,
subjects_dir=subjects_dir, verbose=verbose)
@verbose
def to_mri(self, subject, trans, subjects_dir=None,
verbose=None):
"""Convert dipole location from head to MRI surface RAS coordinates.
Parameters
----------
%(subject)s
%(trans_not_none)s
%(subjects_dir)s
%(verbose)s
Returns
-------
pos_mri : array, shape (n_pos, 3)
The Freesurfer surface RAS coordinates (in mm) of pos.
"""
mri_head_t, trans = _get_trans(trans)
return head_to_mri(self.pos, subject, mri_head_t,
subjects_dir=subjects_dir, verbose=verbose)
@verbose
def to_volume_labels(self, trans, subject='fsaverage', aseg='aparc+aseg',
subjects_dir=None, verbose=None):
"""Find an ROI in atlas for the dipole positions.
Parameters
----------
%(trans)s
%(subject)s
%(aseg)s
%(subjects_dir)s
%(verbose)s
Returns
-------
labels : list
List of anatomical region names from anatomical segmentation atlas.
Notes
-----
.. versionadded:: 0.24
"""
aseg_img, aseg_data = _get_aseg(aseg, subject, subjects_dir)
mri_vox_t = np.linalg.inv(aseg_img.header.get_vox2ras_tkr())
        # Load FreeSurfer atlas LUT
lut_inv = read_freesurfer_lut()[0]
lut = {v: k for k, v in lut_inv.items()}
# transform to voxel space from head space
pos = self.to_mri(subject, trans, subjects_dir=subjects_dir,
verbose=verbose)
pos = apply_trans(mri_vox_t, pos)
pos = np.rint(pos).astype(int)
# Get voxel value and label from LUT
labels = [lut.get(aseg_data[tuple(coord)], 'Unknown') for coord in pos]
return labels
def plot_amplitudes(self, color='k', show=True):
"""Plot the dipole amplitudes as a function of time.
Parameters
----------
color : matplotlib color
Color to use for the trace.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
"""
from .viz import plot_dipole_amplitudes
return plot_dipole_amplitudes([self], [color], show)
def __getitem__(self, item):
"""Get a time slice.
Parameters
----------
item : array-like or slice
The slice of time points to use.
Returns
-------
dip : instance of Dipole
The sliced dipole.
"""
if isinstance(item, int): # make sure attributes stay 2d
item = [item]
selected_times = self.times[item].copy()
selected_pos = self.pos[item, :].copy()
selected_amplitude = self.amplitude[item].copy()
selected_ori = self.ori[item, :].copy()
selected_gof = self.gof[item].copy()
selected_name = self.name
selected_conf = dict()
for key in self.conf.keys():
selected_conf[key] = self.conf[key][item]
selected_khi2 = self.khi2[item] if self.khi2 is not None else None
selected_nfree = self.nfree[item] if self.nfree is not None else None
return Dipole(
selected_times, selected_pos, selected_amplitude, selected_ori,
selected_gof, selected_name, selected_conf, selected_khi2,
selected_nfree)
def __len__(self):
"""Return the number of dipoles.
Returns
-------
len : int
The number of dipoles.
Examples
--------
This can be used as::
>>> len(dipoles) # doctest: +SKIP
10
"""
return self.pos.shape[0]
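# Usage sketch for the class above (file names are hypothetical):
#
#     import mne
#     dip = mne.read_dipole('sample_set.dip')
#     dip.crop(tmin=0.08, tmax=0.12)      # keep fits around the peak
#     print(len(dip), dip.pos.shape)      # n_dipoles and (n_dipoles, 3)
#     dip.save('sample_set_cropped.dip', overwrite=True)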
def _read_dipole_fixed(fname):
"""Read a fixed dipole FIF file."""
logger.info('Reading %s ...' % fname)
info, nave, aspect_kind, comment, times, data, _ = _read_evoked(fname)
return DipoleFixed(info, data, times, nave, aspect_kind, comment=comment)
@fill_doc
class DipoleFixed(ShiftTimeMixin, _VerboseDep):
"""Dipole class for fixed-position dipole fits.
.. note:: This class should usually not be instantiated directly,
instead :func:`mne.read_dipole` should be used.
Parameters
----------
%(info_not_none)s
data : array, shape (n_channels, n_times)
The dipole data.
times : array, shape (n_times,)
The time points.
nave : int
Number of averages.
aspect_kind : int
The kind of data.
comment : str
The dipole comment.
%(verbose)s
See Also
--------
read_dipole
Dipole
fit_dipole
Notes
-----
This class is for fixed-position dipole fits, where the position
(and maybe orientation) is static over time. For sequential dipole fits,
    where the position can change as a function of time, use :class:`mne.Dipole`.
.. versionadded:: 0.12
"""
@verbose
def __init__(self, info, data, times, nave, aspect_kind,
comment='', *, verbose=None): # noqa: D102
self.info = info
self.nave = nave
self._aspect_kind = aspect_kind
self.kind = _aspect_rev.get(aspect_kind, 'unknown')
self.comment = comment
self.times = times
self.data = data
self.preload = True
self._update_first_last()
def __repr__(self): # noqa: D105
s = "n_times : %s" % len(self.times)
s += ", tmin : %s" % np.min(self.times)
s += ", tmax : %s" % np.max(self.times)
return "<DipoleFixed | %s>" % s
def copy(self):
"""Copy the DipoleFixed object.
Returns
-------
inst : instance of DipoleFixed
The copy.
Notes
-----
.. versionadded:: 0.16
"""
return deepcopy(self)
@property
def ch_names(self):
"""Channel names."""
return self.info['ch_names']
@verbose
def save(self, fname, verbose=None):
"""Save dipole in a .fif file.
Parameters
----------
fname : str
The name of the .fif file. Must end with ``'.fif'`` or
``'.fif.gz'`` to make it explicit that the file contains
dipole information in FIF format.
%(verbose)s
"""
check_fname(fname, 'DipoleFixed', ('-dip.fif', '-dip.fif.gz',
'_dip.fif', '_dip.fif.gz',),
('.fif', '.fif.gz'))
_write_evokeds(fname, self, check=False)
def plot(self, show=True, time_unit='s'):
"""Plot dipole data.
Parameters
----------
show : bool
            Whether to call pyplot.show() at the end.
time_unit : str
The units for the time axis, can be "ms" or "s" (default).
.. versionadded:: 0.16
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure containing the time courses.
"""
return _plot_evoked(self, picks=None, exclude=(), unit=True, show=show,
ylim=None, xlim='tight', proj=False, hline=None,
units=None, scalings=None, titles=None, axes=None,
gfp=False, window_title=None, spatial_colors=False,
plot_type="butterfly", selectable=False,
time_unit=time_unit)
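# Usage sketch: a DipoleFixed is typically produced by fit_dipole (defined
# below) when both ``pos`` and ``ori`` are given; the values here are
# purely illustrative:
#
#     dip_fixed, resid = mne.fit_dipole(evoked, cov, bem, trans,
#                                       pos=[0., 0., 0.08],
#                                       ori=[0., 0., 1.])
#     dip_fixed.plot()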
# #############################################################################
# IO
@verbose
def read_dipole(fname, verbose=None):
"""Read .dip file from Neuromag/xfit or MNE.
Parameters
----------
fname : str
The name of the .dip or .fif file.
%(verbose)s
Returns
-------
%(dipole)s
See Also
--------
Dipole
DipoleFixed
fit_dipole
Notes
-----
.. versionchanged:: 0.20
Support for reading bdip (Xfit binary) format.
"""
fname = _check_fname(fname, overwrite='read', must_exist=True)
if fname.endswith('.fif') or fname.endswith('.fif.gz'):
return _read_dipole_fixed(fname)
elif fname.endswith('.bdip'):
return _read_dipole_bdip(fname)
else:
return _read_dipole_text(fname)
def _read_dipole_text(fname):
"""Read a dipole text file."""
# Figure out the special fields
need_header = True
def_line = name = None
# There is a bug in older np.loadtxt regarding skipping fields,
# so just read the data ourselves (need to get name and header anyway)
data = list()
with open(fname, 'r') as fid:
for line in fid:
if not (line.startswith('%') or line.startswith('#')):
need_header = False
data.append(line.strip().split())
else:
if need_header:
def_line = line
if line.startswith('##') or line.startswith('%%'):
m = re.search('Name "(.*) dipoles"', line)
if m:
name = m.group(1)
del line
data = np.atleast_2d(np.array(data, float))
if def_line is None:
raise IOError('Dipole text file is missing field definition '
'comment, cannot parse %s' % (fname,))
# actually parse the fields
def_line = def_line.lstrip('%').lstrip('#').strip()
# MNE writes it out differently than Elekta, let's standardize them...
fields = re.sub(r'([X|Y|Z] )\(mm\)', # "X (mm)", etc.
lambda match: match.group(1).strip() + '/mm', def_line)
fields = re.sub(r'\((.*?)\)', # "Q(nAm)", etc.
lambda match: '/' + match.group(1), fields)
fields = re.sub('(begin|end) ', # "begin" and "end" with no units
lambda match: match.group(1) + '/ms', fields)
fields = fields.lower().split()
required_fields = ('begin/ms',
'x/mm', 'y/mm', 'z/mm',
'q/nam', 'qx/nam', 'qy/nam', 'qz/nam',
'g/%')
optional_fields = ('khi^2', 'free', # standard ones
# now the confidence fields (up to 5!)
'vol/mm^3', 'depth/mm', 'long/mm', 'trans/mm',
'qlong/nam', 'qtrans/nam')
conf_scales = [1e-9, 1e-3, 1e-3, 1e-3, 1e-9, 1e-9]
missing_fields = sorted(set(required_fields) - set(fields))
if len(missing_fields) > 0:
raise RuntimeError('Could not find necessary fields in header: %s'
% (missing_fields,))
handled_fields = set(required_fields) | set(optional_fields)
assert len(handled_fields) == len(required_fields) + len(optional_fields)
ignored_fields = sorted(set(fields) -
set(handled_fields) -
{'end/ms'})
if len(ignored_fields) > 0:
warn('Ignoring extra fields in dipole file: %s' % (ignored_fields,))
if len(fields) != data.shape[1]:
        raise IOError('Number of data fields (%s) does not match number of '
                      'data columns (%s): %s'
                      % (len(fields), data.shape[1], fields))
logger.info("%d dipole(s) found" % len(data))
if 'end/ms' in fields:
if np.diff(data[:, [fields.index('begin/ms'),
fields.index('end/ms')]], 1, -1).any():
warn('begin and end fields differed, but only begin will be used '
'to store time values')
# Find the correct column in our data array, then scale to proper units
idx = [fields.index(field) for field in required_fields]
assert len(idx) >= 9
times = data[:, idx[0]] / 1000.
pos = 1e-3 * data[:, idx[1:4]] # put data in meters
amplitude = data[:, idx[4]]
norm = amplitude.copy()
amplitude /= 1e9
norm[norm == 0] = 1
ori = data[:, idx[5:8]] / norm[:, np.newaxis]
gof = data[:, idx[8]]
# Deal with optional fields
optional = [None] * 2
for fi, field in enumerate(optional_fields[:2]):
if field in fields:
optional[fi] = data[:, fields.index(field)]
khi2, nfree = optional
conf = dict()
for field, scale in zip(optional_fields[2:], conf_scales): # confidence
if field in fields:
conf[field.split('/')[0]] = scale * data[:, fields.index(field)]
return Dipole(times, pos, amplitude, ori, gof, name, conf, khi2, nfree)
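# Illustration of the header normalization above: an Xfit-style definition
# line such as
#     '#  begin  end  X (mm)  Y (mm)  Z (mm)  Q(nAm)  ...  g/%'
# becomes the token list
#     ['begin/ms', 'end/ms', 'x/mm', 'y/mm', 'z/mm', 'q/nam', ..., 'g/%']
# so MNE- and Elekta-written headers both map onto required_fields.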
def _write_dipole_text(fname, dip):
fmt = ' %7.1f %7.1f %8.2f %8.2f %8.2f %8.3f %8.3f %8.3f %8.3f %6.2f'
header = ('# begin end X (mm) Y (mm) Z (mm)'
' Q(nAm) Qx(nAm) Qy(nAm) Qz(nAm) g/%')
t = dip.times[:, np.newaxis] * 1000.
gof = dip.gof[:, np.newaxis]
amp = 1e9 * dip.amplitude[:, np.newaxis]
out = (t, t, dip.pos / 1e-3, amp, dip.ori * amp, gof)
# optional fields
fmts = dict(khi2=(' khi^2', ' %8.1f', 1.),
nfree=(' free', ' %5d', 1),
vol=(' vol/mm^3', ' %9.3f', 1e9),
depth=(' depth/mm', ' %9.3f', 1e3),
long=(' long/mm', ' %8.3f', 1e3),
trans=(' trans/mm', ' %9.3f', 1e3),
qlong=(' Qlong/nAm', ' %10.3f', 1e9),
qtrans=(' Qtrans/nAm', ' %11.3f', 1e9),
)
for key in ('khi2', 'nfree'):
data = getattr(dip, key)
if data is not None:
header += fmts[key][0]
fmt += fmts[key][1]
out += (data[:, np.newaxis] * fmts[key][2],)
for key in ('vol', 'depth', 'long', 'trans', 'qlong', 'qtrans'):
data = dip.conf.get(key)
if data is not None:
header += fmts[key][0]
fmt += fmts[key][1]
out += (data[:, np.newaxis] * fmts[key][2],)
out = np.concatenate(out, axis=-1)
# NB CoordinateSystem is hard-coded as Head here
with open(fname, 'wb') as fid:
fid.write('# CoordinateSystem "Head"\n'.encode('utf-8'))
fid.write((header + '\n').encode('utf-8'))
np.savetxt(fid, out, fmt=fmt)
if dip.name is not None:
fid.write(('## Name "%s dipoles" Style "Dipoles"'
% dip.name).encode('utf-8'))
_BDIP_ERROR_KEYS = ('depth', 'long', 'trans', 'qlong', 'qtrans')
def _read_dipole_bdip(fname):
name = None
nfree = None
with open(fname, 'rb') as fid:
# Which dipole in a multi-dipole set
times = list()
pos = list()
amplitude = list()
ori = list()
gof = list()
conf = dict(vol=list())
khi2 = list()
has_errors = None
while True:
num = np.frombuffer(fid.read(4), '>i4')
if len(num) == 0:
break
times.append(np.frombuffer(fid.read(4), '>f4')[0])
fid.read(4) # end
fid.read(12) # r0
pos.append(np.frombuffer(fid.read(12), '>f4'))
Q = np.frombuffer(fid.read(12), '>f4')
amplitude.append(np.linalg.norm(Q))
ori.append(Q / amplitude[-1])
gof.append(100 * np.frombuffer(fid.read(4), '>f4')[0])
this_has_errors = bool(np.frombuffer(fid.read(4), '>i4')[0])
if has_errors is None:
has_errors = this_has_errors
for key in _BDIP_ERROR_KEYS:
conf[key] = list()
assert has_errors == this_has_errors
fid.read(4) # Noise level used for error computations
limits = np.frombuffer(fid.read(20), '>f4') # error limits
for key, lim in zip(_BDIP_ERROR_KEYS, limits):
conf[key].append(lim)
fid.read(100) # (5, 5) fully describes the conf. ellipsoid
conf['vol'].append(np.frombuffer(fid.read(4), '>f4')[0])
khi2.append(np.frombuffer(fid.read(4), '>f4')[0])
fid.read(4) # prob
fid.read(4) # total noise estimate
return Dipole(times, pos, amplitude, ori, gof, name, conf, khi2, nfree)
def _write_dipole_bdip(fname, dip):
with open(fname, 'wb+') as fid:
for ti, t in enumerate(dip.times):
fid.write(np.zeros(1, '>i4').tobytes()) # int dipole
fid.write(np.array([t, 0]).astype('>f4').tobytes())
fid.write(np.zeros(3, '>f4').tobytes()) # r0
fid.write(dip.pos[ti].astype('>f4').tobytes()) # pos
Q = dip.amplitude[ti] * dip.ori[ti]
fid.write(Q.astype('>f4').tobytes())
fid.write(np.array(dip.gof[ti] / 100., '>f4').tobytes())
has_errors = int(bool(len(dip.conf)))
fid.write(np.array(has_errors, '>i4').tobytes()) # has_errors
fid.write(np.zeros(1, '>f4').tobytes()) # noise level
for key in _BDIP_ERROR_KEYS:
val = dip.conf[key][ti] if key in dip.conf else 0.
assert val.shape == ()
fid.write(np.array(val, '>f4').tobytes())
fid.write(np.zeros(25, '>f4').tobytes())
conf = dip.conf['vol'][ti] if 'vol' in dip.conf else 0.
fid.write(np.array(conf, '>f4').tobytes())
khi2 = dip.khi2[ti] if dip.khi2 is not None else 0
fid.write(np.array(khi2, '>f4').tobytes())
fid.write(np.zeros(1, '>f4').tobytes()) # prob
fid.write(np.zeros(1, '>f4').tobytes()) # total noise est
# #############################################################################
# Fitting
def _dipole_forwards(fwd_data, whitener, rr, n_jobs=1):
"""Compute the forward solution and do other nice stuff."""
B = _compute_forwards_meeg(rr, fwd_data, n_jobs, silent=True)
B = np.concatenate(B, axis=1)
assert np.isfinite(B).all()
B_orig = B.copy()
# Apply projection and whiten (cov has projections already)
_, _, dgemm = _get_ddot_dgemv_dgemm()
B = dgemm(1., B, whitener.T)
# column normalization doesn't affect our fitting, so skip for now
# S = np.sum(B * B, axis=1) # across channels
# scales = np.repeat(3. / np.sqrt(np.sum(np.reshape(S, (len(rr), 3)),
# axis=1)), 3)
# B *= scales[:, np.newaxis]
scales = np.ones(3)
return B, B_orig, scales
@verbose
def _make_guesses(surf, grid, exclude, mindist, n_jobs=1, verbose=None):
"""Make a guess space inside a sphere or BEM surface."""
if 'rr' in surf:
logger.info('Guess surface (%s) is in %s coordinates'
% (_bem_surf_name[surf['id']],
_coord_frame_name(surf['coord_frame'])))
else:
logger.info('Making a spherical guess space with radius %7.1f mm...'
% (1000 * surf['R']))
logger.info('Filtering (grid = %6.f mm)...' % (1000 * grid))
src = _make_volume_source_space(surf, grid, exclude, 1000 * mindist,
do_neighbors=False, n_jobs=n_jobs)[0]
assert 'vertno' in src
# simplify the result to make things easier later
src = dict(rr=src['rr'][src['vertno']], nn=src['nn'][src['vertno']],
nuse=src['nuse'], coord_frame=src['coord_frame'],
vertno=np.arange(src['nuse']), type='discrete')
return SourceSpaces([src])
def _fit_eval(rd, B, B2, fwd_svd=None, fwd_data=None, whitener=None,
lwork=None):
"""Calculate the residual sum of squares."""
if fwd_svd is None:
fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis, :])[0]
uu, sing, vv = _repeated_svd(fwd, lwork, overwrite_a=True)
else:
uu, sing, vv = fwd_svd
gof = _dipole_gof(uu, sing, vv, B, B2)[0]
# mne-c uses fitness=B2-Bm2, but ours (1-gof) is just a normalized version
return 1. - gof
@functools.lru_cache(None)
def _get_ddot_dgemv_dgemm():
return _get_blas_funcs(np.float64, ('dot', 'gemv', 'gemm'))
def _dipole_gof(uu, sing, vv, B, B2):
"""Calculate the goodness of fit from the forward SVD."""
ddot, dgemv, _ = _get_ddot_dgemv_dgemm()
ncomp = 3 if sing[2] / (sing[0] if sing[0] > 0 else 1.) > 0.2 else 2
one = dgemv(1., vv[:ncomp], B) # np.dot(vv[:ncomp], B)
Bm2 = ddot(one, one) # np.sum(one * one)
gof = Bm2 / B2
return gof, one
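# In matrix form, with fwd = U S V^T the SVD of the whitened 3-component
# forward, the code above computes
#     gof = ||V[:ncomp] @ B||**2 / ||B||**2
# i.e. the fraction of whitened signal power captured by the dipole
# subspace; ncomp drops the weakest component when sing[2]/sing[0] <= 0.2
# (e.g. the nearly silent radial direction in MEG sphere models).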
def _fit_Q(fwd_data, whitener, B, B2, B_orig, rd, ori=None):
"""Fit the dipole moment once the location is known."""
from scipy import linalg
if 'fwd' in fwd_data:
# should be a single precomputed "guess" (i.e., fixed position)
assert rd is None
fwd = fwd_data['fwd']
assert fwd.shape[0] == 3
fwd_orig = fwd_data['fwd_orig']
assert fwd_orig.shape[0] == 3
scales = fwd_data['scales']
assert scales.shape == (3,)
fwd_svd = fwd_data['fwd_svd'][0]
else:
fwd, fwd_orig, scales = _dipole_forwards(fwd_data, whitener,
rd[np.newaxis, :])
fwd_svd = None
if ori is None:
if fwd_svd is None:
fwd_svd = linalg.svd(fwd, full_matrices=False)
uu, sing, vv = fwd_svd
gof, one = _dipole_gof(uu, sing, vv, B, B2)
ncomp = len(one)
one /= sing[:ncomp]
Q = np.dot(one, uu.T[:ncomp])
else:
fwd = np.dot(ori[np.newaxis], fwd)
sing = np.linalg.norm(fwd)
one = np.dot(fwd / sing, B)
gof = (one * one)[0] / B2
Q = ori * np.sum(one / sing)
ncomp = 3
# Counteract the effect of column normalization
Q *= scales[0]
B_residual_noproj = B_orig - np.dot(fwd_orig.T, Q)
return Q, gof, B_residual_noproj, ncomp
def _fit_dipoles(fun, min_dist_to_inner_skull, data, times, guess_rrs,
guess_data, fwd_data, whitener, ori, n_jobs, rank, rhoend):
"""Fit a single dipole to the given whitened, projected data."""
from scipy.optimize import fmin_cobyla
parallel, p_fun, _ = parallel_func(fun, n_jobs)
# parallel over time points
res = parallel(p_fun(min_dist_to_inner_skull, B, t, guess_rrs,
guess_data, fwd_data, whitener,
fmin_cobyla, ori, rank, rhoend)
for B, t in zip(data.T, times))
pos = np.array([r[0] for r in res])
amp = np.array([r[1] for r in res])
ori = np.array([r[2] for r in res])
gof = np.array([r[3] for r in res]) * 100 # convert to percentage
conf = None
if res[0][4] is not None:
conf = np.array([r[4] for r in res])
keys = ['vol', 'depth', 'long', 'trans', 'qlong', 'qtrans']
conf = {key: conf[:, ki] for ki, key in enumerate(keys)}
khi2 = np.array([r[5] for r in res])
nfree = np.array([r[6] for r in res])
residual_noproj = np.array([r[7] for r in res]).T
return pos, amp, ori, gof, conf, khi2, nfree, residual_noproj
'''Simplex code in case we ever want/need it for testing
def _make_tetra_simplex():
"""Make the initial tetrahedron"""
#
# For this definition of a regular tetrahedron, see
#
# http://mathworld.wolfram.com/Tetrahedron.html
#
x = np.sqrt(3.0) / 3.0
r = np.sqrt(6.0) / 12.0
R = 3 * r
d = x / 2.0
simplex = 1e-2 * np.array([[x, 0.0, -r],
[-d, 0.5, -r],
[-d, -0.5, -r],
[0., 0., R]])
return simplex
def try_(p, y, psum, ndim, fun, ihi, neval, fac):
"""Helper to try a value"""
ptry = np.empty(ndim)
fac1 = (1.0 - fac) / ndim
fac2 = fac1 - fac
ptry = psum * fac1 - p[ihi] * fac2
ytry = fun(ptry)
neval += 1
if ytry < y[ihi]:
y[ihi] = ytry
psum[:] += ptry - p[ihi]
p[ihi] = ptry
return ytry, neval
def _simplex_minimize(p, ftol, stol, fun, max_eval=1000):
"""Minimization with the simplex algorithm
Modified from Numerical recipes"""
y = np.array([fun(s) for s in p])
ndim = p.shape[1]
assert p.shape[0] == ndim + 1
mpts = ndim + 1
neval = 0
psum = p.sum(axis=0)
loop = 1
while(True):
ilo = 1
if y[1] > y[2]:
ihi = 1
inhi = 2
else:
ihi = 2
inhi = 1
for i in range(mpts):
if y[i] < y[ilo]:
ilo = i
if y[i] > y[ihi]:
inhi = ihi
ihi = i
elif y[i] > y[inhi]:
if i != ihi:
inhi = i
rtol = 2 * np.abs(y[ihi] - y[ilo]) / (np.abs(y[ihi]) + np.abs(y[ilo]))
if rtol < ftol:
break
if neval >= max_eval:
raise RuntimeError('Maximum number of evaluations exceeded.')
if stol > 0: # Has the simplex collapsed?
dsum = np.sqrt(np.sum((p[ilo] - p[ihi]) ** 2))
if loop > 5 and dsum < stol:
break
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, -1.)
if ytry <= y[ilo]:
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 2.)
elif ytry >= y[inhi]:
ysave = y[ihi]
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 0.5)
if ytry >= ysave:
for i in range(mpts):
if i != ilo:
psum[:] = 0.5 * (p[i] + p[ilo])
p[i] = psum
y[i] = fun(psum)
neval += ndim
psum = p.sum(axis=0)
loop += 1
'''
def _fit_confidence(rd, Q, ori, whitener, fwd_data):
    # As described in the Xfit manual, confidence intervals can be calculated
# by examining a linearization of model at the best-fitting location,
# i.e. taking the Jacobian and using the whitener:
#
# J = [∂b/∂x ∂b/∂y ∂b/∂z ∂b/∂Qx ∂b/∂Qy ∂b/∂Qz]
# C = (J.T C^-1 J)^-1
#
# And then the confidence interval is the diagonal of C, scaled by 1.96
# (for 95% confidence).
from scipy import linalg
direction = np.empty((3, 3))
# The coordinate system has the x axis aligned with the dipole orientation,
direction[0] = ori
# the z axis through the origin of the sphere model
rvec = rd - fwd_data['inner_skull']['r0']
direction[2] = rvec - ori * np.dot(ori, rvec) # orthogonalize
direction[2] /= np.linalg.norm(direction[2])
    # and the y axis perpendicular to these, forming a right-handed system.
direction[1] = np.cross(direction[2], direction[0])
assert np.allclose(np.dot(direction, direction.T), np.eye(3))
# Get spatial deltas in dipole coordinate directions
deltas = (-1e-4, 1e-4)
J = np.empty((whitener.shape[0], 6))
for ii in range(3):
fwds = []
for delta in deltas:
this_r = rd[np.newaxis] + delta * direction[ii]
fwds.append(
np.dot(Q, _dipole_forwards(fwd_data, whitener, this_r)[0]))
J[:, ii] = np.diff(fwds, axis=0)[0] / np.diff(deltas)[0]
# Get current (Q) deltas in the dipole directions
deltas = np.array([-0.01, 0.01]) * np.linalg.norm(Q)
this_fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis])[0]
for ii in range(3):
fwds = []
for delta in deltas:
fwds.append(np.dot(Q + delta * direction[ii], this_fwd))
J[:, ii + 3] = np.diff(fwds, axis=0)[0] / np.diff(deltas)[0]
# J is already whitened, so we don't need to do np.dot(whitener, J).
# However, the units in the Jacobian are potentially quite different,
# so we need to do some normalization during inversion, then revert.
direction_norm = np.linalg.norm(J[:, :3])
Q_norm = np.linalg.norm(J[:, 3:5]) # omit possible zero Z
norm = np.array([direction_norm] * 3 + [Q_norm] * 3)
J /= norm
J = np.dot(J.T, J)
C = pinvh(J, rtol=1e-14)
C /= norm
C /= norm[:, np.newaxis]
conf = 1.96 * np.sqrt(np.diag(C))
    # The confidence volume of the dipole location is obtained by
# taking the eigenvalues of the upper left submatrix and computing
# v = 4π/3 √(c^3 λ1 λ2 λ3) with c = 7.81, or:
vol_conf = 4 * np.pi / 3. * np.sqrt(
476.379541 * np.prod(linalg.eigh(C[:3, :3], eigvals_only=True)))
conf = np.concatenate([conf, [vol_conf]])
# Now we reorder and subselect the proper columns:
# vol, depth, long, trans, Qlong, Qtrans (discard Qdepth, assumed zero)
conf = conf[[6, 2, 0, 1, 3, 4]]
return conf
def _surface_constraint(rd, surf, min_dist_to_inner_skull):
"""Surface fitting constraint."""
dist = _compute_nearest(surf['rr'], rd[np.newaxis, :],
return_dists=True)[1][0]
if _points_outside_surface(rd[np.newaxis, :], surf, 1)[0]:
dist *= -1.
# Once we know the dipole is below the inner skull,
# let's check if its distance to the inner skull is at least
# min_dist_to_inner_skull. This can be enforced by adding a
    # constraint proportional to its distance.
dist -= min_dist_to_inner_skull
return dist
def _sphere_constraint(rd, r0, R_adj):
"""Sphere fitting constraint."""
return R_adj - np.sqrt(np.sum((rd - r0) ** 2))
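# For example (illustrative numbers): with r0 at the origin and
# R_adj = 0.085 - 0.005 = 0.080 m, a candidate rd = (0, 0, 0.07) yields a
# constraint value of +0.01 (accepted by COBYLA), whereas rd = (0, 0, 0.09)
# yields -0.01 (rejected).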
def _fit_dipole(min_dist_to_inner_skull, B_orig, t, guess_rrs,
guess_data, fwd_data, whitener, fmin_cobyla, ori, rank,
rhoend):
"""Fit a single bit of data."""
B = np.dot(whitener, B_orig)
# make constraint function to keep the solver within the inner skull
if 'rr' in fwd_data['inner_skull']: # bem
surf = fwd_data['inner_skull']
constraint = partial(_surface_constraint, surf=surf,
min_dist_to_inner_skull=min_dist_to_inner_skull)
else: # sphere
surf = None
constraint = partial(
_sphere_constraint, r0=fwd_data['inner_skull']['r0'],
R_adj=fwd_data['inner_skull']['R'] - min_dist_to_inner_skull)
# Find a good starting point (find_best_guess in C)
B2 = np.dot(B, B)
if B2 == 0:
warn('Zero field found for time %s' % t)
return np.zeros(3), 0, np.zeros(3), 0, B
idx = np.argmin([_fit_eval(guess_rrs[[fi], :], B, B2, fwd_svd)
for fi, fwd_svd in enumerate(guess_data['fwd_svd'])])
x0 = guess_rrs[idx]
lwork = _svd_lwork((3, B.shape[0]))
fun = partial(_fit_eval, B=B, B2=B2, fwd_data=fwd_data, whitener=whitener,
lwork=lwork)
# Tested minimizers:
# Simplex, BFGS, CG, COBYLA, L-BFGS-B, Powell, SLSQP, TNC
# Several were similar, but COBYLA won for having a handy constraint
# function we can use to ensure we stay inside the inner skull /
# smallest sphere
rd_final = fmin_cobyla(fun, x0, (constraint,), consargs=(),
rhobeg=5e-2, rhoend=rhoend, disp=False)
# simplex = _make_tetra_simplex() + x0
# _simplex_minimize(simplex, 1e-4, 2e-4, fun)
# rd_final = simplex[0]
# Compute the dipole moment at the final point
Q, gof, residual_noproj, n_comp = _fit_Q(
fwd_data, whitener, B, B2, B_orig, rd_final, ori=ori)
khi2 = (1 - gof) * B2
nfree = rank - n_comp
amp = np.sqrt(np.dot(Q, Q))
norm = 1. if amp == 0. else amp
ori = Q / norm
conf = _fit_confidence(rd_final, Q, ori, whitener, fwd_data)
msg = '---- Fitted : %7.1f ms' % (1000. * t)
if surf is not None:
dist_to_inner_skull = _compute_nearest(
surf['rr'], rd_final[np.newaxis, :], return_dists=True)[1][0]
msg += (", distance to inner skull : %2.4f mm"
% (dist_to_inner_skull * 1000.))
logger.info(msg)
return rd_final, amp, ori, gof, conf, khi2, nfree, residual_noproj
def _fit_dipole_fixed(min_dist_to_inner_skull, B_orig, t, guess_rrs,
guess_data, fwd_data, whitener,
fmin_cobyla, ori, rank, rhoend):
"""Fit a data using a fixed position."""
B = np.dot(whitener, B_orig)
B2 = np.dot(B, B)
if B2 == 0:
warn('Zero field found for time %s' % t)
return np.zeros(3), 0, np.zeros(3), 0, np.zeros(6)
# Compute the dipole moment
Q, gof, residual_noproj = _fit_Q(guess_data, whitener, B, B2, B_orig,
rd=None, ori=ori)[:3]
if ori is None:
amp = np.sqrt(np.dot(Q, Q))
norm = 1. if amp == 0. else amp
ori = Q / norm
else:
amp = np.dot(Q, ori)
rd_final = guess_rrs[0]
# This will be slow, and we don't use it anyway, so omit it for now:
# conf = _fit_confidence(rd_final, Q, ori, whitener, fwd_data)
conf = khi2 = nfree = None
# No corresponding 'logger' message here because it should go *very* fast
return rd_final, amp, ori, gof, conf, khi2, nfree, residual_noproj
@verbose
def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1,
pos=None, ori=None, rank=None, accuracy='normal', tol=5e-5,
verbose=None):
"""Fit a dipole.
Parameters
----------
evoked : instance of Evoked
The dataset to fit.
cov : str | instance of Covariance
The noise covariance.
bem : str | instance of ConductorModel
The BEM filename (str) or conductor model.
trans : str | None
The head<->MRI transform filename. Must be provided unless BEM
is a sphere model.
min_dist : float
Minimum distance (in millimeters) from the dipole to the inner skull.
Must be positive. Note that because this is a constraint passed to
a solver it is not strict but close, i.e. for a ``min_dist=5.`` the
fits could be 4.9 mm from the inner skull.
%(n_jobs)s
It is used in field computation and fitting.
pos : ndarray, shape (3,) | None
Position of the dipole to use. If None (default), sequential
fitting (different position and orientation for each time instance)
is performed. If a position (in head coords) is given as an array,
the position is fixed during fitting.
.. versionadded:: 0.12
ori : ndarray, shape (3,) | None
Orientation of the dipole to use. If None (default), the
orientation is free to change as a function of time. If an
orientation (in head coordinates) is given as an array, ``pos``
must also be provided, and the routine computes the amplitude and
goodness of fit of the dipole at the given position and orientation
for each time instant.
.. versionadded:: 0.12
%(rank_none)s
.. versionadded:: 0.20
accuracy : str
Can be "normal" (default) or "accurate", which gives the most accurate
coil definition but is typically not necessary for real-world data.
.. versionadded:: 0.24
tol : float
Final accuracy of the optimization (see ``rhoend`` argument of
:func:`scipy.optimize.fmin_cobyla`).
.. versionadded:: 0.24
%(verbose)s
Returns
-------
dip : instance of Dipole or DipoleFixed
The dipole fits. A :class:`mne.DipoleFixed` is returned if
``pos`` and ``ori`` are both not None, otherwise a
:class:`mne.Dipole` is returned.
residual : instance of Evoked
The M-EEG data channels with the fitted dipolar activity removed.
See Also
--------
mne.beamformer.rap_music
Dipole
DipoleFixed
read_dipole
Notes
-----
.. versionadded:: 0.9.0
"""
from scipy import linalg
# This could eventually be adapted to work with other inputs, these
# are what is needed:
evoked = evoked.copy()
_validate_type(accuracy, str, 'accuracy')
_check_option('accuracy', accuracy, ('accurate', 'normal'))
# Determine if a list of projectors has an average EEG ref
if _needs_eeg_average_ref_proj(evoked.info):
raise ValueError('EEG average reference is mandatory for dipole '
'fitting.')
if min_dist < 0:
raise ValueError('min_dist should be positive. Got %s' % min_dist)
if ori is not None and pos is None:
raise ValueError('pos must be provided if ori is not None')
data = evoked.data
if not np.isfinite(data).all():
raise ValueError('Evoked data must be finite')
info = evoked.info
times = evoked.times.copy()
comment = evoked.comment
# Convert the min_dist to meters
min_dist_to_inner_skull = min_dist / 1000.
del min_dist
# Figure out our inputs
neeg = len(pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=[]))
if isinstance(bem, str):
bem_extra = bem
else:
bem_extra = repr(bem)
logger.info('BEM : %s' % bem_extra)
mri_head_t, trans = _get_trans(trans)
logger.info('MRI transform : %s' % trans)
bem = _setup_bem(bem, bem_extra, neeg, mri_head_t, verbose=False)
if not bem['is_sphere']:
# Find the best-fitting sphere
inner_skull = _bem_find_surface(bem, 'inner_skull')
inner_skull = inner_skull.copy()
R, r0 = _fit_sphere(inner_skull['rr'], disp=False)
# r0 back to head frame for logging
r0 = apply_trans(mri_head_t['trans'], r0[np.newaxis, :])[0]
inner_skull['r0'] = r0
logger.info('Head origin : '
'%6.1f %6.1f %6.1f mm rad = %6.1f mm.'
% (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], 1000 * R))
del R, r0
else:
r0 = bem['r0']
if len(bem.get('layers', [])) > 0:
R = bem['layers'][0]['rad']
kind = 'rad'
else: # MEG-only
# Use the minimum distance to the MEG sensors as the radius then
R = np.dot(np.linalg.inv(info['dev_head_t']['trans']),
np.hstack([r0, [1.]]))[:3] # r0 -> device
R = R - [info['chs'][pick]['loc'][:3]
for pick in pick_types(info, meg=True, exclude=[])]
if len(R) == 0:
raise RuntimeError('No MEG channels found, but MEG-only '
'sphere model used')
R = np.min(np.sqrt(np.sum(R * R, axis=1))) # use dist to sensors
kind = 'max_rad'
logger.info('Sphere model : origin at (% 7.2f % 7.2f % 7.2f) mm, '
'%s = %6.1f mm'
% (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], kind, R))
inner_skull = dict(R=R, r0=r0) # NB sphere model defined in head frame
del R, r0
# Deal with DipoleFixed cases here
if pos is not None:
fixed_position = True
pos = np.array(pos, float)
if pos.shape != (3,):
raise ValueError('pos must be None or a 3-element array-like,'
' got %s' % (pos,))
logger.info('Fixed position : %6.1f %6.1f %6.1f mm'
% tuple(1000 * pos))
if ori is not None:
ori = np.array(ori, float)
if ori.shape != (3,):
                raise ValueError('ori must be None or a 3-element array-like,'
' got %s' % (ori,))
norm = np.sqrt(np.sum(ori * ori))
if not np.isclose(norm, 1):
raise ValueError('ori must be a unit vector, got length %s'
% (norm,))
            logger.info('Fixed orientation : %6.4f %6.4f %6.4f'
% tuple(ori))
else:
logger.info('Free orientation : <time-varying>')
fit_n_jobs = 1 # only use 1 job to do the guess fitting
else:
fixed_position = False
# Eventually these could be parameters, but they are just used for
# the initial grid anyway
guess_grid = 0.02 # MNE-C uses 0.01, but this is faster w/similar perf
guess_mindist = max(0.005, min_dist_to_inner_skull)
guess_exclude = 0.02
logger.info('Guess grid : %6.1f mm' % (1000 * guess_grid,))
if guess_mindist > 0.0:
logger.info('Guess mindist : %6.1f mm'
% (1000 * guess_mindist,))
if guess_exclude > 0:
logger.info('Guess exclude : %6.1f mm'
% (1000 * guess_exclude,))
logger.info(f'Using {accuracy} MEG coil definitions.')
fit_n_jobs = n_jobs
cov = _ensure_cov(cov)
logger.info('')
_print_coord_trans(mri_head_t)
_print_coord_trans(info['dev_head_t'])
logger.info('%d bad channels total' % len(info['bads']))
# Forward model setup (setup_forward_model from setup.c)
ch_types = evoked.get_channel_types()
megcoils, compcoils, megnames, meg_info = [], [], [], None
eegels, eegnames = [], []
if 'grad' in ch_types or 'mag' in ch_types:
megcoils, compcoils, megnames, meg_info = \
_prep_meg_channels(info, exclude='bads',
accuracy=accuracy, verbose=verbose)
if 'eeg' in ch_types:
eegels, eegnames = _prep_eeg_channels(info, exclude='bads',
verbose=verbose)
# Ensure that MEG and/or EEG channels are present
if len(megcoils + eegels) == 0:
raise RuntimeError('No MEG or EEG channels found.')
# Whitener for the data
logger.info('Decomposing the sensor noise covariance matrix...')
picks = pick_types(info, meg=True, eeg=True, ref_meg=False)
# In case we want to more closely match MNE-C for debugging:
# from .io.pick import pick_info
# from .cov import prepare_noise_cov
# info_nb = pick_info(info, picks)
# cov = prepare_noise_cov(cov, info_nb, info_nb['ch_names'], verbose=False)
# nzero = (cov['eig'] > 0)
# n_chan = len(info_nb['ch_names'])
# whitener = np.zeros((n_chan, n_chan), dtype=np.float64)
# whitener[nzero, nzero] = 1.0 / np.sqrt(cov['eig'][nzero])
# whitener = np.dot(whitener, cov['eigvec'])
whitener, _, rank = compute_whitener(cov, info, picks=picks,
rank=rank, return_rank=True)
# Proceed to computing the fits (make_guess_data)
if fixed_position:
guess_src = dict(nuse=1, rr=pos[np.newaxis], inuse=np.array([True]))
logger.info('Compute forward for dipole location...')
else:
logger.info('\n---- Computing the forward solution for the guesses...')
guess_src = _make_guesses(inner_skull, guess_grid, guess_exclude,
guess_mindist, n_jobs=n_jobs)[0]
# grid coordinates go from mri to head frame
transform_surface_to(guess_src, 'head', mri_head_t)
logger.info('Go through all guess source locations...')
# inner_skull goes from mri to head frame
if 'rr' in inner_skull:
transform_surface_to(inner_skull, 'head', mri_head_t)
if fixed_position:
if 'rr' in inner_skull:
check = _surface_constraint(pos, inner_skull,
min_dist_to_inner_skull)
else:
check = _sphere_constraint(
pos, inner_skull['r0'],
R_adj=inner_skull['R'] - min_dist_to_inner_skull)
if check <= 0:
raise ValueError('fixed position is %0.1fmm outside the inner '
'skull boundary' % (-1000 * check,))
# C code computes guesses w/sphere model for speed, don't bother here
fwd_data = dict(coils_list=[megcoils, eegels], infos=[meg_info, None],
ccoils_list=[compcoils, None], coil_types=['meg', 'eeg'],
inner_skull=inner_skull)
# fwd_data['inner_skull'] in head frame, bem in mri, confusing...
_prep_field_computation(guess_src['rr'], bem, fwd_data, n_jobs,
verbose=False)
guess_fwd, guess_fwd_orig, guess_fwd_scales = _dipole_forwards(
fwd_data, whitener, guess_src['rr'], n_jobs=fit_n_jobs)
# decompose ahead of time
guess_fwd_svd = [linalg.svd(fwd, full_matrices=False)
for fwd in np.array_split(guess_fwd,
len(guess_src['rr']))]
guess_data = dict(fwd=guess_fwd, fwd_svd=guess_fwd_svd,
fwd_orig=guess_fwd_orig, scales=guess_fwd_scales)
del guess_fwd, guess_fwd_svd, guess_fwd_orig, guess_fwd_scales # destroyed
logger.info('[done %d source%s]' % (guess_src['nuse'],
_pl(guess_src['nuse'])))
# Do actual fits
data = data[picks]
ch_names = [info['ch_names'][p] for p in picks]
proj_op = make_projector(info['projs'], ch_names, info['bads'])[0]
fun = _fit_dipole_fixed if fixed_position else _fit_dipole
out = _fit_dipoles(
fun, min_dist_to_inner_skull, data, times, guess_src['rr'],
guess_data, fwd_data, whitener, ori, n_jobs, rank, tol)
assert len(out) == 8
if fixed_position and ori is not None:
# DipoleFixed
data = np.array([out[1], out[3]])
out_info = deepcopy(info)
loc = np.concatenate([pos, ori, np.zeros(6)])
out_info._unlocked = True
out_info['chs'] = [
dict(ch_name='dip 01', loc=loc, kind=FIFF.FIFFV_DIPOLE_WAVE,
coord_frame=FIFF.FIFFV_COORD_UNKNOWN, unit=FIFF.FIFF_UNIT_AM,
coil_type=FIFF.FIFFV_COIL_DIPOLE,
unit_mul=0, range=1, cal=1., scanno=1, logno=1),
dict(ch_name='goodness', loc=np.full(12, np.nan),
kind=FIFF.FIFFV_GOODNESS_FIT, unit=FIFF.FIFF_UNIT_AM,
coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
coil_type=FIFF.FIFFV_COIL_NONE,
unit_mul=0, range=1., cal=1., scanno=2, logno=100)]
for key in ['hpi_meas', 'hpi_results', 'projs']:
out_info[key] = list()
for key in ['acq_pars', 'acq_stim', 'description', 'dig',
'experimenter', 'hpi_subsystem', 'proj_id', 'proj_name',
'subject_info']:
out_info[key] = None
out_info._unlocked = False
out_info['bads'] = []
out_info._update_redundant()
out_info._check_consistency()
dipoles = DipoleFixed(out_info, data, times, evoked.nave,
evoked._aspect_kind, comment=comment)
else:
dipoles = Dipole(times, out[0], out[1], out[2], out[3], comment,
out[4], out[5], out[6])
residual = evoked.copy().apply_proj() # set the projs active
residual.data[picks] = np.dot(proj_op, out[-1])
logger.info('%d time points fitted' % len(dipoles.times))
return dipoles, residual
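# Usage sketch (file names are hypothetical; any consistent
# evoked/cov/bem/trans set in head coordinates works):
#
#     import mne
#     evoked = mne.read_evokeds('sample-ave.fif', condition=0)
#     evoked.pick_types(meg=True).crop(0.09, 0.11)
#     cov = mne.read_cov('sample-cov.fif')
#     dip, resid = mne.fit_dipole(evoked, cov, 'sample-bem-sol.fif',
#                                 trans='sample-trans.fif', min_dist=5.)
#     print(dip.gof.max())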
def get_phantom_dipoles(kind='vectorview'):
"""Get standard phantom dipole locations and orientations.
Parameters
----------
kind : str
Get the information for the given system:
``vectorview`` (default)
The Neuromag VectorView phantom.
``otaniemi``
The older Neuromag phantom used at Otaniemi.
Returns
-------
pos : ndarray, shape (n_dipoles, 3)
The dipole positions.
ori : ndarray, shape (n_dipoles, 3)
The dipole orientations.
See Also
--------
mne.datasets.fetch_phantom
Notes
-----
The Elekta phantoms have a radius of 79.5mm, and HPI coil locations
in the XY-plane at the axis extrema (e.g., (79.5, 0), (0, -79.5), ...).
"""
_check_option('kind', kind, ['vectorview', 'otaniemi'])
if kind == 'vectorview':
# these values were pulled from a scanned image provided by
# Elekta folks
a = np.array([59.7, 48.6, 35.8, 24.8, 37.2, 27.5, 15.8, 7.9])
b = np.array([46.1, 41.9, 38.3, 31.5, 13.9, 16.2, 20.0, 19.3])
x = np.concatenate((a, [0] * 8, -b, [0] * 8))
y = np.concatenate(([0] * 8, -a, [0] * 8, b))
c = [22.9, 23.5, 25.5, 23.1, 52.0, 46.4, 41.0, 33.0]
d = [44.4, 34.0, 21.6, 12.7, 62.4, 51.5, 39.1, 27.9]
z = np.concatenate((c, c, d, d))
signs = ([1, -1] * 4 + [-1, 1] * 4) * 2
elif kind == 'otaniemi':
        # these values were pulled from a Neuromag manual
# (NM20456A, 13.7.1999, p.65)
a = np.array([56.3, 47.6, 39.0, 30.3])
b = np.array([32.5, 27.5, 22.5, 17.5])
c = np.zeros(4)
x = np.concatenate((a, b, c, c, -a, -b, c, c))
y = np.concatenate((c, c, -a, -b, c, c, b, a))
z = np.concatenate((b, a, b, a, b, a, a, b))
signs = [-1] * 8 + [1] * 16 + [-1] * 8
pos = np.vstack((x, y, z)).T / 1000.
# Locs are always in XZ or YZ, and so are the oris. The oris are
# also in the same plane and tangential, so it's easy to determine
# the orientation.
ori = list()
for pi, this_pos in enumerate(pos):
this_ori = np.zeros(3)
idx = np.where(this_pos == 0)[0]
# assert len(idx) == 1
idx = np.setdiff1d(np.arange(3), idx[0])
this_ori[idx] = (this_pos[idx][::-1] /
np.linalg.norm(this_pos[idx])) * [1, -1]
this_ori *= signs[pi]
        # The orientation is now tangential to the position vector, which
        # we could double-check by uncommenting:
# np.testing.assert_allclose(np.dot(this_ori, this_pos) /
# np.linalg.norm(this_pos), 0,
# atol=1e-15)
ori.append(this_ori)
ori = np.array(ori)
return pos, ori
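# Usage sketch: positions are returned in meters and orientations as unit
# vectors tangential to the phantom sphere (32 dipoles per phantom):
#
#     pos, ori = get_phantom_dipoles('vectorview')
#     assert pos.shape == (32, 3) and ori.shape == (32, 3)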
def _concatenate_dipoles(dipoles):
"""Concatenate a list of dipoles."""
times, pos, amplitude, ori, gof = [], [], [], [], []
for dipole in dipoles:
times.append(dipole.times)
pos.append(dipole.pos)
amplitude.append(dipole.amplitude)
ori.append(dipole.ori)
gof.append(dipole.gof)
return Dipole(np.concatenate(times), np.concatenate(pos),
np.concatenate(amplitude), np.concatenate(ori),
np.concatenate(gof), name=None)
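# Note: the concatenation above keeps only the core fields; conf, khi2 and
# nfree fall back to the Dipole defaults and name is reset to None.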
| 37.586617
| 79
| 0.561987
|
from copy import deepcopy
import functools
from functools import partial
import re
import numpy as np
from .cov import compute_whitener, _ensure_cov
from .io.constants import FIFF
from .io.pick import pick_types
from .io.proj import make_projector, _needs_eeg_average_ref_proj
from .bem import _fit_sphere
from .evoked import _read_evoked, _aspect_rev, _write_evokeds
from .fixes import pinvh
from ._freesurfer import read_freesurfer_lut, _get_aseg
from .transforms import _print_coord_trans, _coord_frame_name, apply_trans
from .viz.evoked import _plot_evoked
from ._freesurfer import head_to_mni, head_to_mri
from .forward._make_forward import (_get_trans, _setup_bem,
_prep_meg_channels, _prep_eeg_channels)
from .forward._compute_forward import (_compute_forwards_meeg,
_prep_field_computation)
from .surface import (transform_surface_to, _compute_nearest,
_points_outside_surface)
from .bem import _bem_find_surface, _bem_surf_name
from .source_space import _make_volume_source_space, SourceSpaces
from .parallel import parallel_func
from .utils import (logger, verbose, _time_mask, warn, _check_fname,
check_fname, _pl, fill_doc, _check_option, ShiftTimeMixin,
_svd_lwork, _repeated_svd, _get_blas_funcs, _validate_type,
_VerboseDep)
@fill_doc
class Dipole(_VerboseDep):
@verbose
def __init__(self, times, pos, amplitude, ori, gof,
name=None, conf=None, khi2=None, nfree=None,
*, verbose=None):
self.times = np.array(times)
self.pos = np.array(pos)
self.amplitude = np.array(amplitude)
self.ori = np.array(ori)
self.gof = np.array(gof)
self.name = name
self.conf = dict()
if conf is not None:
for key, value in conf.items():
self.conf[key] = np.array(value)
self.khi2 = np.array(khi2) if khi2 is not None else None
self.nfree = np.array(nfree) if nfree is not None else None
def __repr__(self):
s = "n_times : %s" % len(self.times)
s += ", tmin : %0.3f" % np.min(self.times)
s += ", tmax : %0.3f" % np.max(self.times)
return "<Dipole | %s>" % s
@verbose
def save(self, fname, overwrite=False, *, verbose=None):
fname = _check_fname(fname, overwrite=overwrite)
if fname.endswith('.bdip'):
_write_dipole_bdip(fname, self)
else:
_write_dipole_text(fname, self)
@fill_doc
def crop(self, tmin=None, tmax=None, include_tmax=True):
sfreq = None
if len(self.times) > 1:
sfreq = 1. / np.median(np.diff(self.times))
mask = _time_mask(self.times, tmin, tmax, sfreq=sfreq,
include_tmax=include_tmax)
for attr in ('times', 'pos', 'gof', 'amplitude', 'ori',
'khi2', 'nfree'):
if getattr(self, attr) is not None:
setattr(self, attr, getattr(self, attr)[mask])
for key in self.conf.keys():
self.conf[key] = self.conf[key][mask]
return self
def copy(self):
return deepcopy(self)
@verbose
def plot_locations(self, trans, subject, subjects_dir=None,
mode='orthoview', coord_frame='mri', idx='gof',
show_all=True, ax=None, block=False, show=True,
scale=5e-3, color=(1.0, 0.0, 0.0), fig=None,
verbose=None, title=None):
_check_option('mode', mode, [None, 'arrow', 'sphere', 'orthoview'])
from .viz import plot_dipole_locations
return plot_dipole_locations(
self, trans, subject, subjects_dir, mode, coord_frame, idx,
show_all, ax, block, show, scale=scale, color=color, fig=fig,
title=title)
@verbose
def to_mni(self, subject, trans, subjects_dir=None,
verbose=None):
mri_head_t, trans = _get_trans(trans)
return head_to_mni(self.pos, subject, mri_head_t,
subjects_dir=subjects_dir, verbose=verbose)
@verbose
def to_mri(self, subject, trans, subjects_dir=None,
verbose=None):
mri_head_t, trans = _get_trans(trans)
return head_to_mri(self.pos, subject, mri_head_t,
subjects_dir=subjects_dir, verbose=verbose)
@verbose
def to_volume_labels(self, trans, subject='fsaverage', aseg='aparc+aseg',
subjects_dir=None, verbose=None):
aseg_img, aseg_data = _get_aseg(aseg, subject, subjects_dir)
mri_vox_t = np.linalg.inv(aseg_img.header.get_vox2ras_tkr())
lut_inv = read_freesurfer_lut()[0]
lut = {v: k for k, v in lut_inv.items()}
pos = self.to_mri(subject, trans, subjects_dir=subjects_dir,
verbose=verbose)
pos = apply_trans(mri_vox_t, pos)
pos = np.rint(pos).astype(int)
labels = [lut.get(aseg_data[tuple(coord)], 'Unknown') for coord in pos]
return labels
def plot_amplitudes(self, color='k', show=True):
from .viz import plot_dipole_amplitudes
return plot_dipole_amplitudes([self], [color], show)
def __getitem__(self, item):
if isinstance(item, int):
item = [item]
selected_times = self.times[item].copy()
selected_pos = self.pos[item, :].copy()
selected_amplitude = self.amplitude[item].copy()
selected_ori = self.ori[item, :].copy()
selected_gof = self.gof[item].copy()
selected_name = self.name
selected_conf = dict()
for key in self.conf.keys():
selected_conf[key] = self.conf[key][item]
selected_khi2 = self.khi2[item] if self.khi2 is not None else None
selected_nfree = self.nfree[item] if self.nfree is not None else None
return Dipole(
selected_times, selected_pos, selected_amplitude, selected_ori,
selected_gof, selected_name, selected_conf, selected_khi2,
selected_nfree)
def __len__(self):
return self.pos.shape[0]
def _read_dipole_fixed(fname):
logger.info('Reading %s ...' % fname)
info, nave, aspect_kind, comment, times, data, _ = _read_evoked(fname)
return DipoleFixed(info, data, times, nave, aspect_kind, comment=comment)
@fill_doc
class DipoleFixed(ShiftTimeMixin, _VerboseDep):
@verbose
def __init__(self, info, data, times, nave, aspect_kind,
comment='', *, verbose=None):
self.info = info
self.nave = nave
self._aspect_kind = aspect_kind
self.kind = _aspect_rev.get(aspect_kind, 'unknown')
self.comment = comment
self.times = times
self.data = data
self.preload = True
self._update_first_last()
def __repr__(self):
s = "n_times : %s" % len(self.times)
s += ", tmin : %s" % np.min(self.times)
s += ", tmax : %s" % np.max(self.times)
return "<DipoleFixed | %s>" % s
def copy(self):
return deepcopy(self)
@property
def ch_names(self):
return self.info['ch_names']
@verbose
def save(self, fname, verbose=None):
check_fname(fname, 'DipoleFixed', ('-dip.fif', '-dip.fif.gz',
'_dip.fif', '_dip.fif.gz',),
('.fif', '.fif.gz'))
_write_evokeds(fname, self, check=False)
def plot(self, show=True, time_unit='s'):
return _plot_evoked(self, picks=None, exclude=(), unit=True, show=show,
ylim=None, xlim='tight', proj=False, hline=None,
units=None, scalings=None, titles=None, axes=None,
gfp=False, window_title=None, spatial_colors=False,
plot_type="butterfly", selectable=False,
time_unit=time_unit)
    if np.diff(data[:, [fields.index('begin/ms'),
                        fields.index('end/ms')]], 1, -1).any():
warn('begin and end fields differed, but only begin will be used '
'to store time values')
# Find the correct column in our data array, then scale to proper units
idx = [fields.index(field) for field in required_fields]
assert len(idx) >= 9
times = data[:, idx[0]] / 1000.
pos = 1e-3 * data[:, idx[1:4]] # put data in meters
amplitude = data[:, idx[4]]
norm = amplitude.copy()
amplitude /= 1e9
norm[norm == 0] = 1
ori = data[:, idx[5:8]] / norm[:, np.newaxis]
gof = data[:, idx[8]]
# Deal with optional fields
optional = [None] * 2
for fi, field in enumerate(optional_fields[:2]):
if field in fields:
optional[fi] = data[:, fields.index(field)]
khi2, nfree = optional
conf = dict()
for field, scale in zip(optional_fields[2:], conf_scales): # confidence
if field in fields:
conf[field.split('/')[0]] = scale * data[:, fields.index(field)]
return Dipole(times, pos, amplitude, ori, gof, name, conf, khi2, nfree)
def _write_dipole_text(fname, dip):
fmt = ' %7.1f %7.1f %8.2f %8.2f %8.2f %8.3f %8.3f %8.3f %8.3f %6.2f'
    header = ('#   begin     end   X (mm)   Y (mm)   Z (mm)'
              '   Q(nAm)  Qx(nAm)  Qy(nAm)  Qz(nAm)    g/%')
t = dip.times[:, np.newaxis] * 1000.
gof = dip.gof[:, np.newaxis]
amp = 1e9 * dip.amplitude[:, np.newaxis]
out = (t, t, dip.pos / 1e-3, amp, dip.ori * amp, gof)
# optional fields
fmts = dict(khi2=(' khi^2', ' %8.1f', 1.),
nfree=(' free', ' %5d', 1),
vol=(' vol/mm^3', ' %9.3f', 1e9),
depth=(' depth/mm', ' %9.3f', 1e3),
long=(' long/mm', ' %8.3f', 1e3),
trans=(' trans/mm', ' %9.3f', 1e3),
qlong=(' Qlong/nAm', ' %10.3f', 1e9),
qtrans=(' Qtrans/nAm', ' %11.3f', 1e9),
)
for key in ('khi2', 'nfree'):
data = getattr(dip, key)
if data is not None:
header += fmts[key][0]
fmt += fmts[key][1]
out += (data[:, np.newaxis] * fmts[key][2],)
for key in ('vol', 'depth', 'long', 'trans', 'qlong', 'qtrans'):
data = dip.conf.get(key)
if data is not None:
header += fmts[key][0]
fmt += fmts[key][1]
out += (data[:, np.newaxis] * fmts[key][2],)
out = np.concatenate(out, axis=-1)
# NB CoordinateSystem is hard-coded as Head here
with open(fname, 'wb') as fid:
        fid.write('# CoordinateSystem "Head"\n'.encode('utf-8'))
fid.write((header + '\n').encode('utf-8'))
np.savetxt(fid, out, fmt=fmt)
if dip.name is not None:
            fid.write(('## Name "%s dipoles" Style "Dipoles"\n'
                       % dip.name).encode('utf-8'))
_BDIP_ERROR_KEYS = ('depth', 'long', 'trans', 'qlong', 'qtrans')
def _read_dipole_bdip(fname):
name = None
nfree = None
with open(fname, 'rb') as fid:
# Which dipole in a multi-dipole set
times = list()
pos = list()
amplitude = list()
ori = list()
gof = list()
conf = dict(vol=list())
khi2 = list()
has_errors = None
while True:
num = np.frombuffer(fid.read(4), '>i4')
if len(num) == 0:
break
times.append(np.frombuffer(fid.read(4), '>f4')[0])
fid.read(4) # end
fid.read(12) # r0
pos.append(np.frombuffer(fid.read(12), '>f4'))
Q = np.frombuffer(fid.read(12), '>f4')
amplitude.append(np.linalg.norm(Q))
ori.append(Q / amplitude[-1])
gof.append(100 * np.frombuffer(fid.read(4), '>f4')[0])
this_has_errors = bool(np.frombuffer(fid.read(4), '>i4')[0])
if has_errors is None:
has_errors = this_has_errors
for key in _BDIP_ERROR_KEYS:
conf[key] = list()
assert has_errors == this_has_errors
fid.read(4) # Noise level used for error computations
limits = np.frombuffer(fid.read(20), '>f4') # error limits
for key, lim in zip(_BDIP_ERROR_KEYS, limits):
conf[key].append(lim)
fid.read(100) # (5, 5) fully describes the conf. ellipsoid
conf['vol'].append(np.frombuffer(fid.read(4), '>f4')[0])
khi2.append(np.frombuffer(fid.read(4), '>f4')[0])
fid.read(4) # prob
fid.read(4) # total noise estimate
return Dipole(times, pos, amplitude, ori, gof, name, conf, khi2, nfree)
def _write_dipole_bdip(fname, dip):
with open(fname, 'wb+') as fid:
for ti, t in enumerate(dip.times):
fid.write(np.zeros(1, '>i4').tobytes()) # int dipole
fid.write(np.array([t, 0]).astype('>f4').tobytes())
fid.write(np.zeros(3, '>f4').tobytes()) # r0
fid.write(dip.pos[ti].astype('>f4').tobytes()) # pos
Q = dip.amplitude[ti] * dip.ori[ti]
fid.write(Q.astype('>f4').tobytes())
fid.write(np.array(dip.gof[ti] / 100., '>f4').tobytes())
has_errors = int(bool(len(dip.conf)))
fid.write(np.array(has_errors, '>i4').tobytes()) # has_errors
fid.write(np.zeros(1, '>f4').tobytes()) # noise level
for key in _BDIP_ERROR_KEYS:
val = dip.conf[key][ti] if key in dip.conf else 0.
assert val.shape == ()
fid.write(np.array(val, '>f4').tobytes())
fid.write(np.zeros(25, '>f4').tobytes())
conf = dip.conf['vol'][ti] if 'vol' in dip.conf else 0.
fid.write(np.array(conf, '>f4').tobytes())
khi2 = dip.khi2[ti] if dip.khi2 is not None else 0
fid.write(np.array(khi2, '>f4').tobytes())
fid.write(np.zeros(1, '>f4').tobytes()) # prob
fid.write(np.zeros(1, '>f4').tobytes()) # total noise est
# #############################################################################
# Fitting
def _dipole_forwards(fwd_data, whitener, rr, n_jobs=1):
B = _compute_forwards_meeg(rr, fwd_data, n_jobs, silent=True)
B = np.concatenate(B, axis=1)
assert np.isfinite(B).all()
B_orig = B.copy()
# Apply projection and whiten (cov has projections already)
_, _, dgemm = _get_ddot_dgemv_dgemm()
B = dgemm(1., B, whitener.T)
# column normalization doesn't affect our fitting, so skip for now
scales = np.ones(3)
return B, B_orig, scales
@verbose
def _make_guesses(surf, grid, exclude, mindist, n_jobs=1, verbose=None):
if 'rr' in surf:
logger.info('Guess surface (%s) is in %s coordinates'
% (_bem_surf_name[surf['id']],
_coord_frame_name(surf['coord_frame'])))
else:
logger.info('Making a spherical guess space with radius %7.1f mm...'
% (1000 * surf['R']))
logger.info('Filtering (grid = %6.f mm)...' % (1000 * grid))
src = _make_volume_source_space(surf, grid, exclude, 1000 * mindist,
do_neighbors=False, n_jobs=n_jobs)[0]
assert 'vertno' in src
src = dict(rr=src['rr'][src['vertno']], nn=src['nn'][src['vertno']],
nuse=src['nuse'], coord_frame=src['coord_frame'],
vertno=np.arange(src['nuse']), type='discrete')
return SourceSpaces([src])
def _fit_eval(rd, B, B2, fwd_svd=None, fwd_data=None, whitener=None,
lwork=None):
if fwd_svd is None:
fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis, :])[0]
uu, sing, vv = _repeated_svd(fwd, lwork, overwrite_a=True)
else:
uu, sing, vv = fwd_svd
gof = _dipole_gof(uu, sing, vv, B, B2)[0]
return 1. - gof
@functools.lru_cache(None)
def _get_ddot_dgemv_dgemm():
return _get_blas_funcs(np.float64, ('dot', 'gemv', 'gemm'))
def _dipole_gof(uu, sing, vv, B, B2):
ddot, dgemv, _ = _get_ddot_dgemv_dgemm()
ncomp = 3 if sing[2] / (sing[0] if sing[0] > 0 else 1.) > 0.2 else 2
one = dgemv(1., vv[:ncomp], B)
Bm2 = ddot(one, one)
gof = Bm2 / B2
return gof, one
def _fit_Q(fwd_data, whitener, B, B2, B_orig, rd, ori=None):
from scipy import linalg
if 'fwd' in fwd_data:
assert rd is None
fwd = fwd_data['fwd']
assert fwd.shape[0] == 3
fwd_orig = fwd_data['fwd_orig']
assert fwd_orig.shape[0] == 3
scales = fwd_data['scales']
assert scales.shape == (3,)
fwd_svd = fwd_data['fwd_svd'][0]
else:
fwd, fwd_orig, scales = _dipole_forwards(fwd_data, whitener,
rd[np.newaxis, :])
fwd_svd = None
if ori is None:
if fwd_svd is None:
fwd_svd = linalg.svd(fwd, full_matrices=False)
uu, sing, vv = fwd_svd
gof, one = _dipole_gof(uu, sing, vv, B, B2)
ncomp = len(one)
one /= sing[:ncomp]
Q = np.dot(one, uu.T[:ncomp])
else:
fwd = np.dot(ori[np.newaxis], fwd)
sing = np.linalg.norm(fwd)
one = np.dot(fwd / sing, B)
gof = (one * one)[0] / B2
Q = ori * np.sum(one / sing)
ncomp = 3
Q *= scales[0]
B_residual_noproj = B_orig - np.dot(fwd_orig.T, Q)
return Q, gof, B_residual_noproj, ncomp
def _fit_dipoles(fun, min_dist_to_inner_skull, data, times, guess_rrs,
guess_data, fwd_data, whitener, ori, n_jobs, rank, rhoend):
from scipy.optimize import fmin_cobyla
parallel, p_fun, _ = parallel_func(fun, n_jobs)
res = parallel(p_fun(min_dist_to_inner_skull, B, t, guess_rrs,
guess_data, fwd_data, whitener,
fmin_cobyla, ori, rank, rhoend)
for B, t in zip(data.T, times))
pos = np.array([r[0] for r in res])
amp = np.array([r[1] for r in res])
ori = np.array([r[2] for r in res])
gof = np.array([r[3] for r in res]) * 100
conf = None
if res[0][4] is not None:
conf = np.array([r[4] for r in res])
keys = ['vol', 'depth', 'long', 'trans', 'qlong', 'qtrans']
conf = {key: conf[:, ki] for ki, key in enumerate(keys)}
khi2 = np.array([r[5] for r in res])
nfree = np.array([r[6] for r in res])
residual_noproj = np.array([r[7] for r in res]).T
return pos, amp, ori, gof, conf, khi2, nfree, residual_noproj
def _fit_confidence(rd, Q, ori, whitener, fwd_data):
from scipy import linalg
direction = np.empty((3, 3))
direction[0] = ori
rvec = rd - fwd_data['inner_skull']['r0']
direction[2] = rvec - ori * np.dot(ori, rvec)
direction[2] /= np.linalg.norm(direction[2])
direction[1] = np.cross(direction[2], direction[0])
assert np.allclose(np.dot(direction, direction.T), np.eye(3))
deltas = (-1e-4, 1e-4)
J = np.empty((whitener.shape[0], 6))
for ii in range(3):
fwds = []
for delta in deltas:
this_r = rd[np.newaxis] + delta * direction[ii]
fwds.append(
np.dot(Q, _dipole_forwards(fwd_data, whitener, this_r)[0]))
J[:, ii] = np.diff(fwds, axis=0)[0] / np.diff(deltas)[0]
deltas = np.array([-0.01, 0.01]) * np.linalg.norm(Q)
this_fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis])[0]
for ii in range(3):
fwds = []
for delta in deltas:
fwds.append(np.dot(Q + delta * direction[ii], this_fwd))
J[:, ii + 3] = np.diff(fwds, axis=0)[0] / np.diff(deltas)[0]
# However, the units in the Jacobian are potentially quite different,
# so we need to do some normalization during inversion, then revert.
direction_norm = np.linalg.norm(J[:, :3])
Q_norm = np.linalg.norm(J[:, 3:5]) # omit possible zero Z
norm = np.array([direction_norm] * 3 + [Q_norm] * 3)
J /= norm
J = np.dot(J.T, J)
C = pinvh(J, rtol=1e-14)
C /= norm
C /= norm[:, np.newaxis]
conf = 1.96 * np.sqrt(np.diag(C))
    # The confidence volume of the dipole location is obtained by
    # taking the eigenvalues of the upper left submatrix and computing
    # v = 4π/3 √(c^3 λ1 λ2 λ3) with c = 7.81, or:
vol_conf = 4 * np.pi / 3. * np.sqrt(
476.379541 * np.prod(linalg.eigh(C[:3, :3], eigvals_only=True)))
conf = np.concatenate([conf, [vol_conf]])
# Now we reorder and subselect the proper columns:
# vol, depth, long, trans, Qlong, Qtrans (discard Qdepth, assumed zero)
conf = conf[[6, 2, 0, 1, 3, 4]]
return conf
def _surface_constraint(rd, surf, min_dist_to_inner_skull):
dist = _compute_nearest(surf['rr'], rd[np.newaxis, :],
return_dists=True)[1][0]
if _points_outside_surface(rd[np.newaxis, :], surf, 1)[0]:
dist *= -1.
    # Once we know the dipole is below the inner skull, check that its
    # distance to the inner skull is at least min_dist_to_inner_skull
dist -= min_dist_to_inner_skull
return dist
def _sphere_constraint(rd, r0, R_adj):
return R_adj - np.sqrt(np.sum((rd - r0) ** 2))
def _fit_dipole(min_dist_to_inner_skull, B_orig, t, guess_rrs,
guess_data, fwd_data, whitener, fmin_cobyla, ori, rank,
rhoend):
B = np.dot(whitener, B_orig)
if 'rr' in fwd_data['inner_skull']:
surf = fwd_data['inner_skull']
constraint = partial(_surface_constraint, surf=surf,
min_dist_to_inner_skull=min_dist_to_inner_skull)
else:
surf = None
constraint = partial(
_sphere_constraint, r0=fwd_data['inner_skull']['r0'],
R_adj=fwd_data['inner_skull']['R'] - min_dist_to_inner_skull)
B2 = np.dot(B, B)
if B2 == 0:
warn('Zero field found for time %s' % t)
return np.zeros(3), 0, np.zeros(3), 0, B
idx = np.argmin([_fit_eval(guess_rrs[[fi], :], B, B2, fwd_svd)
for fi, fwd_svd in enumerate(guess_data['fwd_svd'])])
x0 = guess_rrs[idx]
lwork = _svd_lwork((3, B.shape[0]))
fun = partial(_fit_eval, B=B, B2=B2, fwd_data=fwd_data, whitener=whitener,
lwork=lwork)
rd_final = fmin_cobyla(fun, x0, (constraint,), consargs=(),
rhobeg=5e-2, rhoend=rhoend, disp=False)
Q, gof, residual_noproj, n_comp = _fit_Q(
fwd_data, whitener, B, B2, B_orig, rd_final, ori=ori)
khi2 = (1 - gof) * B2
nfree = rank - n_comp
amp = np.sqrt(np.dot(Q, Q))
norm = 1. if amp == 0. else amp
ori = Q / norm
conf = _fit_confidence(rd_final, Q, ori, whitener, fwd_data)
msg = '---- Fitted : %7.1f ms' % (1000. * t)
if surf is not None:
dist_to_inner_skull = _compute_nearest(
surf['rr'], rd_final[np.newaxis, :], return_dists=True)[1][0]
msg += (", distance to inner skull : %2.4f mm"
% (dist_to_inner_skull * 1000.))
logger.info(msg)
return rd_final, amp, ori, gof, conf, khi2, nfree, residual_noproj
def _fit_dipole_fixed(min_dist_to_inner_skull, B_orig, t, guess_rrs,
guess_data, fwd_data, whitener,
fmin_cobyla, ori, rank, rhoend):
B = np.dot(whitener, B_orig)
B2 = np.dot(B, B)
if B2 == 0:
warn('Zero field found for time %s' % t)
return np.zeros(3), 0, np.zeros(3), 0, np.zeros(6)
Q, gof, residual_noproj = _fit_Q(guess_data, whitener, B, B2, B_orig,
rd=None, ori=ori)[:3]
if ori is None:
amp = np.sqrt(np.dot(Q, Q))
norm = 1. if amp == 0. else amp
ori = Q / norm
else:
amp = np.dot(Q, ori)
rd_final = guess_rrs[0]
# conf = _fit_confidence(rd_final, Q, ori, whitener, fwd_data)
conf = khi2 = nfree = None
# No corresponding 'logger' message here because it should go *very* fast
return rd_final, amp, ori, gof, conf, khi2, nfree, residual_noproj
@verbose
def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1,
pos=None, ori=None, rank=None, accuracy='normal', tol=5e-5,
verbose=None):
from scipy import linalg
    # This could eventually be adapted to work with other inputs; these
    # are what is needed:
evoked = evoked.copy()
_validate_type(accuracy, str, 'accuracy')
_check_option('accuracy', accuracy, ('accurate', 'normal'))
# Determine if a list of projectors has an average EEG ref
if _needs_eeg_average_ref_proj(evoked.info):
raise ValueError('EEG average reference is mandatory for dipole '
'fitting.')
if min_dist < 0:
raise ValueError('min_dist should be positive. Got %s' % min_dist)
if ori is not None and pos is None:
raise ValueError('pos must be provided if ori is not None')
data = evoked.data
if not np.isfinite(data).all():
raise ValueError('Evoked data must be finite')
info = evoked.info
times = evoked.times.copy()
comment = evoked.comment
# Convert the min_dist to meters
min_dist_to_inner_skull = min_dist / 1000.
del min_dist
# Figure out our inputs
neeg = len(pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=[]))
if isinstance(bem, str):
bem_extra = bem
else:
bem_extra = repr(bem)
logger.info('BEM : %s' % bem_extra)
mri_head_t, trans = _get_trans(trans)
logger.info('MRI transform : %s' % trans)
bem = _setup_bem(bem, bem_extra, neeg, mri_head_t, verbose=False)
if not bem['is_sphere']:
# Find the best-fitting sphere
inner_skull = _bem_find_surface(bem, 'inner_skull')
inner_skull = inner_skull.copy()
R, r0 = _fit_sphere(inner_skull['rr'], disp=False)
# r0 back to head frame for logging
r0 = apply_trans(mri_head_t['trans'], r0[np.newaxis, :])[0]
inner_skull['r0'] = r0
logger.info('Head origin : '
'%6.1f %6.1f %6.1f mm rad = %6.1f mm.'
% (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], 1000 * R))
del R, r0
else:
r0 = bem['r0']
if len(bem.get('layers', [])) > 0:
R = bem['layers'][0]['rad']
kind = 'rad'
else: # MEG-only
# Use the minimum distance to the MEG sensors as the radius then
R = np.dot(np.linalg.inv(info['dev_head_t']['trans']),
np.hstack([r0, [1.]]))[:3] # r0 -> device
R = R - [info['chs'][pick]['loc'][:3]
for pick in pick_types(info, meg=True, exclude=[])]
if len(R) == 0:
raise RuntimeError('No MEG channels found, but MEG-only '
'sphere model used')
R = np.min(np.sqrt(np.sum(R * R, axis=1))) # use dist to sensors
kind = 'max_rad'
logger.info('Sphere model : origin at (% 7.2f % 7.2f % 7.2f) mm, '
'%s = %6.1f mm'
% (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], kind, R))
inner_skull = dict(R=R, r0=r0) # NB sphere model defined in head frame
del R, r0
# Deal with DipoleFixed cases here
if pos is not None:
fixed_position = True
pos = np.array(pos, float)
if pos.shape != (3,):
raise ValueError('pos must be None or a 3-element array-like,'
' got %s' % (pos,))
logger.info('Fixed position : %6.1f %6.1f %6.1f mm'
% tuple(1000 * pos))
if ori is not None:
ori = np.array(ori, float)
if ori.shape != (3,):
                raise ValueError('ori must be None or a 3-element array-like,'
' got %s' % (ori,))
norm = np.sqrt(np.sum(ori * ori))
if not np.isclose(norm, 1):
raise ValueError('ori must be a unit vector, got length %s'
% (norm,))
logger.info('Fixed orientation : %6.4f %6.4f %6.4f mm'
% tuple(ori))
else:
logger.info('Free orientation : <time-varying>')
fit_n_jobs = 1 # only use 1 job to do the guess fitting
else:
fixed_position = False
# Eventually these could be parameters, but they are just used for
# the initial grid anyway
guess_grid = 0.02 # MNE-C uses 0.01, but this is faster w/similar perf
guess_mindist = max(0.005, min_dist_to_inner_skull)
guess_exclude = 0.02
logger.info('Guess grid : %6.1f mm' % (1000 * guess_grid,))
if guess_mindist > 0.0:
logger.info('Guess mindist : %6.1f mm'
% (1000 * guess_mindist,))
if guess_exclude > 0:
logger.info('Guess exclude : %6.1f mm'
% (1000 * guess_exclude,))
logger.info(f'Using {accuracy} MEG coil definitions.')
fit_n_jobs = n_jobs
cov = _ensure_cov(cov)
logger.info('')
_print_coord_trans(mri_head_t)
_print_coord_trans(info['dev_head_t'])
logger.info('%d bad channels total' % len(info['bads']))
# Forward model setup (setup_forward_model from setup.c)
ch_types = evoked.get_channel_types()
megcoils, compcoils, megnames, meg_info = [], [], [], None
eegels, eegnames = [], []
if 'grad' in ch_types or 'mag' in ch_types:
megcoils, compcoils, megnames, meg_info = \
_prep_meg_channels(info, exclude='bads',
accuracy=accuracy, verbose=verbose)
if 'eeg' in ch_types:
eegels, eegnames = _prep_eeg_channels(info, exclude='bads',
verbose=verbose)
# Ensure that MEG and/or EEG channels are present
if len(megcoils + eegels) == 0:
raise RuntimeError('No MEG or EEG channels found.')
# Whitener for the data
logger.info('Decomposing the sensor noise covariance matrix...')
picks = pick_types(info, meg=True, eeg=True, ref_meg=False)
# In case we want to more closely match MNE-C for debugging:
# from .io.pick import pick_info
# from .cov import prepare_noise_cov
# info_nb = pick_info(info, picks)
# cov = prepare_noise_cov(cov, info_nb, info_nb['ch_names'], verbose=False)
# nzero = (cov['eig'] > 0)
# n_chan = len(info_nb['ch_names'])
# whitener = np.zeros((n_chan, n_chan), dtype=np.float64)
# whitener[nzero, nzero] = 1.0 / np.sqrt(cov['eig'][nzero])
# whitener = np.dot(whitener, cov['eigvec'])
whitener, _, rank = compute_whitener(cov, info, picks=picks,
rank=rank, return_rank=True)
# Proceed to computing the fits (make_guess_data)
if fixed_position:
guess_src = dict(nuse=1, rr=pos[np.newaxis], inuse=np.array([True]))
logger.info('Compute forward for dipole location...')
else:
logger.info('\n---- Computing the forward solution for the guesses...')
guess_src = _make_guesses(inner_skull, guess_grid, guess_exclude,
guess_mindist, n_jobs=n_jobs)[0]
# grid coordinates go from mri to head frame
transform_surface_to(guess_src, 'head', mri_head_t)
logger.info('Go through all guess source locations...')
# inner_skull goes from mri to head frame
if 'rr' in inner_skull:
transform_surface_to(inner_skull, 'head', mri_head_t)
if fixed_position:
if 'rr' in inner_skull:
check = _surface_constraint(pos, inner_skull,
min_dist_to_inner_skull)
else:
check = _sphere_constraint(
pos, inner_skull['r0'],
R_adj=inner_skull['R'] - min_dist_to_inner_skull)
if check <= 0:
raise ValueError('fixed position is %0.1fmm outside the inner '
'skull boundary' % (-1000 * check,))
# C code computes guesses w/sphere model for speed, don't bother here
fwd_data = dict(coils_list=[megcoils, eegels], infos=[meg_info, None],
ccoils_list=[compcoils, None], coil_types=['meg', 'eeg'],
inner_skull=inner_skull)
_prep_field_computation(guess_src['rr'], bem, fwd_data, n_jobs,
verbose=False)
guess_fwd, guess_fwd_orig, guess_fwd_scales = _dipole_forwards(
fwd_data, whitener, guess_src['rr'], n_jobs=fit_n_jobs)
guess_fwd_svd = [linalg.svd(fwd, full_matrices=False)
for fwd in np.array_split(guess_fwd,
len(guess_src['rr']))]
guess_data = dict(fwd=guess_fwd, fwd_svd=guess_fwd_svd,
fwd_orig=guess_fwd_orig, scales=guess_fwd_scales)
del guess_fwd, guess_fwd_svd, guess_fwd_orig, guess_fwd_scales
logger.info('[done %d source%s]' % (guess_src['nuse'],
_pl(guess_src['nuse'])))
data = data[picks]
ch_names = [info['ch_names'][p] for p in picks]
proj_op = make_projector(info['projs'], ch_names, info['bads'])[0]
fun = _fit_dipole_fixed if fixed_position else _fit_dipole
out = _fit_dipoles(
fun, min_dist_to_inner_skull, data, times, guess_src['rr'],
guess_data, fwd_data, whitener, ori, n_jobs, rank, tol)
assert len(out) == 8
if fixed_position and ori is not None:
data = np.array([out[1], out[3]])
out_info = deepcopy(info)
loc = np.concatenate([pos, ori, np.zeros(6)])
out_info._unlocked = True
out_info['chs'] = [
dict(ch_name='dip 01', loc=loc, kind=FIFF.FIFFV_DIPOLE_WAVE,
coord_frame=FIFF.FIFFV_COORD_UNKNOWN, unit=FIFF.FIFF_UNIT_AM,
coil_type=FIFF.FIFFV_COIL_DIPOLE,
unit_mul=0, range=1, cal=1., scanno=1, logno=1),
dict(ch_name='goodness', loc=np.full(12, np.nan),
kind=FIFF.FIFFV_GOODNESS_FIT, unit=FIFF.FIFF_UNIT_AM,
coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
coil_type=FIFF.FIFFV_COIL_NONE,
unit_mul=0, range=1., cal=1., scanno=2, logno=100)]
for key in ['hpi_meas', 'hpi_results', 'projs']:
out_info[key] = list()
for key in ['acq_pars', 'acq_stim', 'description', 'dig',
'experimenter', 'hpi_subsystem', 'proj_id', 'proj_name',
'subject_info']:
out_info[key] = None
out_info._unlocked = False
out_info['bads'] = []
out_info._update_redundant()
out_info._check_consistency()
dipoles = DipoleFixed(out_info, data, times, evoked.nave,
evoked._aspect_kind, comment=comment)
else:
dipoles = Dipole(times, out[0], out[1], out[2], out[3], comment,
out[4], out[5], out[6])
residual = evoked.copy().apply_proj()
residual.data[picks] = np.dot(proj_op, out[-1])
logger.info('%d time points fitted' % len(dipoles.times))
return dipoles, residual
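# A minimal usage sketch for ``fit_dipole`` (the file names below are
# hypothetical; any consistent Evoked/Covariance/BEM/trans combination works):
#
#     import mne
#     evoked = mne.read_evokeds('sample-ave.fif', condition=0).crop(0.1, 0.1)
#     cov = mne.read_cov('sample-cov.fif')
#     dip, residual = fit_dipole(evoked, cov, 'sample-bem-sol.fif',
#                                trans='sample-trans.fif')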
def get_phantom_dipoles(kind='vectorview'):
_check_option('kind', kind, ['vectorview', 'otaniemi'])
if kind == 'vectorview':
a = np.array([59.7, 48.6, 35.8, 24.8, 37.2, 27.5, 15.8, 7.9])
b = np.array([46.1, 41.9, 38.3, 31.5, 13.9, 16.2, 20.0, 19.3])
x = np.concatenate((a, [0] * 8, -b, [0] * 8))
y = np.concatenate(([0] * 8, -a, [0] * 8, b))
c = [22.9, 23.5, 25.5, 23.1, 52.0, 46.4, 41.0, 33.0]
d = [44.4, 34.0, 21.6, 12.7, 62.4, 51.5, 39.1, 27.9]
z = np.concatenate((c, c, d, d))
signs = ([1, -1] * 4 + [-1, 1] * 4) * 2
elif kind == 'otaniemi':
a = np.array([56.3, 47.6, 39.0, 30.3])
b = np.array([32.5, 27.5, 22.5, 17.5])
c = np.zeros(4)
x = np.concatenate((a, b, c, c, -a, -b, c, c))
y = np.concatenate((c, c, -a, -b, c, c, b, a))
z = np.concatenate((b, a, b, a, b, a, a, b))
signs = [-1] * 8 + [1] * 16 + [-1] * 8
pos = np.vstack((x, y, z)).T / 1000.
    # Locs are always in XZ or YZ, and so are the oris. The oris are
    # also in the same plane and tangential, so it's easy to determine
    # the orientation.
ori = list()
for pi, this_pos in enumerate(pos):
this_ori = np.zeros(3)
idx = np.where(this_pos == 0)[0]
# assert len(idx) == 1
idx = np.setdiff1d(np.arange(3), idx[0])
this_ori[idx] = (this_pos[idx][::-1] /
np.linalg.norm(this_pos[idx])) * [1, -1]
this_ori *= signs[pi]
# Now we have this quality, which we could uncomment to
# double-check:
# np.testing.assert_allclose(np.dot(this_ori, this_pos) /
# np.linalg.norm(this_pos), 0,
# atol=1e-15)
ori.append(this_ori)
ori = np.array(ori)
return pos, ori
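# Example: positions (in meters) and unit orientations of the 32 dipoles in
# the Neuromag VectorView phantom:
#
#     pos, ori = get_phantom_dipoles('vectorview')
#     pos.shape, ori.shape  # -> ((32, 3), (32, 3))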
def _concatenate_dipoles(dipoles):
times, pos, amplitude, ori, gof = [], [], [], [], []
for dipole in dipoles:
times.append(dipole.times)
pos.append(dipole.pos)
amplitude.append(dipole.amplitude)
ori.append(dipole.ori)
gof.append(dipole.gof)
return Dipole(np.concatenate(times), np.concatenate(pos),
np.concatenate(amplitude), np.concatenate(ori),
np.concatenate(gof), name=None)
| is_comment_constant_removed: true | is_sharp_comment_removed: true |

hexsha: 1c42f4881546fdb2733d87e1b791f656d9e9c56d | size: 57266 | ext: py | lang: Python
max_stars_repo_path: numpy/polynomial/legendre.py | max_stars_repo_name: tovrstra/numpy | max_stars_repo_head_hexsha: bb5d666e84e2eb294543a67c6143d7e9124d1c73 | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: numpy/polynomial/legendre.py | max_issues_repo_name: tovrstra/numpy | max_issues_repo_head_hexsha: bb5d666e84e2eb294543a67c6143d7e9124d1c73 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: numpy/polynomial/legendre.py | max_forks_repo_name: tovrstra/numpy | max_forks_repo_head_hexsha: bb5d666e84e2eb294543a67c6143d7e9124d1c73 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
"""
Legendre Series (:mod:`numpy.polynomial.legendre`)
===================================================
.. currentmodule:: numpy.polynomial.legendre
This module provides a number of objects (mostly functions) useful for
dealing with Legendre series, including a `Legendre` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
.. autosummary::
:toctree: generated/
legdomain Legendre series default domain, [-1,1].
legzero Legendre series that evaluates identically to 0.
legone Legendre series that evaluates identically to 1.
legx Legendre series for the identity map, ``f(x) = x``.
Arithmetic
----------
.. autosummary::
:toctree: generated/
legmulx multiply a Legendre series in P_i(x) by x.
legadd add two Legendre series.
legsub subtract one Legendre series from another.
legmul multiply two Legendre series.
legdiv divide one Legendre series by another.
legpow raise a Legendre series to a non-negative integer power.
legval evaluate a Legendre series at given points.
legval2d evaluate a 2D Legendre series at given points.
legval3d evaluate a 3D Legendre series at given points.
leggrid2d evaluate a 2D Legendre series on a Cartesian product.
leggrid3d evaluate a 3D Legendre series on a Cartesian product.
Calculus
--------
.. autosummary::
:toctree: generated/
legder differentiate a Legendre series.
legint integrate a Legendre series.
Misc Functions
--------------
.. autosummary::
:toctree: generated/
legfromroots create a Legendre series with specified roots.
legroots find the roots of a Legendre series.
legvander Vandermonde-like matrix for Legendre polynomials.
legvander2d Vandermonde-like matrix for 2D Legendre series.
legvander3d Vandermonde-like matrix for 3D Legendre series.
leggauss Gauss-Legendre quadrature, points and weights.
legweight Legendre weight function.
legcompanion symmetrized companion matrix in Legendre form.
legfit least-squares fit returning a Legendre series.
legtrim trim leading coefficients from a Legendre series.
legline Legendre series representing given straight line.
leg2poly convert a Legendre series to a polynomial.
poly2leg convert a polynomial to a Legendre series.
Classes
-------
Legendre A Legendre series class.
See also
--------
numpy.polynomial.polynomial
numpy.polynomial.chebyshev
numpy.polynomial.laguerre
numpy.polynomial.hermite
numpy.polynomial.hermite_e
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd',
'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder',
'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander',
'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d',
'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion',
'leggauss', 'legweight']
legtrim = pu.trimcoef
def poly2leg(pol):
"""
Convert a polynomial to a Legendre series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Legendre series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Legendre
series.
See Also
--------
leg2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(np.arange(4))
>>> p
Polynomial([ 0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
>>> c = P.Legendre(P.legendre.poly2leg(p.coef))
>>> c
Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = legadd(legmulx(res), pol[i])
return res
def leg2poly(c):
"""
Convert a Legendre series to a polynomial.
Convert an array representing the coefficients of a Legendre series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Legendre series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2leg
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
    >>> from numpy import polynomial as P
    >>> c = P.Legendre(range(4))
>>> c
Legendre([ 0., 1., 2., 3.], [-1., 1.])
>>> p = c.convert(kind=P.Polynomial)
>>> p
Polynomial([-1. , -3.5, 3. , 7.5], [-1., 1.])
>>> P.leg2poly(range(4))
array([-1. , -3.5, 3. , 7.5])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n < 3:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], (c1*(i - 1))/i)
c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i)
return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Legendre
legdomain = np.array([-1, 1])
# Legendre coefficients representing zero.
legzero = np.array([0])
# Legendre coefficients representing one.
legone = np.array([1])
# Legendre coefficients representing the identity x.
legx = np.array([0, 1])
def legline(off, scl):
"""
Legendre series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Legendre series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> import numpy.polynomial.legendre as L
>>> L.legline(3,2)
array([3, 2])
>>> L.legval(-3, L.legline(3,2)) # should be -3
-3.0
"""
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def legfromroots(roots):
"""
Generate a Legendre series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Legendre form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Legendre form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, chebfromroots, lagfromroots, hermfromroots,
hermefromroots.
Examples
--------
>>> import numpy.polynomial.legendre as L
>>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.4, 0. , 0.4])
>>> j = complex(0,1)
>>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [legline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [legmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = legmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def legadd(c1, c2):
"""
Add one Legendre series to another.
Returns the sum of two Legendre series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Legendre series of their sum.
See Also
--------
legsub, legmul, legdiv, legpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Legendre series
is a Legendre series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legadd(c1,c2)
array([ 4., 4., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def legsub(c1, c2):
"""
Subtract one Legendre series from another.
Returns the difference of two Legendre series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Legendre series coefficients representing their difference.
See Also
--------
legadd, legmul, legdiv, legpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Legendre
series is a Legendre series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legsub(c1,c2)
array([-2., 0., 2.])
>>> L.legsub(c2,c1) # -C.legsub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def legmulx(c):
"""Multiply a Legendre series by x.
Multiply the Legendre series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Legendre
polynomials in the form
.. math::
xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1)
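    Examples
    --------
    A quick check of the recursion with ``c = P_0 + 2*P_1 + 3*P_2``:
    >>> from numpy.polynomial import legendre as L
    >>> L.legmulx([1, 2, 3])
    array([ 0.66666667,  2.2       ,  1.33333333,  1.8       ])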
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]
for i in range(1, len(c)):
j = i + 1
k = i - 1
s = i + j
prd[j] = (c[i]*j)/s
prd[k] += (c[i]*i)/s
return prd
def legmul(c1, c2):
"""
Multiply one Legendre series by another.
Returns the product of two Legendre series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Legendre series coefficients representing their product.
See Also
--------
legadd, legsub, legdiv, legpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Legendre polynomial basis set. Thus, to express
the product as a Legendre series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2)
    >>> L.legmul(c1,c2) # multiplication requires "reprojection"
array([ 4.33333333, 10.4 , 11.66666667, 3.6 ])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd)
c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd)
return legadd(c0, legmulx(c1))
def legdiv(c1, c2):
"""
Divide one Legendre series by another.
Returns the quotient-with-remainder of two Legendre series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
quo, rem : ndarrays
Of Legendre series coefficients representing the quotient and
remainder.
See Also
--------
legadd, legsub, legmul, legpow
Notes
-----
In general, the (polynomial) division of one Legendre series by another
results in quotient and remainder terms that are not in the Legendre
polynomial basis set. Thus, to express these results as a Legendre
series, it is necessary to "reproject" the results onto the Legendre
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legdiv(c1,c2) # quotient "intuitive," remainder not
(array([ 3.]), array([-8., -4.]))
>>> c2 = (0,1,2,3)
>>> L.legdiv(c2,c1) # neither "intuitive"
(array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = legmul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def legpow(c, pow, maxpower=16):
"""Raise a Legendre series to a power.
Returns the Legendre series `c` raised to the power `pow`. The
    argument `c` is a sequence of coefficients ordered from low to high,
    i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Legendre series of power.
See Also
--------
legadd, legsub, legmul, legdiv
Examples
--------
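    Squaring ``P_0 + 2*P_1`` requires reprojection onto the Legendre basis:
    >>> from numpy.polynomial import legendre as L
    >>> L.legpow((1, 2), 2)
    array([ 2.33333333,  4.        ,  2.66666667])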
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = legmul(prd, c)
return prd
def legder(c, m=1, scl=1, axis=0):
"""
Differentiate a Legendre series.
Returns the Legendre series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2``
while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) +
2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Legendre series coefficients. If c is multidimensional the
different axis correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Legendre series of the derivative.
See Also
--------
legint
Notes
-----
In general, the result of differentiating a Legendre series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c = (1,2,3,4)
>>> L.legder(c)
array([ 6., 9., 20.])
>>> L.legder(c, 3)
array([ 60.])
>>> L.legder(c, scl=-1)
array([ -6., -9., -20.])
>>> L.legder(c, 2,-1)
array([ 9., 60.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
return c
c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 2, -1):
der[j - 1] = (2*j - 1)*c[j]
c[j - 2] += c[j]
if n > 1:
der[1] = 3*c[2]
der[0] = c[1]
c = der
c = np.moveaxis(c, 0, iaxis)
return c
def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Legendre series.
Returns the Legendre series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]]
represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) +
2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Legendre series coefficients. If c is multidimensional the
different axis correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Legendre series coefficient array of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
legder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c = (1,2,3)
>>> L.legint(c)
array([ 0.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, 3)
array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02,
-1.73472348e-18, 1.90476190e-02, 9.52380952e-03])
>>> L.legint(c, k=3)
array([ 3.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, lbnd=-2)
array([ 7.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, scl=2)
array([ 0.66666667, 0.8 , 1.33333333, 1.2 ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
return c
c = np.moveaxis(c, iaxis, 0)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
if n > 1:
tmp[2] = c[1]/3
for j in range(2, n):
t = c[j]/(2*j + 1)
tmp[j + 1] = t
tmp[j - 1] -= t
tmp[0] += k[i] - legval(lbnd, tmp)
c = tmp
c = np.moveaxis(c, 0, iaxis)
return c
def legval(x, c, tensor=True):
"""
Evaluate a Legendre series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
    scalars have shape ``()``.
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
        or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
legval2d, leggrid2d, legval3d, leggrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
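    Evaluating ``P_0 + 2*P_1 + 3*P_2`` at a scalar and at an array of points:
    >>> from numpy.polynomial import legendre as L
    >>> L.legval(0.5, [1, 2, 3])
    1.625
    >>> L.legval([0.0, 1.0], [1, 2, 3])
    array([-0.5,  6. ])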
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - (c1*(nd - 1))/nd
c1 = tmp + (c1*x*(2*nd - 1))/nd
return c0 + c1*x
def legval2d(x, y, c):
"""
Evaluate a 2-D Legendre series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y)
The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Legendre series at points formed
from pairs of corresponding values from `x` and `y`.
See Also
--------
legval, leggrid2d, legval3d, leggrid3d
Notes
-----
.. versionadded:: 1.7.0
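    Examples
    --------
    A small check (the coefficients below encode ``1 + x*y``, whose value
    at ``x = y = 0.5`` is ``1.25``):
    >>> from numpy.polynomial import legendre as L
    >>> L.legval2d(0.5, 0.5, [[1, 0], [0, 1]])
    1.25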
"""
try:
x, y = np.array((x, y), copy=0)
except Exception:
raise ValueError('x, y are incompatible')
c = legval(x, c)
c = legval(y, c, tensor=False)
return c
def leggrid2d(x, y, c):
"""
Evaluate a 2-D Legendre series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the two dimensional Legendre series at points in the
Cartesian product of `x` and `y`.
See Also
--------
legval, legval2d, legval3d, leggrid3d
Notes
-----
.. versionadded:: 1.7.0
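    Examples
    --------
    A minimal grid evaluation (the coefficients encode ``1 + x*y``):
    >>> from numpy.polynomial import legendre as L
    >>> L.leggrid2d([0, 1], [0, 1], [[1, 0], [0, 1]])
    array([[ 1.,  1.],
           [ 1.,  2.]])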
"""
c = legval(x, c)
c = legval(y, c)
return c
def legval3d(x, y, z, c):
"""
Evaluate a 3-D Legendre series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
legval, legval2d, leggrid2d, leggrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except Exception:
raise ValueError('x, y, z are incompatible')
c = legval(x, c)
c = legval(y, c, tensor=False)
c = legval(z, c, tensor=False)
return c
def leggrid3d(x, y, z, c):
"""
Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
    The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
        greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the three dimensional Legendre series at points in
        the Cartesian product of `x`, `y`, and `z`.
See Also
--------
legval, legval2d, leggrid2d, legval3d
Notes
-----
.. versionadded:: 1.7.0
"""
c = legval(x, c)
c = legval(y, c)
c = legval(z, c)
return c
def legvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = L_i(x)
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Legendre polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and
``legval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Legendre series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Legendre polynomial. The dtype will be the same as
the converted `x`.
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
# Use forward recursion to generate the entries. This is not as accurate
# as reverse recursion in this application but it is more efficient.
v[0] = x*0 + 1
if ideg > 0:
v[1] = x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i
return np.moveaxis(v, 0, -1)
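# Illustrative sketch (editorial addition): the columns of the
# pseudo-Vandermonde matrix are L_0, L_1, ... evaluated at the sample
# points, so np.dot(V, coef) matches legval(x, coef) as the docstring
# states:
#
#     >>> x = np.array([-1.0, 0.0, 1.0])
#     >>> V = legvander(x, 2)      # columns: L_0(x), L_1(x), L_2(x)
#     >>> V
#     array([[ 1. , -1. ,  1. ],
#            [ 1. ,  0. , -0.5],
#            [ 1. ,  1. ,  1. ]])
#     >>> np.allclose(np.dot(V, [1.0, 2.0, 3.0]), legval(x, [1.0, 2.0, 3.0]))
#     True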
def legvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Legendre polynomials.
If ``V = legvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Legendre
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
    legvander, legvander3d, legval2d, legval3d
Notes
-----
.. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = legvander(x, degx)
vy = legvander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
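# Illustrative sketch (editorial addition): flattening a 2-D coefficient
# array matches the column ordering of legvander2d, so the dot product
# reproduces legval2d:
#
#     >>> x = y = np.array([0.0, 0.5, 1.0])
#     >>> c = np.array([[1.0, 2.0], [3.0, 4.0]])
#     >>> V = legvander2d(x, y, [1, 1])
#     >>> np.allclose(np.dot(V, c.flat), legval2d(x, y, c))
#     True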
def legvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Legendre polynomials.
If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Legendre
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    legvander, legvander2d, legval2d, legval3d
Notes
-----
.. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = legvander(x, degx)
vy = legvander(y, degy)
vz = legvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def legfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Legendre series to data.
Return the coefficients of a Legendre series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer
all terms up to and including the `deg`'th term are included in the
fit. For NumPy versions >= 1.11.0 a list of integers specifying the
degrees of the terms to include may be used instead.
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Legendre coefficients ordered from low to high. If `y` was
2-D, the coefficients for the data in column k of `y` are in
column `k`. If `deg` is specified as a list, coefficients for
terms not included in the fit are set equal to zero in the
returned `coef`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, polyfit, lagfit, hermfit, hermefit
legval : Evaluates a Legendre series.
legvander : Vandermonde matrix of Legendre series.
legweight : Legendre weight function (= 1).
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Legendre series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where :math:`w_j` are the weights. This problem is solved by setting up
    the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Legendre series are usually better conditioned than fits
using power series, but much can depend on the distribution of the
sample points and the smoothness of the data. If the quality of the fit
is inadequate splines may be a good alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
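    A minimal illustrative fit (editorial sketch): since
    ``x**2 == (L_0(x) + 2*L_2(x))/3``, fitting squared data recovers
    coefficients close to ``[1/3, 0, 2/3]``:

    >>> import numpy as np
    >>> import numpy.polynomial.legendre as leg
    >>> x = np.linspace(-1, 1, 51)
    >>> np.allclose(leg.legfit(x, x**2, 2), [1.0/3.0, 0.0, 2.0/3.0])
    True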
"""
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
deg = np.asarray(deg)
# check arguments.
if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
raise TypeError("deg must be an int or non-empty 1-D array of int")
if deg.min() < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
if deg.ndim == 0:
lmax = deg
order = lmax + 1
van = legvander(x, lmax)
else:
deg = np.sort(deg)
lmax = deg[-1]
order = len(deg)
van = legvander(x, lmax)[:, deg]
# set up the least squares matrices in transposed form
lhs = van.T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# Expand c to include non-fitted coefficients which are set to zero
if deg.ndim > 0:
if c.ndim == 2:
cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
else:
cc = np.zeros(lmax+1, dtype=c.dtype)
cc[deg] = c
c = cc
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning, stacklevel=2)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def legcompanion(c):
"""Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
    symmetric when `c` is a Legendre basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded:: 1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = 1./np.sqrt(2*np.arange(n) + 1)
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n]
bot[...] = top
mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1))
return mat
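# Illustrative sketch (editorial addition): for a pure basis polynomial,
# here c = [0, 0, 0, 1] (i.e. L_3), the scaled companion matrix is
# symmetric, so its eigenvalues -- the roots 0 and +/-sqrt(3/5) of L_3 --
# can be computed stably with eigvalsh:
#
#     >>> m = legcompanion([0, 0, 0, 1])
#     >>> np.allclose(m, m.T)
#     True
#     >>> np.allclose(la.eigvalsh(m), [-0.6**0.5, 0.0, 0.6**0.5])
#     True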
def legroots(c):
"""
Compute the roots of a Legendre series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * L_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, chebroots, lagroots, hermroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such values.
Roots with multiplicity greater than 1 will also show larger errors as
the value of the series near such points is relatively insensitive to
errors in the roots. Isolated roots near the origin can be improved by
a few iterations of Newton's method.
The Legendre series basis polynomials aren't powers of ``x`` so the
results of this function may seem unintuitive.
Examples
--------
>>> import numpy.polynomial.legendre as leg
>>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots
array([-0.85099543, -0.11407192, 0.51506735])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = legcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def leggauss(deg):
"""
Gauss-Legendre quadrature.
Computes the sample points and weights for Gauss-Legendre quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
the weight function :math:`f(x) = 1`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded:: 1.7.0
    The results have only been tested up to degree 100; higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`L_n`, and then scaling the results to get
the right value when integrating 1.
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = legcompanion(c)
x = la.eigvalsh(m)
# improve roots by one application of Newton
dy = legval(x, c)
df = legval(x, legder(c))
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = legval(x, c[1:])
fm /= np.abs(fm).max()
df /= np.abs(df).max()
w = 1/(fm * df)
# for Legendre we can also symmetrize
w = (w + w[::-1])/2
x = (x - x[::-1])/2
# scale w to get the right value
w *= 2. / w.sum()
return x, w
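# Illustrative sketch (editorial addition): a 3-point Gauss-Legendre rule
# integrates polynomials of degree <= 5 exactly over [-1, 1]; for example
# the integral of x**4 is 2/5:
#
#     >>> x, w = leggauss(3)
#     >>> np.allclose(np.sum(w * x**4), 2.0/5.0)
#     True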
def legweight(x):
"""
Weight function of the Legendre polynomials.
The weight function is :math:`1` and the interval of integration is
:math:`[-1, 1]`. The Legendre polynomials are orthogonal, but not
normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded:: 1.7.0
"""
w = x*0.0 + 1.0
return w
#
# Legendre series class
#
class Legendre(ABCPolyBase):
"""A Legendre series class.
The Legendre class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
Legendre coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
"""
# Virtual Functions
_add = staticmethod(legadd)
_sub = staticmethod(legsub)
_mul = staticmethod(legmul)
_div = staticmethod(legdiv)
_pow = staticmethod(legpow)
_val = staticmethod(legval)
_int = staticmethod(legint)
_der = staticmethod(legder)
_fit = staticmethod(legfit)
_line = staticmethod(legline)
_roots = staticmethod(legroots)
_fromroots = staticmethod(legfromroots)
# Virtual properties
nickname = 'leg'
domain = np.array(legdomain)
window = np.array(legdomain)
| 31.292896
| 79
| 0.596043
|
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd',
'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder',
'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander',
'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d',
'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion',
'leggauss', 'legweight']
legtrim = pu.trimcoef
def poly2leg(pol):
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = legadd(legmulx(res), pol[i])
return res
def leg2poly(c):
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n < 3:
return c
else:
c0 = c[-2]
c1 = c[-1]
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], (c1*(i - 1))/i)
c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i)
return polyadd(c0, polymulx(c1))
legdomain = np.array([-1, 1])
legzero = np.array([0])
legone = np.array([1])
legx = np.array([0, 1])
def legline(off, scl):
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def legfromroots(roots):
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [legline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [legmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = legmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def legadd(c1, c2):
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def legsub(c1, c2):
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def legmulx(c):
[c] = pu.as_series([c])
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]
for i in range(1, len(c)):
j = i + 1
k = i - 1
s = i + j
prd[j] = (c[i]*j)/s
prd[k] += (c[i]*i)/s
return prd
def legmul(c1, c2):
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd)
c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd)
return legadd(c0, legmulx(c1))
def legdiv(c1, c2):
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = legmul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def legpow(c, pow, maxpower=16):
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
prd = c
for i in range(2, power + 1):
prd = legmul(prd, c)
return prd
def legder(c, m=1, scl=1, axis=0):
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
return c
c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 2, -1):
der[j - 1] = (2*j - 1)*c[j]
c[j - 2] += c[j]
if n > 1:
der[1] = 3*c[2]
der[0] = c[1]
c = der
c = np.moveaxis(c, 0, iaxis)
return c
def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
return c
c = np.moveaxis(c, iaxis, 0)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
if n > 1:
tmp[2] = c[1]/3
for j in range(2, n):
t = c[j]/(2*j + 1)
tmp[j + 1] = t
tmp[j - 1] -= t
tmp[0] += k[i] - legval(lbnd, tmp)
c = tmp
c = np.moveaxis(c, 0, iaxis)
return c
def legval(x, c, tensor=True):
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - (c1*(nd - 1))/nd
c1 = tmp + (c1*x*(2*nd - 1))/nd
return c0 + c1*x
def legval2d(x, y, c):
try:
x, y = np.array((x, y), copy=0)
except Exception:
raise ValueError('x, y are incompatible')
c = legval(x, c)
c = legval(y, c, tensor=False)
return c
def leggrid2d(x, y, c):
c = legval(x, c)
c = legval(y, c)
return c
def legval3d(x, y, z, c):
try:
x, y, z = np.array((x, y, z), copy=0)
except Exception:
raise ValueError('x, y, z are incompatible')
c = legval(x, c)
c = legval(y, c, tensor=False)
c = legval(z, c, tensor=False)
return c
def leggrid3d(x, y, z, c):
c = legval(x, c)
c = legval(y, c)
c = legval(z, c)
return c
def legvander(x, deg):
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
if ideg > 0:
v[1] = x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i
return np.moveaxis(v, 0, -1)
def legvander2d(x, y, deg):
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = legvander(x, degx)
vy = legvander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
def legvander3d(x, y, z, deg):
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = legvander(x, degx)
vy = legvander(y, degy)
vz = legvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def legfit(x, y, deg, rcond=None, full=False, w=None):
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
deg = np.asarray(deg)
if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
raise TypeError("deg must be an int or non-empty 1-D array of int")
if deg.min() < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
if deg.ndim == 0:
lmax = deg
order = lmax + 1
van = legvander(x, lmax)
else:
deg = np.sort(deg)
lmax = deg[-1]
order = len(deg)
van = legvander(x, lmax)[:, deg]
lhs = van.T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# Expand c to include non-fitted coefficients which are set to zero
if deg.ndim > 0:
if c.ndim == 2:
cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
else:
cc = np.zeros(lmax+1, dtype=c.dtype)
cc[deg] = c
c = cc
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning, stacklevel=2)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def legcompanion(c):
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = 1./np.sqrt(2*np.arange(n) + 1)
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n]
bot[...] = top
mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1))
return mat
def legroots(c):
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = legcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def leggauss(deg):
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = legcompanion(c)
x = la.eigvalsh(m)
# improve roots by one application of Newton
dy = legval(x, c)
df = legval(x, legder(c))
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = legval(x, c[1:])
fm /= np.abs(fm).max()
df /= np.abs(df).max()
w = 1/(fm * df)
# for Legendre we can also symmetrize
w = (w + w[::-1])/2
x = (x - x[::-1])/2
# scale w to get the right value
w *= 2. / w.sum()
return x, w
def legweight(x):
w = x*0.0 + 1.0
return w
#
# Legendre series class
#
class Legendre(ABCPolyBase):
# Virtual Functions
_add = staticmethod(legadd)
_sub = staticmethod(legsub)
_mul = staticmethod(legmul)
_div = staticmethod(legdiv)
_pow = staticmethod(legpow)
_val = staticmethod(legval)
_int = staticmethod(legint)
_der = staticmethod(legder)
_fit = staticmethod(legfit)
_line = staticmethod(legline)
_roots = staticmethod(legroots)
_fromroots = staticmethod(legfromroots)
# Virtual properties
nickname = 'leg'
domain = np.array(legdomain)
window = np.array(legdomain)
| true
| true
|
1c42f6b45d30734090b1df60499e01a2ab06be4b
| 1,635
|
py
|
Python
|
tmp/visualize_vggface.py
|
seonho/facenet
|
c4de25b04b76dc4d16ebe7a328cac27f220040e4
|
[
"MIT"
] | 12
|
2017-11-01T12:35:47.000Z
|
2020-02-26T19:41:30.000Z
|
tmp/visualize_vggface.py
|
KittenCN/pyFaceNet
|
0804d06a3533a83ff865a3c4343cfca2a5cbe063
|
[
"MIT"
] | 8
|
2017-12-05T23:45:54.000Z
|
2022-02-09T23:28:51.000Z
|
tmp/visualize_vggface.py
|
KittenCN/pyFaceNet
|
0804d06a3533a83ff865a3c4343cfca2a5cbe063
|
[
"MIT"
] | 6
|
2017-09-09T12:22:53.000Z
|
2019-12-17T07:54:18.000Z
|
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import vggface16
def main():
sess = tf.Session()
t_input = tf.placeholder(np.float32, name='input') # define the input tensor
image_mean = 117.0
t_preprocessed = tf.expand_dims(t_input-image_mean, 0)
# Build the inference graph
nodes = vggface16.load('../data/vgg_face.mat', t_preprocessed)
img_noise = np.random.uniform(size=(224,224,3)) + 117.0
# Picking some internal layer. Note that we use outputs before applying the ReLU nonlinearity
# to have non-zero gradients for features with negative initial activations.
layer = 'conv5_3'
channel = 139 # picking some feature channel to visualize
img = render_naive(sess, t_input, nodes[layer][:,:,:,channel], img_noise)
showarray(img)
def showarray(a):
a = np.uint8(np.clip(a, 0, 1)*255)
plt.imshow(a)
plt.show()
def visstd(a, s=0.1):
'''Normalize the image range for visualization'''
return (a-a.mean())/max(a.std(), 1e-4)*s + 0.5
def render_naive(sess, t_input, t_obj, img0, iter_n=20, step=1.0):
t_score = tf.reduce_mean(t_obj) # defining the optimization objective
t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
img = img0.copy()
for _ in range(iter_n):
g, _ = sess.run([t_grad, t_score], {t_input:img})
# normalizing the gradient, so the same step size should work
g /= g.std()+1e-8 # for different layers and networks
img += g*step
return visstd(img)
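# Editorial note (illustrative): render_naive performs plain activation
# maximization -- repeated gradient ascent of the mean channel activation
# with respect to the input pixels. Dividing by g.std() makes the update
# roughly scale-invariant across layers and networks.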
if __name__ == '__main__':
main()
| 32.7
| 97
| 0.659939
|
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import vggface16
def main():
sess = tf.Session()
t_input = tf.placeholder(np.float32, name='input')
image_mean = 117.0
t_preprocessed = tf.expand_dims(t_input-image_mean, 0)
nodes = vggface16.load('../data/vgg_face.mat', t_preprocessed)
img_noise = np.random.uniform(size=(224,224,3)) + 117.0
layer = 'conv5_3'
channel = 139
img = render_naive(sess, t_input, nodes[layer][:,:,:,channel], img_noise)
showarray(img)
def showarray(a):
a = np.uint8(np.clip(a, 0, 1)*255)
plt.imshow(a)
plt.show()
def visstd(a, s=0.1):
return (a-a.mean())/max(a.std(), 1e-4)*s + 0.5
def render_naive(sess, t_input, t_obj, img0, iter_n=20, step=1.0):
t_score = tf.reduce_mean(t_obj)
t_grad = tf.gradients(t_score, t_input)[0]
img = img0.copy()
for _ in range(iter_n):
g, _ = sess.run([t_grad, t_score], {t_input:img})
g /= g.std()+1e-8
img += g*step
return visstd(img)
if __name__ == '__main__':
main()
| true
| true
|
1c42f8a174a74c0673de846fda4cbf3a2ae15373
| 28,177
|
py
|
Python
|
django/contrib/contenttypes/fields.py
|
peteralexandercharles/django
|
61c7350f41f2534daf3888709f3c987b7d779a29
|
[
"BSD-3-Clause",
"0BSD"
] | null | null | null |
django/contrib/contenttypes/fields.py
|
peteralexandercharles/django
|
61c7350f41f2534daf3888709f3c987b7d779a29
|
[
"BSD-3-Clause",
"0BSD"
] | null | null | null |
django/contrib/contenttypes/fields.py
|
peteralexandercharles/django
|
61c7350f41f2534daf3888709f3c987b7d779a29
|
[
"BSD-3-Clause",
"0BSD"
] | null | null | null |
import functools
import itertools
from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, models, router, transaction
from django.db.models import DO_NOTHING, ForeignObject, ForeignObjectRel
from django.db.models.base import ModelBase, make_foreign_order_accessors
from django.db.models.fields.mixins import FieldCacheMixin
from django.db.models.fields.related import (
ReverseManyToOneDescriptor,
lazy_related_operation,
)
from django.db.models.query_utils import PathInfo
from django.db.models.sql import AND
from django.db.models.sql.where import WhereNode
from django.utils.functional import cached_property
class GenericForeignKey(FieldCacheMixin):
"""
Provide a generic many-to-one relation through the ``content_type`` and
``object_id`` fields.
This class also doubles as an accessor to the related object (similar to
ForwardManyToOneDescriptor) by adding itself as a model attribute.
"""
# Field flags
auto_created = False
concrete = False
editable = False
hidden = False
is_relation = True
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
related_model = None
remote_field = None
def __init__(
self, ct_field="content_type", fk_field="object_id", for_concrete_model=True
):
self.ct_field = ct_field
self.fk_field = fk_field
self.for_concrete_model = for_concrete_model
self.editable = False
self.rel = None
self.column = None
def contribute_to_class(self, cls, name, **kwargs):
self.name = name
self.model = cls
cls._meta.add_field(self, private=True)
setattr(cls, name, self)
def get_filter_kwargs_for_object(self, obj):
"""See corresponding method on Field"""
return {
self.fk_field: getattr(obj, self.fk_field),
self.ct_field: getattr(obj, self.ct_field),
}
def get_forward_related_filter(self, obj):
"""See corresponding method on RelatedField"""
return {
self.fk_field: obj.pk,
self.ct_field: ContentType.objects.get_for_model(obj).pk,
}
def __str__(self):
model = self.model
return "%s.%s" % (model._meta.label, self.name)
def check(self, **kwargs):
return [
*self._check_field_name(),
*self._check_object_id_field(),
*self._check_content_type_field(),
]
def _check_field_name(self):
if self.name.endswith("_"):
return [
checks.Error(
"Field names must not end with an underscore.",
obj=self,
id="fields.E001",
)
]
else:
return []
def _check_object_id_field(self):
try:
self.model._meta.get_field(self.fk_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey object ID references the "
"nonexistent field '%s'." % self.fk_field,
obj=self,
id="contenttypes.E001",
)
]
else:
return []
def _check_content_type_field(self):
"""
Check if field named `field_name` in model `model` exists and is a
valid content_type field (is a ForeignKey to ContentType).
"""
try:
field = self.model._meta.get_field(self.ct_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey content type references the "
"nonexistent field '%s.%s'."
% (self.model._meta.object_name, self.ct_field),
obj=self,
id="contenttypes.E002",
)
]
else:
if not isinstance(field, models.ForeignKey):
return [
checks.Error(
"'%s.%s' is not a ForeignKey."
% (self.model._meta.object_name, self.ct_field),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id="contenttypes.E003",
)
]
elif field.remote_field.model != ContentType:
return [
checks.Error(
"'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'."
% (self.model._meta.object_name, self.ct_field),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id="contenttypes.E004",
)
]
else:
return []
def get_cache_name(self):
return self.name
def get_content_type(self, obj=None, id=None, using=None):
if obj is not None:
return ContentType.objects.db_manager(obj._state.db).get_for_model(
obj, for_concrete_model=self.for_concrete_model
)
elif id is not None:
return ContentType.objects.db_manager(using).get_for_id(id)
else:
# This should never happen. I love comments like this, don't you?
raise Exception("Impossible arguments to GFK.get_content_type!")
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is not None:
raise ValueError("Custom queryset can't be used for this lookup.")
# For efficiency, group the instances by content type and then do one
# query per model
fk_dict = defaultdict(set)
# We need one instance for each group in order to get the right db:
instance_dict = {}
ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
for instance in instances:
# We avoid looking for values if either ct_id or fkey value is None
ct_id = getattr(instance, ct_attname)
if ct_id is not None:
fk_val = getattr(instance, self.fk_field)
if fk_val is not None:
fk_dict[ct_id].add(fk_val)
instance_dict[ct_id] = instance
ret_val = []
for ct_id, fkeys in fk_dict.items():
instance = instance_dict[ct_id]
ct = self.get_content_type(id=ct_id, using=instance._state.db)
ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))
# For doing the join in Python, we have to match both the FK val and the
# content type, so we use a callable that returns a (fk, class) pair.
def gfk_key(obj):
ct_id = getattr(obj, ct_attname)
if ct_id is None:
return None
else:
model = self.get_content_type(
id=ct_id, using=obj._state.db
).model_class()
return (
model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
model,
)
return (
ret_val,
lambda obj: (obj.pk, obj.__class__),
gfk_key,
True,
self.name,
True,
)
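    # Editorial note (illustrative, hypothetical TaggedItem model): the
    # grouping above means that
    #
    #     TaggedItem.objects.prefetch_related("content_object")
    #
    # issues one query per distinct content type among the fetched rows
    # rather than one query per row.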
def __get__(self, instance, cls=None):
if instance is None:
return self
# Don't use getattr(instance, self.ct_field) here because that might
# reload the same ContentType over and over (#5570). Instead, get the
# content type ID here, and later when the actual instance is needed,
# use ContentType.objects.get_for_id(), which has a global cache.
f = self.model._meta.get_field(self.ct_field)
ct_id = getattr(instance, f.get_attname(), None)
pk_val = getattr(instance, self.fk_field)
rel_obj = self.get_cached_value(instance, default=None)
if rel_obj is not None:
ct_match = (
ct_id == self.get_content_type(obj=rel_obj, using=instance._state.db).id
)
pk_match = rel_obj._meta.pk.to_python(pk_val) == rel_obj.pk
if ct_match and pk_match:
return rel_obj
else:
rel_obj = None
if ct_id is not None:
ct = self.get_content_type(id=ct_id, using=instance._state.db)
try:
rel_obj = ct.get_object_for_this_type(pk=pk_val)
except ObjectDoesNotExist:
pass
self.set_cached_value(instance, rel_obj)
return rel_obj
def __set__(self, instance, value):
ct = None
fk = None
if value is not None:
ct = self.get_content_type(obj=value)
fk = value.pk
setattr(instance, self.ct_field, ct)
setattr(instance, self.fk_field, fk)
self.set_cached_value(instance, value)
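# Illustrative sketch (editorial addition, hypothetical TaggedItem model):
# a GenericForeignKey pairs a ForeignKey to ContentType with an object id
# column to produce a single accessor that can point at any model instance.
#
#     class TaggedItem(models.Model):
#         tag = models.SlugField()
#         content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
#         object_id = models.PositiveIntegerField()
#         content_object = GenericForeignKey("content_type", "object_id")
#
#     # Setting content_object fills content_type and object_id via __set__;
#     # reading it resolves them back to an instance via __get__.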
class GenericRel(ForeignObjectRel):
"""
Used by GenericRelation to store information about the relation.
"""
def __init__(
self,
field,
to,
related_name=None,
related_query_name=None,
limit_choices_to=None,
):
super().__init__(
field,
to,
related_name=related_query_name or "+",
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
on_delete=DO_NOTHING,
)
class GenericRelation(ForeignObject):
"""
Provide a reverse to a relation created by a GenericForeignKey.
"""
# Field flags
auto_created = False
empty_strings_allowed = False
many_to_many = False
many_to_one = False
one_to_many = True
one_to_one = False
rel_class = GenericRel
mti_inherited = False
def __init__(
self,
to,
object_id_field="object_id",
content_type_field="content_type",
for_concrete_model=True,
related_query_name=None,
limit_choices_to=None,
**kwargs,
):
kwargs["rel"] = self.rel_class(
self,
to,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
)
# Reverse relations are always nullable (Django can't enforce that a
# foreign key on the related model points to this model).
kwargs["null"] = True
kwargs["blank"] = True
kwargs["on_delete"] = models.CASCADE
kwargs["editable"] = False
kwargs["serialize"] = False
# This construct is somewhat of an abuse of ForeignObject. This field
# represents a relation from pk to object_id field. But, this relation
# isn't direct, the join is generated reverse along foreign key. So,
# the from_field is object_id field, to_field is pk because of the
# reverse join.
super().__init__(to, from_fields=[object_id_field], to_fields=[], **kwargs)
self.object_id_field_name = object_id_field
self.content_type_field_name = content_type_field
self.for_concrete_model = for_concrete_model
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_generic_foreign_key_existence(),
]
def _is_matching_generic_foreign_key(self, field):
"""
Return True if field is a GenericForeignKey whose content type and
object id fields correspond to the equivalent attributes on this
GenericRelation.
"""
return (
isinstance(field, GenericForeignKey)
and field.ct_field == self.content_type_field_name
and field.fk_field == self.object_id_field_name
)
def _check_generic_foreign_key_existence(self):
target = self.remote_field.model
if isinstance(target, ModelBase):
fields = target._meta.private_fields
if any(self._is_matching_generic_foreign_key(field) for field in fields):
return []
else:
return [
checks.Error(
"The GenericRelation defines a relation with the model "
"'%s', but that model does not have a GenericForeignKey."
% target._meta.label,
obj=self,
id="contenttypes.E004",
)
]
else:
return []
def resolve_related_fields(self):
self.to_fields = [self.model._meta.pk.name]
return [
(
self.remote_field.model._meta.get_field(self.object_id_field_name),
self.model._meta.pk,
)
]
def _get_path_info_with_parent(self, filtered_relation):
"""
Return the path that joins the current model through any parent models.
The idea is that if you have a GFK defined on a parent model then we
need to join the parent model first, then the child model.
"""
# With an inheritance chain ChildTag -> Tag and Tag defines the
# GenericForeignKey, and a TaggedItem model has a GenericRelation to
# ChildTag, then we need to generate a join from TaggedItem to Tag
# (as Tag.object_id == TaggedItem.pk), and another join from Tag to
# ChildTag (as that is where the relation is to). Do this by first
# generating a join to the parent model, then generating joins to the
# child models.
path = []
opts = self.remote_field.model._meta.concrete_model._meta
parent_opts = opts.get_field(self.object_id_field_name).model._meta
target = parent_opts.pk
path.append(
PathInfo(
from_opts=self.model._meta,
to_opts=parent_opts,
target_fields=(target,),
join_field=self.remote_field,
m2m=True,
direct=False,
filtered_relation=filtered_relation,
)
)
# Collect joins needed for the parent -> child chain. This is easiest
# to do if we collect joins for the child -> parent chain and then
# reverse the direction (call to reverse() and use of
# field.remote_field.get_path_info()).
parent_field_chain = []
while parent_opts != opts:
field = opts.get_ancestor_link(parent_opts.model)
parent_field_chain.append(field)
opts = field.remote_field.model._meta
parent_field_chain.reverse()
for field in parent_field_chain:
path.extend(field.remote_field.get_path_info())
return path
def get_path_info(self, filtered_relation=None):
opts = self.remote_field.model._meta
object_id_field = opts.get_field(self.object_id_field_name)
if object_id_field.model != opts.model:
return self._get_path_info_with_parent(filtered_relation)
else:
target = opts.pk
return [
PathInfo(
from_opts=self.model._meta,
to_opts=opts,
target_fields=(target,),
join_field=self.remote_field,
m2m=True,
direct=False,
filtered_relation=filtered_relation,
)
]
def get_reverse_path_info(self, filtered_relation=None):
opts = self.model._meta
from_opts = self.remote_field.model._meta
return [
PathInfo(
from_opts=from_opts,
to_opts=opts,
target_fields=(opts.pk,),
join_field=self,
m2m=not self.unique,
direct=False,
filtered_relation=filtered_relation,
)
]
def value_to_string(self, obj):
qs = getattr(obj, self.name).all()
return str([instance.pk for instance in qs])
def contribute_to_class(self, cls, name, **kwargs):
kwargs["private_only"] = True
super().contribute_to_class(cls, name, **kwargs)
self.model = cls
# Disable the reverse relation for fields inherited by subclasses of a
# model in multi-table inheritance. The reverse relation points to the
# field of the base model.
if self.mti_inherited:
self.remote_field.related_name = "+"
self.remote_field.related_query_name = None
setattr(cls, self.name, ReverseGenericManyToOneDescriptor(self.remote_field))
# Add get_RELATED_order() and set_RELATED_order() to the model this
# field belongs to, if the model on the other end of this relation
# is ordered with respect to its corresponding GenericForeignKey.
if not cls._meta.abstract:
def make_generic_foreign_order_accessors(related_model, model):
if self._is_matching_generic_foreign_key(
model._meta.order_with_respect_to
):
make_foreign_order_accessors(model, related_model)
lazy_related_operation(
make_generic_foreign_order_accessors,
self.model,
self.remote_field.model,
)
def set_attributes_from_rel(self):
pass
def get_internal_type(self):
return "ManyToManyField"
def get_content_type(self):
"""
Return the content type associated with this field's model.
"""
return ContentType.objects.get_for_model(
self.model, for_concrete_model=self.for_concrete_model
)
def get_extra_restriction(self, alias, remote_alias):
field = self.remote_field.model._meta.get_field(self.content_type_field_name)
contenttype_pk = self.get_content_type().pk
lookup = field.get_lookup("exact")(field.get_col(remote_alias), contenttype_pk)
return WhereNode([lookup], connector=AND)
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
"""
Return all objects related to ``objs`` via this ``GenericRelation``.
"""
return self.remote_field.model._base_manager.db_manager(using).filter(
**{
"%s__pk"
% self.content_type_field_name: ContentType.objects.db_manager(using)
.get_for_model(self.model, for_concrete_model=self.for_concrete_model)
.pk,
"%s__in" % self.object_id_field_name: [obj.pk for obj in objs],
}
)
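# Illustrative sketch (editorial addition, hypothetical Bookmark model
# paired with the TaggedItem example above): declaring
#
#     class Bookmark(models.Model):
#         url = models.URLField()
#         tags = GenericRelation(TaggedItem)
#
# enables reverse traversal in both directions: bookmark.tags.all() uses
# the related manager created below, and Bookmark.objects.filter(
# tags__tag="django") joins through object_id with the extra content-type
# restriction supplied by get_extra_restriction().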
class ReverseGenericManyToOneDescriptor(ReverseManyToOneDescriptor):
"""
Accessor to the related objects manager on the one-to-many relation created
by GenericRelation.
In the example::
class Post(Model):
comments = GenericRelation(Comment)
``post.comments`` is a ReverseGenericManyToOneDescriptor instance.
"""
@cached_property
def related_manager_cls(self):
return create_generic_related_manager(
self.rel.model._default_manager.__class__,
self.rel,
)
def create_generic_related_manager(superclass, rel):
"""
Factory function to create a manager that subclasses another manager
(generally the default manager of a given model) and adds behaviors
specific to generic relations.
"""
class GenericRelatedObjectManager(superclass):
def __init__(self, instance=None):
super().__init__()
self.instance = instance
self.model = rel.model
self.get_content_type = functools.partial(
ContentType.objects.db_manager(instance._state.db).get_for_model,
for_concrete_model=rel.field.for_concrete_model,
)
self.content_type = self.get_content_type(instance)
self.content_type_field_name = rel.field.content_type_field_name
self.object_id_field_name = rel.field.object_id_field_name
self.prefetch_cache_name = rel.field.attname
self.pk_val = instance.pk
self.core_filters = {
"%s__pk" % self.content_type_field_name: self.content_type.id,
self.object_id_field_name: self.pk_val,
}
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_generic_related_manager(manager.__class__, rel)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def __str__(self):
return repr(self)
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
db = self._db or router.db_for_read(self.model, instance=self.instance)
return queryset.using(db).filter(**self.core_filters)
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name)
except (AttributeError, KeyError):
pass # nothing to clear from cache
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
# Group instances by content types.
content_type_queries = (
models.Q(
(f"{self.content_type_field_name}__pk", content_type_id),
(f"{self.object_id_field_name}__in", {obj.pk for obj in objs}),
)
for content_type_id, objs in itertools.groupby(
sorted(instances, key=lambda obj: self.get_content_type(obj).pk),
lambda obj: self.get_content_type(obj).pk,
)
)
query = models.Q(*content_type_queries, _connector=models.Q.OR)
# We (possibly) need to convert object IDs to the type of the
# instances' PK in order to match up instances:
object_id_converter = instances[0]._meta.pk.to_python
content_type_id_field_name = "%s_id" % self.content_type_field_name
return (
queryset.filter(query),
lambda relobj: (
object_id_converter(getattr(relobj, self.object_id_field_name)),
getattr(relobj, content_type_id_field_name),
),
lambda obj: (obj.pk, self.get_content_type(obj).pk),
False,
self.prefetch_cache_name,
False,
)
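        # Editorial note (illustrative, hypothetical Post/Comment models):
        # Post.objects.prefetch_related("comments") reaches this method; the
        # OR-ed Q objects above cover every parent instance's
        # (content_type, object_id) pair in a single filtered queryset.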
def add(self, *objs, bulk=True):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError(
"'%s' instance expected, got %r"
% (self.model._meta.object_name, obj)
)
setattr(obj, self.content_type_field_name, self.content_type)
setattr(obj, self.object_id_field_name, self.pk_val)
if bulk:
pks = []
for obj in objs:
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first." % obj
)
check_and_update_obj(obj)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(
**{
self.content_type_field_name: self.content_type,
self.object_id_field_name: self.pk_val,
}
)
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def remove(self, *objs, bulk=True):
if not objs:
return
self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk)
remove.alters_data = True
def clear(self, *, bulk=True):
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.delete()` creates its own atomic block which
# contains the `pre_delete` and `post_delete` signal handlers.
queryset.delete()
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
obj.delete()
_clear.alters_data = True
def set(self, objs, *, bulk=True, clear=False):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs)
self.add(*new_objs, bulk=bulk)
set.alters_data = True
def create(self, **kwargs):
self._remove_prefetched_objects()
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super().using(db).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super().using(db).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super().using(db).update_or_create(**kwargs)
update_or_create.alters_data = True
return GenericRelatedObjectManager
| 36.97769
| 88
| 0.578876
|
import functools
import itertools
from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, models, router, transaction
from django.db.models import DO_NOTHING, ForeignObject, ForeignObjectRel
from django.db.models.base import ModelBase, make_foreign_order_accessors
from django.db.models.fields.mixins import FieldCacheMixin
from django.db.models.fields.related import (
ReverseManyToOneDescriptor,
lazy_related_operation,
)
from django.db.models.query_utils import PathInfo
from django.db.models.sql import AND
from django.db.models.sql.where import WhereNode
from django.utils.functional import cached_property
class GenericForeignKey(FieldCacheMixin):
auto_created = False
concrete = False
editable = False
hidden = False
is_relation = True
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
related_model = None
remote_field = None
def __init__(
self, ct_field="content_type", fk_field="object_id", for_concrete_model=True
):
self.ct_field = ct_field
self.fk_field = fk_field
self.for_concrete_model = for_concrete_model
self.editable = False
self.rel = None
self.column = None
def contribute_to_class(self, cls, name, **kwargs):
self.name = name
self.model = cls
cls._meta.add_field(self, private=True)
setattr(cls, name, self)
def get_filter_kwargs_for_object(self, obj):
return {
self.fk_field: getattr(obj, self.fk_field),
self.ct_field: getattr(obj, self.ct_field),
}
def get_forward_related_filter(self, obj):
return {
self.fk_field: obj.pk,
self.ct_field: ContentType.objects.get_for_model(obj).pk,
}
def __str__(self):
model = self.model
return "%s.%s" % (model._meta.label, self.name)
def check(self, **kwargs):
return [
*self._check_field_name(),
*self._check_object_id_field(),
*self._check_content_type_field(),
]
def _check_field_name(self):
if self.name.endswith("_"):
return [
checks.Error(
"Field names must not end with an underscore.",
obj=self,
id="fields.E001",
)
]
else:
return []
def _check_object_id_field(self):
try:
self.model._meta.get_field(self.fk_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey object ID references the "
"nonexistent field '%s'." % self.fk_field,
obj=self,
id="contenttypes.E001",
)
]
else:
return []
def _check_content_type_field(self):
try:
field = self.model._meta.get_field(self.ct_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey content type references the "
"nonexistent field '%s.%s'."
% (self.model._meta.object_name, self.ct_field),
obj=self,
id="contenttypes.E002",
)
]
else:
if not isinstance(field, models.ForeignKey):
return [
checks.Error(
"'%s.%s' is not a ForeignKey."
% (self.model._meta.object_name, self.ct_field),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id="contenttypes.E003",
)
]
elif field.remote_field.model != ContentType:
return [
checks.Error(
"'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'."
% (self.model._meta.object_name, self.ct_field),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id="contenttypes.E004",
)
]
else:
return []
def get_cache_name(self):
return self.name
def get_content_type(self, obj=None, id=None, using=None):
if obj is not None:
return ContentType.objects.db_manager(obj._state.db).get_for_model(
obj, for_concrete_model=self.for_concrete_model
)
elif id is not None:
return ContentType.objects.db_manager(using).get_for_id(id)
else:
raise Exception("Impossible arguments to GFK.get_content_type!")
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is not None:
raise ValueError("Custom queryset can't be used for this lookup.")
fk_dict = defaultdict(set)
instance_dict = {}
ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
for instance in instances:
ct_id = getattr(instance, ct_attname)
if ct_id is not None:
fk_val = getattr(instance, self.fk_field)
if fk_val is not None:
fk_dict[ct_id].add(fk_val)
instance_dict[ct_id] = instance
ret_val = []
for ct_id, fkeys in fk_dict.items():
instance = instance_dict[ct_id]
ct = self.get_content_type(id=ct_id, using=instance._state.db)
ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))
def gfk_key(obj):
ct_id = getattr(obj, ct_attname)
if ct_id is None:
return None
else:
model = self.get_content_type(
id=ct_id, using=obj._state.db
).model_class()
return (
model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
model,
)
return (
ret_val,
lambda obj: (obj.pk, obj.__class__),
gfk_key,
True,
self.name,
True,
)
def __get__(self, instance, cls=None):
if instance is None:
return self
        # Don't use getattr(instance, self.ct_field) here because that might
        # reload the same ContentType over and over (#5570). Instead, get the
        # content type ID here, and later when the actual instance is needed,
        # use ContentType.objects.get_for_id(), which has a global cache.
f = self.model._meta.get_field(self.ct_field)
ct_id = getattr(instance, f.get_attname(), None)
pk_val = getattr(instance, self.fk_field)
rel_obj = self.get_cached_value(instance, default=None)
if rel_obj is not None:
ct_match = (
ct_id == self.get_content_type(obj=rel_obj, using=instance._state.db).id
)
pk_match = rel_obj._meta.pk.to_python(pk_val) == rel_obj.pk
if ct_match and pk_match:
return rel_obj
else:
rel_obj = None
if ct_id is not None:
ct = self.get_content_type(id=ct_id, using=instance._state.db)
try:
rel_obj = ct.get_object_for_this_type(pk=pk_val)
except ObjectDoesNotExist:
pass
self.set_cached_value(instance, rel_obj)
return rel_obj
def __set__(self, instance, value):
ct = None
fk = None
if value is not None:
ct = self.get_content_type(obj=value)
fk = value.pk
setattr(instance, self.ct_field, ct)
setattr(instance, self.fk_field, fk)
self.set_cached_value(instance, value)
class GenericRel(ForeignObjectRel):
def __init__(
self,
field,
to,
related_name=None,
related_query_name=None,
limit_choices_to=None,
):
super().__init__(
field,
to,
related_name=related_query_name or "+",
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
on_delete=DO_NOTHING,
)
class GenericRelation(ForeignObject):
# Field flags
auto_created = False
empty_strings_allowed = False
many_to_many = False
many_to_one = False
one_to_many = True
one_to_one = False
rel_class = GenericRel
mti_inherited = False
def __init__(
self,
to,
object_id_field="object_id",
content_type_field="content_type",
for_concrete_model=True,
related_query_name=None,
limit_choices_to=None,
**kwargs,
):
kwargs["rel"] = self.rel_class(
self,
to,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
)
        # Reverse relations are always nullable (Django can't enforce that a
        # foreign key on the related model points to this model).
kwargs["null"] = True
kwargs["blank"] = True
kwargs["on_delete"] = models.CASCADE
kwargs["editable"] = False
kwargs["serialize"] = False
        # This construct is somewhat of an abuse of ForeignObject. This field
        # represents a relation from pk to object_id field. But, this relation
        # is not direct, the join is generated reversed along foreign key. So,
        # the from_field is object_id field, to_field is pk because of the
        # reverse join.
super().__init__(to, from_fields=[object_id_field], to_fields=[], **kwargs)
self.object_id_field_name = object_id_field
self.content_type_field_name = content_type_field
self.for_concrete_model = for_concrete_model
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_generic_foreign_key_existence(),
]
def _is_matching_generic_foreign_key(self, field):
return (
isinstance(field, GenericForeignKey)
and field.ct_field == self.content_type_field_name
and field.fk_field == self.object_id_field_name
)
def _check_generic_foreign_key_existence(self):
target = self.remote_field.model
if isinstance(target, ModelBase):
fields = target._meta.private_fields
if any(self._is_matching_generic_foreign_key(field) for field in fields):
return []
else:
return [
checks.Error(
"The GenericRelation defines a relation with the model "
"'%s', but that model does not have a GenericForeignKey."
% target._meta.label,
obj=self,
id="contenttypes.E004",
)
]
else:
return []
def resolve_related_fields(self):
self.to_fields = [self.model._meta.pk.name]
return [
(
self.remote_field.model._meta.get_field(self.object_id_field_name),
self.model._meta.pk,
)
]
def _get_path_info_with_parent(self, filtered_relation):
# With an inheritance chain ChildTag -> Tag and Tag defines the
# GenericForeignKey, and a TaggedItem model has a GenericRelation to
# ChildTag, then we need to generate a join from TaggedItem to Tag
# (as Tag.object_id == TaggedItem.pk), and another join from Tag to
# ChildTag (as that is where the relation is to). Do this by first
# generating a join to the parent model, then generating joins to the
# child models.
path = []
opts = self.remote_field.model._meta.concrete_model._meta
parent_opts = opts.get_field(self.object_id_field_name).model._meta
target = parent_opts.pk
path.append(
PathInfo(
from_opts=self.model._meta,
to_opts=parent_opts,
target_fields=(target,),
join_field=self.remote_field,
m2m=True,
direct=False,
filtered_relation=filtered_relation,
)
)
# Collect joins needed for the parent -> child chain. This is easiest
# to do if we collect joins for the child -> parent chain and then
# reverse the direction (call to reverse() and use of
# field.remote_field.get_path_info()).
parent_field_chain = []
while parent_opts != opts:
field = opts.get_ancestor_link(parent_opts.model)
parent_field_chain.append(field)
opts = field.remote_field.model._meta
parent_field_chain.reverse()
for field in parent_field_chain:
path.extend(field.remote_field.get_path_info())
return path
def get_path_info(self, filtered_relation=None):
opts = self.remote_field.model._meta
object_id_field = opts.get_field(self.object_id_field_name)
if object_id_field.model != opts.model:
return self._get_path_info_with_parent(filtered_relation)
else:
target = opts.pk
return [
PathInfo(
from_opts=self.model._meta,
to_opts=opts,
target_fields=(target,),
join_field=self.remote_field,
m2m=True,
direct=False,
filtered_relation=filtered_relation,
)
]
def get_reverse_path_info(self, filtered_relation=None):
opts = self.model._meta
from_opts = self.remote_field.model._meta
return [
PathInfo(
from_opts=from_opts,
to_opts=opts,
target_fields=(opts.pk,),
join_field=self,
m2m=not self.unique,
direct=False,
filtered_relation=filtered_relation,
)
]
def value_to_string(self, obj):
qs = getattr(obj, self.name).all()
return str([instance.pk for instance in qs])
def contribute_to_class(self, cls, name, **kwargs):
kwargs["private_only"] = True
super().contribute_to_class(cls, name, **kwargs)
self.model = cls
# Disable the reverse relation for fields inherited by subclasses of a
# model in multi-table inheritance. The reverse relation points to the
# field of the base model.
if self.mti_inherited:
self.remote_field.related_name = "+"
self.remote_field.related_query_name = None
setattr(cls, self.name, ReverseGenericManyToOneDescriptor(self.remote_field))
# Add get_RELATED_order() and set_RELATED_order() to the model this
# field belongs to, if the model on the other end of this relation
# is ordered with respect to its corresponding GenericForeignKey.
if not cls._meta.abstract:
def make_generic_foreign_order_accessors(related_model, model):
if self._is_matching_generic_foreign_key(
model._meta.order_with_respect_to
):
make_foreign_order_accessors(model, related_model)
lazy_related_operation(
make_generic_foreign_order_accessors,
self.model,
self.remote_field.model,
)
def set_attributes_from_rel(self):
pass
def get_internal_type(self):
return "ManyToManyField"
def get_content_type(self):
return ContentType.objects.get_for_model(
self.model, for_concrete_model=self.for_concrete_model
)
def get_extra_restriction(self, alias, remote_alias):
field = self.remote_field.model._meta.get_field(self.content_type_field_name)
contenttype_pk = self.get_content_type().pk
lookup = field.get_lookup("exact")(field.get_col(remote_alias), contenttype_pk)
return WhereNode([lookup], connector=AND)
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
return self.remote_field.model._base_manager.db_manager(using).filter(
**{
"%s__pk"
% self.content_type_field_name: ContentType.objects.db_manager(using)
.get_for_model(self.model, for_concrete_model=self.for_concrete_model)
.pk,
"%s__in" % self.object_id_field_name: [obj.pk for obj in objs],
}
)
class ReverseGenericManyToOneDescriptor(ReverseManyToOneDescriptor):
@cached_property
def related_manager_cls(self):
return create_generic_related_manager(
self.rel.model._default_manager.__class__,
self.rel,
)
def create_generic_related_manager(superclass, rel):
class GenericRelatedObjectManager(superclass):
def __init__(self, instance=None):
super().__init__()
self.instance = instance
self.model = rel.model
self.get_content_type = functools.partial(
ContentType.objects.db_manager(instance._state.db).get_for_model,
for_concrete_model=rel.field.for_concrete_model,
)
self.content_type = self.get_content_type(instance)
self.content_type_field_name = rel.field.content_type_field_name
self.object_id_field_name = rel.field.object_id_field_name
self.prefetch_cache_name = rel.field.attname
self.pk_val = instance.pk
self.core_filters = {
"%s__pk" % self.content_type_field_name: self.content_type.id,
self.object_id_field_name: self.pk_val,
}
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_generic_related_manager(manager.__class__, rel)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def __str__(self):
return repr(self)
def _apply_rel_filters(self, queryset):
db = self._db or router.db_for_read(self.model, instance=self.instance)
return queryset.using(db).filter(**self.core_filters)
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name)
except (AttributeError, KeyError):
pass # nothing to clear from cache
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
# Group instances by content types.
content_type_queries = (
models.Q(
(f"{self.content_type_field_name}__pk", content_type_id),
(f"{self.object_id_field_name}__in", {obj.pk for obj in objs}),
)
for content_type_id, objs in itertools.groupby(
sorted(instances, key=lambda obj: self.get_content_type(obj).pk),
lambda obj: self.get_content_type(obj).pk,
)
)
query = models.Q(*content_type_queries, _connector=models.Q.OR)
# We (possibly) need to convert object IDs to the type of the
# instances' PK in order to match up instances:
object_id_converter = instances[0]._meta.pk.to_python
content_type_id_field_name = "%s_id" % self.content_type_field_name
return (
queryset.filter(query),
lambda relobj: (
object_id_converter(getattr(relobj, self.object_id_field_name)),
getattr(relobj, content_type_id_field_name),
),
lambda obj: (obj.pk, self.get_content_type(obj).pk),
False,
self.prefetch_cache_name,
False,
)
def add(self, *objs, bulk=True):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError(
"'%s' instance expected, got %r"
% (self.model._meta.object_name, obj)
)
setattr(obj, self.content_type_field_name, self.content_type)
setattr(obj, self.object_id_field_name, self.pk_val)
if bulk:
pks = []
for obj in objs:
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first." % obj
)
check_and_update_obj(obj)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(
**{
self.content_type_field_name: self.content_type,
self.object_id_field_name: self.pk_val,
}
)
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def remove(self, *objs, bulk=True):
if not objs:
return
self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk)
remove.alters_data = True
def clear(self, *, bulk=True):
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.delete()` creates its own atomic block which
# contains the `pre_delete` and `post_delete` signal handlers.
queryset.delete()
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
obj.delete()
_clear.alters_data = True
def set(self, objs, *, bulk=True, clear=False):
            # Force evaluation of `objs` in case it's a queryset whose value
            # could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs)
self.add(*new_objs, bulk=bulk)
set.alters_data = True
def create(self, **kwargs):
self._remove_prefetched_objects()
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super().using(db).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super().using(db).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super().using(db).update_or_create(**kwargs)
update_or_create.alters_data = True
return GenericRelatedObjectManager
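# --- Example (added; not part of the original module) ---
# A minimal sketch of how GenericForeignKey and GenericRelation are wired
# together in application code. The TaggedItem/Bookmark models are
# hypothetical, and this assumes a configured Django project with
# 'django.contrib.contenttypes' in INSTALLED_APPS.
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import models


class TaggedItem(models.Model):
    tag = models.SlugField()
    # The content_type/object_id pair backs the GenericForeignKey.
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey("content_type", "object_id")


class Bookmark(models.Model):
    url = models.URLField()
    # Reverse accessor; resolved through ReverseGenericManyToOneDescriptor
    # and the manager built by create_generic_related_manager() above.
    tags = GenericRelation(TaggedItem)

# Typical use inside application code:
#   b = Bookmark.objects.create(url="https://example.com")
#   b.tags.create(tag="django")                 # GenericRelatedObjectManager.create
#   TaggedItem.objects.first().content_object   # GenericForeignKey.__get__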
| true
| true
|
1c42f8f82a8febca13297af39b3d0800744f2bf8
| 118
|
py
|
Python
|
libtad/common/__init__.py
|
timeanddate/libtad-python
|
8c3b14578ed1f5f5e79cc83b415433f59e39814f
|
[
"MIT"
] | 2
|
2022-01-14T11:35:50.000Z
|
2022-03-07T04:20:14.000Z
|
libtad/common/__init__.py
|
timeanddate/libtad-python
|
8c3b14578ed1f5f5e79cc83b415433f59e39814f
|
[
"MIT"
] | null | null | null |
libtad/common/__init__.py
|
timeanddate/libtad-python
|
8c3b14578ed1f5f5e79cc83b415433f59e39814f
|
[
"MIT"
] | null | null | null |
__all__ = ["exceptions"]
from . import exceptions
from .xml_utils import XmlUtils
def __dir__():
return __all__
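# --- Example (added; not part of the original package) ---
# A self-contained sketch of the same module-level __dir__ technique
# (PEP 562): dir() reports only the names in __all__, while other
# attributes stay importable. The demo module below is hypothetical.
import types

demo = types.ModuleType("demo")
demo.__all__ = ["exceptions"]
demo.exceptions = object()
demo.xml_utils = object()  # present, but hidden from dir()
demo.__dir__ = lambda: demo.__all__
print(dir(demo))  # ['exceptions']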
| 14.75
| 31
| 0.737288
|
__all__ = ["exceptions"]
from . import exceptions
from .xml_utils import XmlUtils
def __dir__():
return __all__
| true
| true
|
1c42f9379e2d32367bc0fd48e0a3dddb47acabc8
| 14,990
|
py
|
Python
|
Packs/rasterize/Integrations/rasterize/rasterize.py
|
ArmatureSystems/content
|
ff7b2a9dc1900b0473cdf9efa6527fe32a21fcb7
|
[
"MIT"
] | 1
|
2020-07-22T05:55:11.000Z
|
2020-07-22T05:55:11.000Z
|
Packs/rasterize/Integrations/rasterize/rasterize.py
|
nicoloereni/content
|
ddb88044c5b39a17894dd13e7ae260d9854afc30
|
[
"MIT"
] | null | null | null |
Packs/rasterize/Integrations/rasterize/rasterize.py
|
nicoloereni/content
|
ddb88044c5b39a17894dd13e7ae260d9854afc30
|
[
"MIT"
] | 2
|
2020-07-15T06:41:52.000Z
|
2020-07-19T18:45:23.000Z
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, InvalidArgumentException, TimeoutException
from PyPDF2 import PdfFileReader
from pdf2image import convert_from_path
import numpy as np
from PIL import Image
import tempfile
from io import BytesIO
import base64
import time
import subprocess
import traceback
import re
import os
# Chrome respects proxy env params
handle_proxy()
# Make sure our python code doesn't go through a proxy when communicating with chrome webdriver
os.environ['no_proxy'] = 'localhost,127.0.0.1'
WITH_ERRORS = demisto.params().get('with_error', True)
DEFAULT_WAIT_TIME = max(int(demisto.params().get('wait_time', 0)), 0)
DEFAULT_PAGE_LOAD_TIME = int(demisto.params().get('max_page_load_time', 180))
URL_ERROR_MSG = "Can't access the URL. It might be malicious, or unreachable for one of several reasons. " \
"You can choose to receive this message as error/warning in the instance settings\n"
EMPTY_RESPONSE_ERROR_MSG = "There is nothing to render. This can occur when there is a refused connection." \
" Please check your URL."
DEFAULT_W, DEFAULT_H = '600', '800'
DEFAULT_W_WIDE = '1024'
CHROME_USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36' # noqa
DRIVER_LOG = f'{tempfile.gettempdir()}/chromedriver.log'
DEFAULT_CHROME_OPTIONS = [
'--no-sandbox',
'--headless',
'--disable-gpu',
'--hide-scrollbars',
'--disable_infobars',
'--start-maximized',
'--start-fullscreen',
'--ignore-certificate-errors',
'--disable-dev-shm-usage',
f'--user-agent={CHROME_USER_AGENT}'
]
USER_CHROME_OPTIONS = demisto.params().get('chrome_options', "")
def return_err_or_warn(msg):
return_error(msg) if WITH_ERRORS else return_warning(msg, exit=True)
def opt_name(opt):
return opt.split('=', 1)[0]
def merge_options(default_options, user_options):
"""merge the defualt options and user options
Arguments:
default_options {list} -- list of options to use
        user_options {string} -- user-configured options, comma separated (a comma can be escaped with \\)
Returns:
list -- merged options
"""
user_options = re.split(r'(?<!\\),', user_options) if user_options else list()
if not user_options: # nothing to do
return default_options
demisto.debug(f'user chrome options: {user_options}')
options = []
remove_opts = []
for opt in user_options:
opt = opt.strip()
if opt.startswith('[') and opt.endswith(']'):
remove_opts.append(opt[1:-1])
else:
options.append(opt.replace(r'\,', ','))
# remove values (such as in user-agent)
option_names = [opt_name(x) for x in options]
# add filtered defaults only if not in removed and we don't have it already
options.extend([x for x in default_options if (opt_name(x) not in remove_opts and opt_name(x) not in option_names)])
return options
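# --- Example (added; not part of the original integration) ---
# A hedged sketch of the merging grammar documented above: plain entries
# are appended, [bracketed] entries drop a default option, and '\,'
# escapes a comma inside a value. Expected results (option values are
# illustrative):
#   merge_options(['--headless', '--no-sandbox'], '--proxy-server=x')
#       -> ['--proxy-server=x', '--headless', '--no-sandbox']
#   merge_options(['--headless', '--no-sandbox'], '[--no-sandbox]')
#       -> ['--headless']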
def check_response(driver):
EMPTY_PAGE = '<html><head></head><body></body></html>'
if driver.page_source == EMPTY_PAGE:
return_err_or_warn(EMPTY_RESPONSE_ERROR_MSG)
def init_driver(offline_mode=False):
"""
Creates headless Google Chrome Web Driver
"""
demisto.debug(f'Creating chrome driver. Mode: {"OFFLINE" if offline_mode else "ONLINE"}')
try:
chrome_options = webdriver.ChromeOptions()
for opt in merge_options(DEFAULT_CHROME_OPTIONS, USER_CHROME_OPTIONS):
chrome_options.add_argument(opt)
driver = webdriver.Chrome(options=chrome_options, service_args=[
f'--log-path={DRIVER_LOG}',
])
if offline_mode:
driver.set_network_conditions(offline=True, latency=5, throughput=500 * 1024)
except Exception as ex:
return_error(f'Unexpected exception: {ex}\nTrace:{traceback.format_exc()}')
demisto.debug('Creating chrome driver - COMPLETED')
return driver
def find_zombie_processes():
"""find zombie proceses
Returns:
([process ids], raw ps output) -- return a tuple of zombie process ids and raw ps output
"""
ps_out = subprocess.check_output(['ps', '-e', '-o', 'pid,ppid,state,cmd'],
stderr=subprocess.STDOUT, universal_newlines=True)
lines = ps_out.splitlines()
pid = str(os.getpid())
zombies = []
if len(lines) > 1:
for line in lines[1:]:
pinfo = line.split()
if pinfo[2] == 'Z' and pinfo[1] == pid: # zombie process
zombies.append(pinfo[0])
return zombies, ps_out
def quit_driver_and_reap_children(driver):
"""
    Quits the driver's session and reaps all zombie child processes
:param driver: The driver
:return: None
"""
demisto.debug(f'Quitting driver session: {driver.session_id}')
driver.quit()
try:
zombies, ps_out = find_zombie_processes()
if zombies:
demisto.info(f'Found zombie processes will waitpid: {ps_out}')
for pid in zombies:
waitres = os.waitpid(int(pid), os.WNOHANG)[1]
demisto.info(f'waitpid result: {waitres}')
else:
demisto.debug(f'No zombie processes found for ps output: {ps_out}')
except Exception as e:
demisto.error(f'Failed checking for zombie processes: {e}. Trace: {traceback.format_exc()}')
def rasterize(path: str, width: int, height: int, r_type: str = 'png', wait_time: int = 0,
offline_mode: bool = False, max_page_load_time: int = 180):
"""
Capturing a snapshot of a path (url/file), using Chrome Driver
:param offline_mode: when set to True, will block any outgoing communication
:param path: file path, or website url
:param width: desired snapshot width in pixels
:param height: desired snapshot height in pixels
:param r_type: result type: .png/.pdf
:param wait_time: time in seconds to wait before taking a screenshot
"""
driver = init_driver(offline_mode)
page_load_time = max_page_load_time if max_page_load_time > 0 else DEFAULT_PAGE_LOAD_TIME
try:
demisto.debug(f'Navigating to path: {path}. Mode: {"OFFLINE" if offline_mode else "ONLINE"}. page load: {page_load_time}')
driver.set_page_load_timeout(page_load_time)
driver.get(path)
driver.implicitly_wait(5)
if wait_time > 0 or DEFAULT_WAIT_TIME > 0:
time.sleep(wait_time or DEFAULT_WAIT_TIME)
check_response(driver)
demisto.debug('Navigating to path - COMPLETED')
if r_type.lower() == 'pdf':
output = get_pdf(driver, width, height)
else:
output = get_image(driver, width, height)
return output
except (InvalidArgumentException, NoSuchElementException) as ex:
if 'invalid argument' in str(ex):
err_msg = URL_ERROR_MSG + str(ex)
return_err_or_warn(err_msg)
else:
return_err_or_warn(f'Invalid exception: {ex}\nTrace:{traceback.format_exc()}')
except TimeoutException as ex:
return_err_or_warn(f'Timeout exception with max load time of: {page_load_time} seconds. {ex}')
except Exception as ex:
err_str = f'General error: {ex}\nTrace:{traceback.format_exc()}'
demisto.error(err_str)
return_err_or_warn(err_str)
finally:
quit_driver_and_reap_children(driver)
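# --- Example (added; not part of the original integration) ---
# A minimal sketch of calling rasterize() directly; it assumes a working
# headless Chrome/chromedriver install, and the URL is illustrative.
#   png_bytes = rasterize('https://example.com', width=1024, height=800)
#   pdf_bytes = rasterize('https://example.com', width=1024, height=800,
#                         r_type='pdf', wait_time=2)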
def get_image(driver, width: int, height: int):
"""
Uses the Chrome driver to generate an image out of a currently loaded path
:return: .png file of the loaded path
"""
demisto.debug('Capturing screenshot')
    # Set window size
driver.set_window_size(width, height)
image = driver.get_screenshot_as_png()
driver.quit()
demisto.debug('Capturing screenshot - COMPLETED')
return image
def get_pdf(driver, width: int, height: int):
"""
    Uses the Chrome driver to generate a pdf file out of a currently loaded path
:return: .pdf file of the loaded path
"""
demisto.debug('Generating PDF')
driver.set_window_size(width, height)
resource = f'{driver.command_executor._url}/session/{driver.session_id}/chromium/send_command_and_get_result'
body = json.dumps({'cmd': 'Page.printToPDF', 'params': {'landscape': False}})
response = driver.command_executor._request('POST', resource, body)
if response.get('status'):
demisto.results(response.get('status'))
return_error(response.get('value'))
data = base64.b64decode(response.get('value').get('data'))
demisto.debug('Generating PDF - COMPLETED')
return data
def convert_pdf_to_jpeg(path: str, max_pages: int, password: str, horizontal: bool = False):
"""
Converts a PDF file into a jpeg image
:param path: file's path
:param max_pages: max pages to render
:param password: PDF password
:param horizontal: if True, will combine the pages horizontally
:return: stream of combined image
"""
demisto.debug(f'Loading file at Path: {path}')
input_pdf = PdfFileReader(open(path, "rb"))
pages = min(max_pages, input_pdf.numPages)
with tempfile.TemporaryDirectory() as output_folder:
demisto.debug('Converting PDF')
convert_from_path(
pdf_path=path,
fmt='jpeg',
first_page=1,
last_page=pages,
output_folder=output_folder,
userpw=password,
output_file='converted_pdf_'
)
demisto.debug('Converting PDF - COMPLETED')
demisto.debug('Combining all pages')
images = []
for page in sorted(os.listdir(output_folder)):
if os.path.isfile(os.path.join(output_folder, page)) and 'converted_pdf_' in page:
images.append(Image.open(os.path.join(output_folder, page)))
min_shape = min([(np.sum(page_.size), page_.size) for page_ in images])[1] # get the minimal width
if horizontal:
imgs_comb = np.hstack([np.asarray(i.resize(min_shape)) for i in images])
else:
imgs_comb = np.vstack([np.asarray(i.resize(min_shape)) for i in images])
imgs_comb = Image.fromarray(imgs_comb)
output = BytesIO()
imgs_comb.save(output, 'JPEG')
demisto.debug('Combining all pages - COMPLETED')
return output.getvalue()
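# --- Example (added; not part of the original integration) ---
# A hedged sketch of rendering the first pages of a password-protected PDF
# into one combined JPEG; the path and password are illustrative.
#   jpeg_bytes = convert_pdf_to_jpeg('/tmp/report.pdf', max_pages=3,
#                                    password='s3cret', horizontal=True)
#   with open('/tmp/report.jpeg', 'wb') as out:
#       out.write(jpeg_bytes)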
def rasterize_command():
url = demisto.getArg('url')
w = demisto.args().get('width', DEFAULT_W_WIDE).rstrip('px')
h = demisto.args().get('height', DEFAULT_H).rstrip('px')
r_type = demisto.args().get('type', 'png')
wait_time = int(demisto.args().get('wait_time', 0))
page_load = int(demisto.args().get('max_page_load_time', DEFAULT_PAGE_LOAD_TIME))
if not (url.startswith('http')):
url = f'http://{url}'
filename = f'url.{"pdf" if r_type == "pdf" else "png"}' # type: ignore
output = rasterize(path=url, r_type=r_type, width=w, height=h, wait_time=wait_time, max_page_load_time=page_load)
res = fileResult(filename=filename, data=output)
if r_type == 'png':
res['Type'] = entryTypes['image']
demisto.results(res)
def rasterize_image_command():
args = demisto.args()
entry_id = args.get('EntryID')
w = args.get('width', DEFAULT_W).rstrip('px')
h = args.get('height', DEFAULT_H).rstrip('px')
file_path = demisto.getFilePath(entry_id).get('path')
filename = f'{entry_id}.pdf'
with open(file_path, 'rb') as f, open('output_image', 'w') as image:
data = base64.b64encode(f.read()).decode('utf-8')
image.write(data)
output = rasterize(path=f'file://{os.path.realpath(f.name)}', width=w, height=h, r_type='pdf')
res = fileResult(filename=filename, data=output)
res['Type'] = entryTypes['image']
demisto.results(res)
def rasterize_email_command():
html_body = demisto.args().get('htmlBody')
w = demisto.args().get('width', DEFAULT_W).rstrip('px')
h = demisto.args().get('height', DEFAULT_H).rstrip('px')
offline = demisto.args().get('offline', 'false') == 'true'
r_type = demisto.args().get('type', 'png')
filename = f'email.{"pdf" if r_type.lower() == "pdf" else "png"}' # type: ignore
with open('htmlBody.html', 'w') as f:
        f.write(f'<html style="background:white;">{html_body}</html>')
path = f'file://{os.path.realpath(f.name)}'
output = rasterize(path=path, r_type=r_type, width=w, height=h, offline_mode=offline)
res = fileResult(filename=filename, data=output)
if r_type == 'png':
res['Type'] = entryTypes['image']
demisto.results(res)
def rasterize_pdf_command():
entry_id = demisto.args().get('EntryID')
password = demisto.args().get('pdfPassword')
max_pages = int(demisto.args().get('maxPages', 30))
horizontal = demisto.args().get('horizontal', 'false') == 'true'
file_path = demisto.getFilePath(entry_id).get('path')
filename = 'image.jpeg' # type: ignore
with open(file_path, 'rb') as f:
output = convert_pdf_to_jpeg(path=os.path.realpath(f.name), max_pages=max_pages, password=password,
horizontal=horizontal)
res = fileResult(filename=filename, data=output)
res['Type'] = entryTypes['image']
demisto.results(res)
def module_test():
# setting up a mock email file
with tempfile.NamedTemporaryFile('w+') as test_file:
test_file.write('<html><head><meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\">'
'</head><body><br>---------- TEST FILE ----------<br></body></html>')
test_file.flush()
file_path = f'file://{os.path.realpath(test_file.name)}'
# rasterizing the file
rasterize(path=file_path, width=250, height=250)
demisto.results('ok')
def main():
try:
with open(DRIVER_LOG, 'w'):
pass # truncate the log file
if demisto.command() == 'test-module':
module_test()
elif demisto.command() == 'rasterize-image':
rasterize_image_command()
elif demisto.command() == 'rasterize-email':
rasterize_email_command()
elif demisto.command() == 'rasterize-pdf':
rasterize_pdf_command()
elif demisto.command() == 'rasterize':
rasterize_command()
else:
return_error('Unrecognized command')
except Exception as ex:
return_err_or_warn(f'Unexpected exception: {ex}\nTrace:{traceback.format_exc()}')
finally:
if is_debug_mode():
demisto.debug(f'os.environ: {os.environ}')
with open(DRIVER_LOG, 'r') as log:
demisto.debug('Driver log:' + log.read())
if __name__ in ["__builtin__", "builtins", '__main__']:
main()
| 36.2954
| 151
| 0.651701
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, InvalidArgumentException, TimeoutException
from PyPDF2 import PdfFileReader
from pdf2image import convert_from_path
import numpy as np
from PIL import Image
import tempfile
from io import BytesIO
import base64
import time
import subprocess
import traceback
import re
import os
handle_proxy()
os.environ['no_proxy'] = 'localhost,127.0.0.1'
WITH_ERRORS = demisto.params().get('with_error', True)
DEFAULT_WAIT_TIME = max(int(demisto.params().get('wait_time', 0)), 0)
DEFAULT_PAGE_LOAD_TIME = int(demisto.params().get('max_page_load_time', 180))
URL_ERROR_MSG = "Can't access the URL. It might be malicious, or unreachable for one of several reasons. " \
"You can choose to receive this message as error/warning in the instance settings\n"
EMPTY_RESPONSE_ERROR_MSG = "There is nothing to render. This can occur when there is a refused connection." \
" Please check your URL."
DEFAULT_W, DEFAULT_H = '600', '800'
DEFAULT_W_WIDE = '1024'
CHROME_USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36'
DRIVER_LOG = f'{tempfile.gettempdir()}/chromedriver.log'
DEFAULT_CHROME_OPTIONS = [
'--no-sandbox',
'--headless',
'--disable-gpu',
'--hide-scrollbars',
'--disable_infobars',
'--start-maximized',
'--start-fullscreen',
'--ignore-certificate-errors',
'--disable-dev-shm-usage',
f'--user-agent={CHROME_USER_AGENT}'
]
USER_CHROME_OPTIONS = demisto.params().get('chrome_options', "")
def return_err_or_warn(msg):
return_error(msg) if WITH_ERRORS else return_warning(msg, exit=True)
def opt_name(opt):
return opt.split('=', 1)[0]
def merge_options(default_options, user_options):
user_options = re.split(r'(?<!\\),', user_options) if user_options else list()
if not user_options:
return default_options
demisto.debug(f'user chrome options: {user_options}')
options = []
remove_opts = []
for opt in user_options:
opt = opt.strip()
if opt.startswith('[') and opt.endswith(']'):
remove_opts.append(opt[1:-1])
else:
options.append(opt.replace(r'\,', ','))
option_names = [opt_name(x) for x in options]
options.extend([x for x in default_options if (opt_name(x) not in remove_opts and opt_name(x) not in option_names)])
return options
def check_response(driver):
EMPTY_PAGE = '<html><head></head><body></body></html>'
if driver.page_source == EMPTY_PAGE:
return_err_or_warn(EMPTY_RESPONSE_ERROR_MSG)
def init_driver(offline_mode=False):
demisto.debug(f'Creating chrome driver. Mode: {"OFFLINE" if offline_mode else "ONLINE"}')
try:
chrome_options = webdriver.ChromeOptions()
for opt in merge_options(DEFAULT_CHROME_OPTIONS, USER_CHROME_OPTIONS):
chrome_options.add_argument(opt)
driver = webdriver.Chrome(options=chrome_options, service_args=[
f'--log-path={DRIVER_LOG}',
])
if offline_mode:
driver.set_network_conditions(offline=True, latency=5, throughput=500 * 1024)
except Exception as ex:
return_error(f'Unexpected exception: {ex}\nTrace:{traceback.format_exc()}')
demisto.debug('Creating chrome driver - COMPLETED')
return driver
def find_zombie_processes():
ps_out = subprocess.check_output(['ps', '-e', '-o', 'pid,ppid,state,cmd'],
stderr=subprocess.STDOUT, universal_newlines=True)
lines = ps_out.splitlines()
pid = str(os.getpid())
zombies = []
if len(lines) > 1:
for line in lines[1:]:
pinfo = line.split()
if pinfo[2] == 'Z' and pinfo[1] == pid: # zombie process
zombies.append(pinfo[0])
return zombies, ps_out
def quit_driver_and_reap_children(driver):
demisto.debug(f'Quitting driver session: {driver.session_id}')
driver.quit()
try:
zombies, ps_out = find_zombie_processes()
if zombies:
demisto.info(f'Found zombie processes will waitpid: {ps_out}')
for pid in zombies:
waitres = os.waitpid(int(pid), os.WNOHANG)[1]
demisto.info(f'waitpid result: {waitres}')
else:
demisto.debug(f'No zombie processes found for ps output: {ps_out}')
except Exception as e:
demisto.error(f'Failed checking for zombie processes: {e}. Trace: {traceback.format_exc()}')
def rasterize(path: str, width: int, height: int, r_type: str = 'png', wait_time: int = 0,
offline_mode: bool = False, max_page_load_time: int = 180):
driver = init_driver(offline_mode)
page_load_time = max_page_load_time if max_page_load_time > 0 else DEFAULT_PAGE_LOAD_TIME
try:
demisto.debug(f'Navigating to path: {path}. Mode: {"OFFLINE" if offline_mode else "ONLINE"}. page load: {page_load_time}')
driver.set_page_load_timeout(page_load_time)
driver.get(path)
driver.implicitly_wait(5)
if wait_time > 0 or DEFAULT_WAIT_TIME > 0:
time.sleep(wait_time or DEFAULT_WAIT_TIME)
check_response(driver)
demisto.debug('Navigating to path - COMPLETED')
if r_type.lower() == 'pdf':
output = get_pdf(driver, width, height)
else:
output = get_image(driver, width, height)
return output
except (InvalidArgumentException, NoSuchElementException) as ex:
if 'invalid argument' in str(ex):
err_msg = URL_ERROR_MSG + str(ex)
return_err_or_warn(err_msg)
else:
return_err_or_warn(f'Invalid exception: {ex}\nTrace:{traceback.format_exc()}')
except TimeoutException as ex:
return_err_or_warn(f'Timeout exception with max load time of: {page_load_time} seconds. {ex}')
except Exception as ex:
err_str = f'General error: {ex}\nTrace:{traceback.format_exc()}'
demisto.error(err_str)
return_err_or_warn(err_str)
finally:
quit_driver_and_reap_children(driver)
def get_image(driver, width: int, height: int):
demisto.debug('Capturing screenshot')
    # Set window size
driver.set_window_size(width, height)
image = driver.get_screenshot_as_png()
driver.quit()
demisto.debug('Capturing screenshot - COMPLETED')
return image
def get_pdf(driver, width: int, height: int):
demisto.debug('Generating PDF')
driver.set_window_size(width, height)
resource = f'{driver.command_executor._url}/session/{driver.session_id}/chromium/send_command_and_get_result'
body = json.dumps({'cmd': 'Page.printToPDF', 'params': {'landscape': False}})
response = driver.command_executor._request('POST', resource, body)
if response.get('status'):
demisto.results(response.get('status'))
return_error(response.get('value'))
data = base64.b64decode(response.get('value').get('data'))
demisto.debug('Generating PDF - COMPLETED')
return data
def convert_pdf_to_jpeg(path: str, max_pages: int, password: str, horizontal: bool = False):
demisto.debug(f'Loading file at Path: {path}')
input_pdf = PdfFileReader(open(path, "rb"))
pages = min(max_pages, input_pdf.numPages)
with tempfile.TemporaryDirectory() as output_folder:
demisto.debug('Converting PDF')
convert_from_path(
pdf_path=path,
fmt='jpeg',
first_page=1,
last_page=pages,
output_folder=output_folder,
userpw=password,
output_file='converted_pdf_'
)
demisto.debug('Converting PDF - COMPLETED')
demisto.debug('Combining all pages')
images = []
for page in sorted(os.listdir(output_folder)):
if os.path.isfile(os.path.join(output_folder, page)) and 'converted_pdf_' in page:
images.append(Image.open(os.path.join(output_folder, page)))
min_shape = min([(np.sum(page_.size), page_.size) for page_ in images])[1] # get the minimal width
if horizontal:
imgs_comb = np.hstack([np.asarray(i.resize(min_shape)) for i in images])
else:
imgs_comb = np.vstack([np.asarray(i.resize(min_shape)) for i in images])
imgs_comb = Image.fromarray(imgs_comb)
output = BytesIO()
imgs_comb.save(output, 'JPEG')
demisto.debug('Combining all pages - COMPLETED')
return output.getvalue()
def rasterize_command():
url = demisto.getArg('url')
w = demisto.args().get('width', DEFAULT_W_WIDE).rstrip('px')
h = demisto.args().get('height', DEFAULT_H).rstrip('px')
r_type = demisto.args().get('type', 'png')
wait_time = int(demisto.args().get('wait_time', 0))
page_load = int(demisto.args().get('max_page_load_time', DEFAULT_PAGE_LOAD_TIME))
if not (url.startswith('http')):
url = f'http://{url}'
filename = f'url.{"pdf" if r_type == "pdf" else "png"}' # type: ignore
output = rasterize(path=url, r_type=r_type, width=w, height=h, wait_time=wait_time, max_page_load_time=page_load)
res = fileResult(filename=filename, data=output)
if r_type == 'png':
res['Type'] = entryTypes['image']
demisto.results(res)
def rasterize_image_command():
args = demisto.args()
entry_id = args.get('EntryID')
w = args.get('width', DEFAULT_W).rstrip('px')
h = args.get('height', DEFAULT_H).rstrip('px')
file_path = demisto.getFilePath(entry_id).get('path')
filename = f'{entry_id}.pdf'
with open(file_path, 'rb') as f, open('output_image', 'w') as image:
data = base64.b64encode(f.read()).decode('utf-8')
image.write(data)
output = rasterize(path=f'file://{os.path.realpath(f.name)}', width=w, height=h, r_type='pdf')
res = fileResult(filename=filename, data=output)
res['Type'] = entryTypes['image']
demisto.results(res)
def rasterize_email_command():
html_body = demisto.args().get('htmlBody')
w = demisto.args().get('width', DEFAULT_W).rstrip('px')
h = demisto.args().get('height', DEFAULT_H).rstrip('px')
offline = demisto.args().get('offline', 'false') == 'true'
r_type = demisto.args().get('type', 'png')
filename = f'email.{"pdf" if r_type.lower() == "pdf" else "png"}' # type: ignore
with open('htmlBody.html', 'w') as f:
        f.write(f'<html style="background:white;">{html_body}</html>')
path = f'file://{os.path.realpath(f.name)}'
output = rasterize(path=path, r_type=r_type, width=w, height=h, offline_mode=offline)
res = fileResult(filename=filename, data=output)
if r_type == 'png':
res['Type'] = entryTypes['image']
demisto.results(res)
def rasterize_pdf_command():
entry_id = demisto.args().get('EntryID')
password = demisto.args().get('pdfPassword')
max_pages = int(demisto.args().get('maxPages', 30))
horizontal = demisto.args().get('horizontal', 'false') == 'true'
file_path = demisto.getFilePath(entry_id).get('path')
filename = 'image.jpeg' # type: ignore
with open(file_path, 'rb') as f:
output = convert_pdf_to_jpeg(path=os.path.realpath(f.name), max_pages=max_pages, password=password,
horizontal=horizontal)
res = fileResult(filename=filename, data=output)
res['Type'] = entryTypes['image']
demisto.results(res)
def module_test():
# setting up a mock email file
with tempfile.NamedTemporaryFile('w+') as test_file:
test_file.write('<html><head><meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\">'
'</head><body><br>---------- TEST FILE ----------<br></body></html>')
test_file.flush()
file_path = f'file://{os.path.realpath(test_file.name)}'
# rasterizing the file
rasterize(path=file_path, width=250, height=250)
demisto.results('ok')
def main():
try:
with open(DRIVER_LOG, 'w'):
pass # truncate the log file
if demisto.command() == 'test-module':
module_test()
elif demisto.command() == 'rasterize-image':
rasterize_image_command()
elif demisto.command() == 'rasterize-email':
rasterize_email_command()
elif demisto.command() == 'rasterize-pdf':
rasterize_pdf_command()
elif demisto.command() == 'rasterize':
rasterize_command()
else:
return_error('Unrecognized command')
except Exception as ex:
return_err_or_warn(f'Unexpected exception: {ex}\nTrace:{traceback.format_exc()}')
finally:
if is_debug_mode():
demisto.debug(f'os.environ: {os.environ}')
with open(DRIVER_LOG, 'r') as log:
demisto.debug('Driver log:' + log.read())
if __name__ in ["__builtin__", "builtins", '__main__']:
main()
| true
| true
|
1c42fa84f38c1e3b6406c4513c46d997acdfc98f
| 4,904
|
py
|
Python
|
testsuite/tiff-suite/run.py
|
Alexander-Murashko/oiio
|
2cb95cf674e6cb085eb14614c428535ed2b8989b
|
[
"BSD-3-Clause"
] | 1
|
2018-02-06T23:58:03.000Z
|
2018-02-06T23:58:03.000Z
|
testsuite/tiff-suite/run.py
|
Alexander-Murashko/oiio
|
2cb95cf674e6cb085eb14614c428535ed2b8989b
|
[
"BSD-3-Clause"
] | null | null | null |
testsuite/tiff-suite/run.py
|
Alexander-Murashko/oiio
|
2cb95cf674e6cb085eb14614c428535ed2b8989b
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
import os
import sys
path = ""
cmdpath = ""
command = ""
if len(sys.argv) > 2 :
os.chdir (sys.argv[1])
path = sys.argv[2] + "/"
cmdpath = sys.argv[2] + "/"
sys.path = [".."] + sys.path
import runtest
# Start off
hi = "echo hi"
command = hi + "> out.txt"
imagedir = "../../../libtiffpic"
# caspian.tif 279x220 64-bit floating point (deflate) Caspian Sea from space
# I can't get this to work with OIIO, but I can't get it to read with
# ImageMagick or OSX preview, either.
# FIXME?
# cramps.tif 800x607 8-bit b&w (packbits) "cramps poster"
# This tests 1-bit images, and packbits compression
# cramps-tile.tif 256x256 tiled version of cramps.tif (no compression)
# Tests tiled images (especially tiled 1-bit) -- compare it to cramps
command = command + "; " + runtest.rw_command (imagedir, "cramps.tif", path)
command = command + "; " + runtest.rw_command (imagedir, "cramps-tile.tif", path)
command = command + "; " + runtest.diff_command (imagedir+"/cramps-tile.tif",
imagedir+"/cramps.tif", path)
# dscf0013.tif 640x480 YCbCr digital camera image which lacks Reference
# Black/White values. Contains EXIF SubIFD. No compression.
# FIXME - we don't support YCbCr yet.
# fax2d.tif 1728x1082 1-bit b&w (G3/2D) facsimile
# FIXME - we read the pixel data fine, but we fail to recognize that
# differing XResolution and YResolution imply a non-square pixel
# aspect ratio, and iv fails to display it well for this reason.
command = command + "; " + runtest.rw_command (imagedir, "fax2d.tif", path)
# g3test.tif TIFF equivalent of g3test.g3 created by fax2tiff
command = command + "; " + runtest.rw_command (imagedir, "g3test.tif", path)
# FIXME - same aspect ratio issue as fax2d.tif
# jello.tif 256x192 8-bit RGB (packbits palette) Paul Heckbert "jello"
command = command + "; " + runtest.rw_command (imagedir, "jello.tif", path)
# ladoga.tif 158x118 16-bit unsigned, single band, deflate
# NOTE -- I have no idea if we read this correctly. Neither ImageMagick
# nor OSX preview display a meaningful image.
# off_l16.tif 333x225 8-bit CIE LogL (SGILog) office from Greg Larson
# off_luv24.tif 333x225 8-bit CIE LogLuv (SGILog24) office from " "
# off_luv32.tif 333x225 8-bit CIE LogLuv (SGILog) office from " "
# FIXME -- we just don't handle LogL or LogLuv yet
# pc260001.tif 640x480 8-bit RGB digital camera image. Contains EXIF SubIFD.
# No compression.
# FIXME? - we don't seem to recognize additional Exif data that's in the
# 'Maker Note', which includes GainControl
command = command + "; " + runtest.rw_command (imagedir, "pc260001.tif", path)
# quad-jpeg.tif 512x384 8-bit YCbCr (jpeg) version of quad-lzw.tif
# FIXME -- we don't handle this (YCbCr? jpeg?)
# NOTE -- OSX preview doesn't handle this either (but ImageMagick does)
# quad-lzw.tif 512x384 8-bit RGB (lzw) "quadric surfaces"
# quad-tile.tif 512x384 tiled version of quad-lzw.tif (lzw)
command = command + "; " + runtest.rw_command (imagedir, "quad-lzw.tif", path)
command = command + "; " + runtest.rw_command (imagedir, "quad-tile.tif", path)
command = command + "; " + runtest.diff_command (imagedir+"/quad-tile.tif",
imagedir+"/quad-lzw.tif", path)
# strike.tif 256x200 8-bit RGBA (lzw) "bowling pins" from Pixar
command = command + "; " + runtest.rw_command (imagedir, "strike.tif", path)
# text.tif 1512x359 4-bit b&w (thunderscan) am-express credit card
# FIXME -- we don't get this right
# ycbcr-cat.tif 250x325 8-bit YCbCr (lzw) "kitty" created by rgb2ycbcr
# FIXME -- we don't get this right
# smallliz.tif 160x160 8-bit YCbCr (OLD jpeg) lizard from HP**
# zackthecat.tif 234x213 8-bit YCbCr (OLD jpeg) tiled "ZackTheCat" from NeXT**
# considered a deprecated format, not supported by libtiff
# oxford.tif 601x81 8-bit RGB (lzw) screendump off oxford
command = command + "; " + runtest.rw_command (imagedir, "oxford.tif", path, 0)
# The other images are from Hewlett Packard and exemplify the use of the
# HalftoneHints tag (in their words):
# The images are all the same subject, and should all appear the same
# after rendering. Each of the images is slightly different as outlined
# by the following table:
#
# FileName ToneRange HalftoneHints
# jim___cg.tif A Y
# jim___dg.tif B N
# jim___gg.tif B Y
#
# NOTE -- OIIO appears to read this fine, but I'm really not sure how to
# judge if it's "correct"
# Outputs to check against references
outputs = [ "out.txt" ]
# Files that need to be cleaned up, IN ADDITION to outputs
cleanfiles = [ "cramps-tile.tif", "g3test.tif", "quad-lzw.tif",
"cramps.tif", "jello.tif", "quad-tile.tif", "strike.tif",
"fax2d.tif", "pc260001.tif" ]
# boilerplate
ret = runtest.runtest (command, outputs, cleanfiles)
sys.exit (ret)
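# --- Example (added; not part of the original test script) ---
# A hedged sketch of how this script is typically invoked by the test
# harness, per the argv handling at the top: argv[1] is the working
# directory and argv[2] is the path to the test binaries (both values
# below are illustrative).
#   python run.py build/testsuite/tiff-suite ../../../dist/bin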
| 39.232
| 81
| 0.684339
|
import os
import sys
path = ""
cmdpath = ""
command = ""
if len(sys.argv) > 2 :
os.chdir (sys.argv[1])
path = sys.argv[2] + "/"
cmdpath = sys.argv[2] + "/"
sys.path = [".."] + sys.path
import runtest
hi = "echo hi"
command = hi + "> out.txt"
imagedir = "../../../libtiffpic"
command = command + "; " + runtest.rw_command (imagedir, "cramps.tif", path)
command = command + "; " + runtest.rw_command (imagedir, "cramps-tile.tif", path)
command = command + "; " + runtest.diff_command (imagedir+"/cramps-tile.tif",
imagedir+"/cramps.tif", path)
# fax2d.tif 1728x1082 1-bit b&w (G3/2D) facsimile
# FIXME - we read the pixel data fine, but we fail to recognize that
# differing XResolution and YResolution imply a non-square pixel
# aspect ratio, and iv fails to display it well for this reason.
command = command + "; " + runtest.rw_command (imagedir, "fax2d.tif", path)
# g3test.tif TIFF equivalent of g3test.g3 created by fax2tiff
command = command + "; " + runtest.rw_command (imagedir, "g3test.tif", path)
# FIXME - same aspect ratio issue as fax2d.tif
# jello.tif 256x192 8-bit RGB (packbits palette) Paul Heckbert "jello"
command = command + "; " + runtest.rw_command (imagedir, "jello.tif", path)
# ladoga.tif 158x118 16-bit unsigned, single band, deflate
# NOTE -- I have no idea if we read this correctly. Neither ImageMagick
# nor OSX preview display a meaningful image.
# off_l16.tif 333x225 8-bit CIE LogL (SGILog) office from Greg Larson
# off_luv24.tif 333x225 8-bit CIE LogLuv (SGILog24) office from " "
# off_luv32.tif 333x225 8-bit CIE LogLuv (SGILog) office from " "
# FIXME -- we just don't handle LogL or LogLuv yet
command = command + "; " + runtest.rw_command (imagedir, "pc260001.tif", path)
# NOTE -- OSX preview doesn't handle this either (but ImageMagick does)
command = command + "; " + runtest.rw_command (imagedir, "quad-lzw.tif", path)
command = command + "; " + runtest.rw_command (imagedir, "quad-tile.tif", path)
command = command + "; " + runtest.diff_command (imagedir+"/quad-tile.tif",
imagedir+"/quad-lzw.tif", path)
command = command + "; " + runtest.rw_command (imagedir, "strike.tif", path)
# ycbcr-cat.tif 250x325 8-bit YCbCr (lzw) "kitty" created by rgb2ycbcr
# FIXME -- we don't get this right
command = command + "; " + runtest.rw_command (imagedir, "oxford.tif", path, 0)
# judge if it's "correct"
outputs = [ "out.txt" ]
cleanfiles = [ "cramps-tile.tif", "g3test.tif", "quad-lzw.tif",
"cramps.tif", "jello.tif", "quad-tile.tif", "strike.tif",
"fax2d.tif", "pc260001.tif" ]
ret = runtest.runtest (command, outputs, cleanfiles)
sys.exit (ret)
| true
| true
|
1c42fbb78065bfe027c807c64b143bd34d0b208a
| 1,151
|
py
|
Python
|
globaltechmap/spider/mysqltest.py
|
kcdyx1/python_study
|
abba84b98382c253dbf07b122a33d273c7e11832
|
[
"MIT"
] | 1
|
2018-01-16T12:52:18.000Z
|
2018-01-16T12:52:18.000Z
|
globaltechmap/spider/mysqltest.py
|
kcdyx1/python_study
|
abba84b98382c253dbf07b122a33d273c7e11832
|
[
"MIT"
] | null | null | null |
globaltechmap/spider/mysqltest.py
|
kcdyx1/python_study
|
abba84b98382c253dbf07b122a33d273c7e11832
|
[
"MIT"
] | null | null | null |
# coding:utf8
import pymysql
conn = pymysql.connect(host ='localhost', port =8889, user ='root', password ='root', db='globaltechmap', charset='utf8')
cursor = conn.cursor()
fields ={
'hy':'haiyang',
'hk': 'hangkong',
'ht': 'hangtian',
'kjzl':'kejizhanlue',
'ny': 'nengyuan',
'sw': 'shengwu',
'xjzz': 'xianjinzhizao',
'xcl':'xincailiao',
'xx': 'xinxi'
}
field_alia = input("请输入需要查询的领域首字母:")
field = fields[field_alia]
year_input = input("请输入需要查询的年份:")
year = "'" + year_input
sql = "SELECT content FROM "+ field + " where date between " + year + "-01-01' and " + year + "-12-31'"
words = ()
try:
    cursor.execute(sql)  # execute the SQL statement
results = cursor.fetchall()
# print(len(results))
for row in results:
content = row[0]
        # save the query results
chaxuejieguo = "/Users/kangchen/python_study/globaltechmap/spider/MySQL_Results/" + field + "_neirong_" + year.replace("'","") + ".csv"
with open(chaxuejieguo, 'a') as fw:
fw.write(content)
print("中共中央贺电:查询结果已经成功保存!")
except Exception as e:
raise e
finally:
conn.close()
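# --- Example (added; not part of the original script) ---
# The query above is assembled by string concatenation from raw input,
# which is open to SQL injection. A hedged sketch of the same query with
# pymysql parameter binding: table names cannot be bound, so the table is
# taken only from the `fields` whitelist; connection values are
# illustrative and mirror the ones used above.
conn2 = pymysql.connect(host='localhost', port=8889, user='root',
                        password='root', db='globaltechmap', charset='utf8')
try:
    with conn2.cursor() as cur:
        table = fields['hy']  # whitelisted table name, not user input
        cur.execute(
            "SELECT content FROM " + table + " WHERE date BETWEEN %s AND %s",
            ("2017-01-01", "2017-12-31"),
        )
        rows = cur.fetchall()
finally:
    conn2.close()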
| 26.767442
| 143
| 0.585578
|
import pymysql
conn = pymysql.connect(host ='localhost', port =8889, user ='root', password ='root', db='globaltechmap', charset='utf8')
cursor = conn.cursor()
fields ={
'hy':'haiyang',
'hk': 'hangkong',
'ht': 'hangtian',
'kjzl':'kejizhanlue',
'ny': 'nengyuan',
'sw': 'shengwu',
'xjzz': 'xianjinzhizao',
'xcl':'xincailiao',
'xx': 'xinxi'
}
field_alia = input("请输入需要查询的领域首字母:")
field = fields[field_alia]
year_input = input("请输入需要查询的年份:")
year = "'" + year_input
sql = "SELECT content FROM "+ field + " where date between " + year + "-01-01' and " + year + "-12-31'"
words = ()
try:
    cursor.execute(sql)  # execute the SQL statement
results = cursor.fetchall()
# print(len(results))
for row in results:
content = row[0]
        # save the query results
chaxuejieguo = "/Users/kangchen/python_study/globaltechmap/spider/MySQL_Results/" + field + "_neirong_" + year.replace("'","") + ".csv"
with open(chaxuejieguo, 'a') as fw:
fw.write(content)
print("中共中央贺电:查询结果已经成功保存!")
except Exception as e:
raise e
finally:
conn.close()
| true
| true
|
1c42fbea0630646e6a4b682a02e2eab311edb52b
| 1,191
|
py
|
Python
|
.history/postImages/index_20201006200308.py
|
Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE
|
9a8289d8550115362c46dea3ed8570b789c09a10
|
[
"MIT"
] | 2
|
2020-10-21T22:14:15.000Z
|
2020-10-21T22:14:16.000Z
|
.history/postImages/index_20201006200308.py
|
Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE
|
9a8289d8550115362c46dea3ed8570b789c09a10
|
[
"MIT"
] | null | null | null |
.history/postImages/index_20201006200308.py
|
Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE
|
9a8289d8550115362c46dea3ed8570b789c09a10
|
[
"MIT"
] | null | null | null |
import csv
import requests
df = open("bridgeData3.csv",'r').readlines()
fin = open('final.csv','r').readlines()
finCsv = fin[1:]
# url = https://b2ptc.herokuapp.com/bridges
finalCsv = df[1:]
obj = {}
for i in finalCsv:
x = i.split(',')
obj[x[1]] = {'bridge_name':x[0],'proj_code':x[1],'before_img':x[2],'after_img':x[3]}
for i in finCsv:
x = i.split(',')
if x[6] in obj:
y= obj[x[6]]
y['province'] = x[0]
y['district'] = x[1]
y['sector'] = x[2]
y['cell'] = x[3]
y['bridge_site'] = x[4]
y['stage'] = x[5]
y['id'] = int(x[6])
y['type'] = x[7]
y['latt'] = float(x[8])
y['long'] = float(x[9])
try:
serv = float(x[10])
except:
serv = x[10]
y['served'] = serv
sv = x
print(x[11:])
# for i in finalCsv:
# x = i.split(',')
# requests.put(url+x[0],data={before:x[2],after:x[3]})
# pull each id, before image, and after image from df
# for each data item, do a PUT request with the id as the URL param,
# then put the before and after images in a dict and send it as the data for the PUT request (a hedged sketch follows below)
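# --- Example (added; not part of the original script) ---
# A hedged sketch of the PUT loop described above: for each bridge, send
# its before/after image URLs to the API, keyed by project code. The
# endpoint comes from the commented-out url above and may be stale; the
# 'before_img'/'after_img' field names match the dict built earlier.
url = 'https://b2ptc.herokuapp.com/bridges/'
for proj_code, bridge in obj.items():
    requests.put(url + proj_code,
                 data={'before_img': bridge['before_img'],
                       'after_img': bridge['after_img']})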
| 27.068182
| 97
| 0.507976
|
import csv
import requests
df = open("bridgeData3.csv",'r').readlines()
fin = open('final.csv','r').readlines()
finCsv = fin[1:]
finalCsv = df[1:]
obj = {}
for i in finalCsv:
x = i.split(',')
obj[x[1]] = {'bridge_name':x[0],'proj_code':x[1],'before_img':x[2],'after_img':x[3]}
for i in finCsv:
x = i.split(',')
if x[6] in obj:
y= obj[x[6]]
y['province'] = x[0]
y['district'] = x[1]
y['sector'] = x[2]
y['cell'] = x[3]
y['bridge_site'] = x[4]
y['stage'] = x[5]
y['id'] = int(x[6])
y['type'] = x[7]
y['latt'] = float(x[8])
y['long'] = float(x[9])
try:
serv = float(x[10])
except:
serv = x[10]
y['served'] = serv
sv = x
print(x[11:])
| true
| true
|
1c42fbf1f6d7882fb4b9943cfb6e134d08583539
| 4,860
|
py
|
Python
|
espnet2/bin/enh_scoring.py
|
YoshikiMas/espnet
|
793b999a50af484a5eaf6227ef7556b48514ef15
|
[
"Apache-2.0"
] | 1
|
2022-03-25T14:41:05.000Z
|
2022-03-25T14:41:05.000Z
|
espnet2/bin/enh_scoring.py
|
YoshikiMas/espnet
|
793b999a50af484a5eaf6227ef7556b48514ef15
|
[
"Apache-2.0"
] | 2
|
2019-04-23T04:43:33.000Z
|
2019-05-13T13:06:52.000Z
|
espnet2/bin/enh_scoring.py
|
YoshikiMas/espnet
|
793b999a50af484a5eaf6227ef7556b48514ef15
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import logging
import sys
from typing import List
from typing import Union
from mir_eval.separation import bss_eval_sources
import numpy as np
from pystoi import stoi
import torch
from typeguard import check_argument_types
from espnet.utils.cli_utils import get_commandline_args
from espnet2.enh.loss.criterions.time_domain import SISNRLoss
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.fileio.sound_scp import SoundScpReader
from espnet2.utils import config_argparse
si_snr_loss = SISNRLoss()
def scoring(
output_dir: str,
dtype: str,
log_level: Union[int, str],
key_file: str,
ref_scp: List[str],
inf_scp: List[str],
ref_channel: int,
):
assert check_argument_types()
logging.basicConfig(
level=log_level,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
assert len(ref_scp) == len(inf_scp), ref_scp
num_spk = len(ref_scp)
keys = [
line.rstrip().split(maxsplit=1)[0] for line in open(key_file, encoding="utf-8")
]
ref_readers = [SoundScpReader(f, dtype=dtype, normalize=True) for f in ref_scp]
inf_readers = [SoundScpReader(f, dtype=dtype, normalize=True) for f in inf_scp]
# get sample rate
sample_rate, _ = ref_readers[0][keys[0]]
# check keys
for inf_reader, ref_reader in zip(inf_readers, ref_readers):
assert inf_reader.keys() == ref_reader.keys()
with DatadirWriter(output_dir) as writer:
for key in keys:
ref_audios = [ref_reader[key][1] for ref_reader in ref_readers]
inf_audios = [inf_reader[key][1] for inf_reader in inf_readers]
ref = np.array(ref_audios)
inf = np.array(inf_audios)
if ref.ndim > inf.ndim:
# multi-channel reference and single-channel output
ref = ref[..., ref_channel]
elif ref.ndim < inf.ndim:
# single-channel reference and multi-channel output
inf = inf[..., ref_channel]
elif ref.ndim == inf.ndim == 3:
# multi-channel reference and output
ref = ref[..., ref_channel]
inf = inf[..., ref_channel]
assert ref.shape == inf.shape, (ref.shape, inf.shape)
sdr, sir, sar, perm = bss_eval_sources(ref, inf, compute_permutation=True)
for i in range(num_spk):
stoi_score = stoi(ref[i], inf[int(perm[i])], fs_sig=sample_rate)
estoi_score = stoi(
ref[i], inf[int(perm[i])], fs_sig=sample_rate, extended=True
)
si_snr_score = -float(
si_snr_loss(
torch.from_numpy(ref[i][None, ...]),
torch.from_numpy(inf[int(perm[i])][None, ...]),
)
)
writer[f"STOI_spk{i + 1}"][key] = str(stoi_score * 100) # in percentage
writer[f"ESTOI_spk{i + 1}"][key] = str(estoi_score * 100)
writer[f"SI_SNR_spk{i + 1}"][key] = str(si_snr_score)
writer[f"SDR_spk{i + 1}"][key] = str(sdr[i])
writer[f"SAR_spk{i + 1}"][key] = str(sar[i])
writer[f"SIR_spk{i + 1}"][key] = str(sir[i])
                # save each speaker's wav scp entry under the assigned permutation
writer[f"wav_spk{i + 1}"][key] = inf_readers[perm[i]].data[key]
def get_parser():
parser = config_argparse.ArgumentParser(
description="Frontend inference",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# Note(kamo): Use '_' instead of '-' as separator.
# '-' is confusing if written in yaml.
parser.add_argument(
"--log_level",
type=lambda x: x.upper(),
default="INFO",
choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
help="The verbose level of logging",
)
parser.add_argument("--output_dir", type=str, required=True)
parser.add_argument(
"--dtype",
default="float32",
choices=["float16", "float32", "float64"],
help="Data type",
)
group = parser.add_argument_group("Input data related")
group.add_argument(
"--ref_scp",
type=str,
required=True,
action="append",
)
group.add_argument(
"--inf_scp",
type=str,
required=True,
action="append",
)
group.add_argument("--key_file", type=str)
group.add_argument("--ref_channel", type=int, default=0)
return parser
def main(cmd=None):
print(get_commandline_args(), file=sys.stderr)
parser = get_parser()
args = parser.parse_args(cmd)
kwargs = vars(args)
kwargs.pop("config", None)
scoring(**kwargs)
if __name__ == "__main__":
main()
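# Example invocation (a sketch; the scp/key paths are assumptions, not files
# shipped with espnet). One --ref_scp/--inf_scp pair is appended per speaker,
# so multi-speaker scoring simply repeats both flags:
#
#   python -m espnet2.bin.enh_scoring \
#       --output_dir exp/enh_score \
#       --ref_scp data/test/spk1.scp --inf_scp exp/enh/spk1.scp \
#       --key_file data/test/spk1.scp --ref_channel 0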
| 31.764706
| 88
| 0.595473
|
import argparse
import logging
import sys
from typing import List
from typing import Union
from mir_eval.separation import bss_eval_sources
import numpy as np
from pystoi import stoi
import torch
from typeguard import check_argument_types
from espnet.utils.cli_utils import get_commandline_args
from espnet2.enh.loss.criterions.time_domain import SISNRLoss
from espnet2.fileio.datadir_writer import DatadirWriter
from espnet2.fileio.sound_scp import SoundScpReader
from espnet2.utils import config_argparse
si_snr_loss = SISNRLoss()
def scoring(
output_dir: str,
dtype: str,
log_level: Union[int, str],
key_file: str,
ref_scp: List[str],
inf_scp: List[str],
ref_channel: int,
):
assert check_argument_types()
logging.basicConfig(
level=log_level,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
assert len(ref_scp) == len(inf_scp), ref_scp
num_spk = len(ref_scp)
keys = [
line.rstrip().split(maxsplit=1)[0] for line in open(key_file, encoding="utf-8")
]
ref_readers = [SoundScpReader(f, dtype=dtype, normalize=True) for f in ref_scp]
inf_readers = [SoundScpReader(f, dtype=dtype, normalize=True) for f in inf_scp]
sample_rate, _ = ref_readers[0][keys[0]]
for inf_reader, ref_reader in zip(inf_readers, ref_readers):
assert inf_reader.keys() == ref_reader.keys()
with DatadirWriter(output_dir) as writer:
for key in keys:
ref_audios = [ref_reader[key][1] for ref_reader in ref_readers]
inf_audios = [inf_reader[key][1] for inf_reader in inf_readers]
ref = np.array(ref_audios)
inf = np.array(inf_audios)
if ref.ndim > inf.ndim:
ref = ref[..., ref_channel]
elif ref.ndim < inf.ndim:
inf = inf[..., ref_channel]
elif ref.ndim == inf.ndim == 3:
ref = ref[..., ref_channel]
inf = inf[..., ref_channel]
assert ref.shape == inf.shape, (ref.shape, inf.shape)
sdr, sir, sar, perm = bss_eval_sources(ref, inf, compute_permutation=True)
for i in range(num_spk):
stoi_score = stoi(ref[i], inf[int(perm[i])], fs_sig=sample_rate)
estoi_score = stoi(
ref[i], inf[int(perm[i])], fs_sig=sample_rate, extended=True
)
si_snr_score = -float(
si_snr_loss(
torch.from_numpy(ref[i][None, ...]),
torch.from_numpy(inf[int(perm[i])][None, ...]),
)
)
writer[f"STOI_spk{i + 1}"][key] = str(stoi_score * 100)
writer[f"ESTOI_spk{i + 1}"][key] = str(estoi_score * 100)
writer[f"SI_SNR_spk{i + 1}"][key] = str(si_snr_score)
writer[f"SDR_spk{i + 1}"][key] = str(sdr[i])
writer[f"SAR_spk{i + 1}"][key] = str(sar[i])
writer[f"SIR_spk{i + 1}"][key] = str(sir[i])
writer[f"wav_spk{i + 1}"][key] = inf_readers[perm[i]].data[key]
def get_parser():
parser = config_argparse.ArgumentParser(
description="Frontend inference",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--log_level",
type=lambda x: x.upper(),
default="INFO",
choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
help="The verbose level of logging",
)
parser.add_argument("--output_dir", type=str, required=True)
parser.add_argument(
"--dtype",
default="float32",
choices=["float16", "float32", "float64"],
help="Data type",
)
group = parser.add_argument_group("Input data related")
group.add_argument(
"--ref_scp",
type=str,
required=True,
action="append",
)
group.add_argument(
"--inf_scp",
type=str,
required=True,
action="append",
)
group.add_argument("--key_file", type=str)
group.add_argument("--ref_channel", type=int, default=0)
return parser
def main(cmd=None):
print(get_commandline_args(), file=sys.stderr)
parser = get_parser()
args = parser.parse_args(cmd)
kwargs = vars(args)
kwargs.pop("config", None)
scoring(**kwargs)
if __name__ == "__main__":
main()
| true
| true
|
1c42fc7046524a05a271e4cee68795d8a5e6bfa8
| 1,012
|
py
|
Python
|
examples/212_configuration.py
|
djprohasky/workshop_swinburne_2021
|
596eb12595ef7c2522dc8e03163e917ca43d2a0a
|
[
"MIT"
] | 3
|
2021-06-15T05:51:38.000Z
|
2021-06-16T11:07:15.000Z
|
examples/212_configuration.py
|
djprohasky/workshop_swinburne_2021
|
596eb12595ef7c2522dc8e03163e917ca43d2a0a
|
[
"MIT"
] | 3
|
2021-06-16T10:27:09.000Z
|
2021-06-17T02:37:04.000Z
|
examples/212_configuration.py
|
djprohasky/workshop_swinburne_2021
|
596eb12595ef7c2522dc8e03163e917ca43d2a0a
|
[
"MIT"
] | 3
|
2021-06-15T12:43:22.000Z
|
2021-06-16T11:01:38.000Z
|
# Units:
# - Revolute joint : radians
# - Prismatic joint: meters
import math
from compas.robots.model import Joint
from compas.robots import Configuration
print('Default constructor')
print (Configuration([math.pi, 4], [Joint.REVOLUTE, Joint.PRISMATIC]))
print (Configuration([math.pi, 4], [Joint.REVOLUTE, Joint.PRISMATIC], ['joint_1', 'ext_axis_1']))
print()
print('Construct from revolute values')
print (Configuration.from_revolute_values([math.pi, 0]))
print (Configuration.from_revolute_values([math.pi, 0], ['joint_1', 'joint_2']))
print()
print('Construct from prismatic & revolute values')
print (Configuration.from_prismatic_and_revolute_values([4], [math.pi]))
print (Configuration.from_prismatic_and_revolute_values([4], [math.pi], ['ext_axis_1', 'joint_1']))
print()
print('Merge two configurations')
ext_axes = Configuration([4], [Joint.PRISMATIC], ['ext_axis_1'])
arm_joints = Configuration([math.pi], [Joint.REVOLUTE], ['joint_1'])
full_cfg = ext_axes.merged(arm_joints)
print(full_cfg)
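print('Joint names of the merged configuration')
# Assumption: merged() keeps the names of `ext_axes` before those of
# `arm_joints`, and exposes them via the `joint_names` attribute.
print(full_cfg.joint_names)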
| 34.896552
| 99
| 0.751976
|
import math
from compas.robots.model import Joint
from compas.robots import Configuration
print('Default constructor')
print (Configuration([math.pi, 4], [Joint.REVOLUTE, Joint.PRISMATIC]))
print (Configuration([math.pi, 4], [Joint.REVOLUTE, Joint.PRISMATIC], ['joint_1', 'ext_axis_1']))
print()
print('Construct from revolute values')
print (Configuration.from_revolute_values([math.pi, 0]))
print (Configuration.from_revolute_values([math.pi, 0], ['joint_1', 'joint_2']))
print()
print('Construct from prismatic & revolute values')
print (Configuration.from_prismatic_and_revolute_values([4], [math.pi]))
print (Configuration.from_prismatic_and_revolute_values([4], [math.pi], ['ext_axis_1', 'joint_1']))
print()
print('Merge two configurations')
ext_axes = Configuration([4], [Joint.PRISMATIC], ['ext_axis_1'])
arm_joints = Configuration([math.pi], [Joint.REVOLUTE], ['joint_1'])
full_cfg = ext_axes.merged(arm_joints)
print(full_cfg)
| true
| true
|
1c42fd85841e575161a3c0be5d85f83a9ef49d74
| 2,485
|
py
|
Python
|
tests/test_data/test_datasets/test_dota.py
|
open-mmlab/mmrotate
|
e22c8dfa3c309aa68ff18a5a03316f69c6eb2880
|
[
"Apache-2.0"
] | 449
|
2022-02-18T08:26:58.000Z
|
2022-03-31T11:58:32.000Z
|
tests/test_data/test_datasets/test_dota.py
|
open-mmlab/mmrotate
|
e22c8dfa3c309aa68ff18a5a03316f69c6eb2880
|
[
"Apache-2.0"
] | 162
|
2022-02-18T09:54:46.000Z
|
2022-03-31T15:40:46.000Z
|
tests/test_data/test_datasets/test_dota.py
|
open-mmlab/mmrotate
|
e22c8dfa3c309aa68ff18a5a03316f69c6eb2880
|
[
"Apache-2.0"
] | 98
|
2022-02-18T08:28:48.000Z
|
2022-03-31T08:52:11.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import shutil
import tempfile
import numpy as np
import pytest
from mmdet.datasets import build_dataset
from mmrotate.datasets.dota import DOTADataset
def _create_dummy_results():
"""Create dummy results."""
boxes = [
np.array([[4.3150e+02, 7.0600e+02, 6.7686e+01, 2.1990e+01, 2.9842e-02],
[5.6351e+02, 5.3575e+02, 1.0018e+02, 1.8971e+01, 5.5499e-02],
[5.7450e+02, 5.8450e+02, 9.5567e+01, 2.1094e+01,
8.4012e-02]])
]
return [boxes]
@pytest.mark.parametrize('angle_version', ['oc'])
def test_dota_dataset(angle_version):
"""Test DOTA dataset.
Args:
angle_version (str, optional): Angle representations.
"""
# test CLASSES
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
data_config = dict(
type=DOTADataset,
version=angle_version,
ann_file='tests/data/labelTxt/',
img_prefix='tests/data/images/',
pipeline=train_pipeline)
dataset = build_dataset(data_config)
assert dataset.CLASSES == ('plane', 'baseball-diamond', 'bridge',
'ground-track-field', 'small-vehicle',
'large-vehicle', 'ship', 'tennis-court',
'basketball-court', 'storage-tank',
'soccer-ball-field', 'roundabout', 'harbor',
'swimming-pool', 'helicopter')
# test eval
dataset.CLASSES = ('plane', )
fake_results = _create_dummy_results()
eval_results = dataset.evaluate(fake_results)
np.testing.assert_almost_equal(eval_results['mAP'], 0.7272727)
# test format_results
tmp_filename = osp.join(tempfile.gettempdir(), 'merge_results')
if osp.exists(tmp_filename):
shutil.rmtree(tmp_filename)
dataset.format_results(fake_results, submission_dir=tmp_filename)
shutil.rmtree(tmp_filename)
# test filter_empty_gt=False
full_data_config = dict(
type=DOTADataset,
version=angle_version,
ann_file='tests/data/labelTxt/',
img_prefix='tests/data/images/',
pipeline=train_pipeline,
filter_empty_gt=False)
full_dataset = build_dataset(full_data_config)
assert len(dataset) == 1 and len(full_dataset) == 2
| 33.581081
| 79
| 0.622133
|
import os.path as osp
import shutil
import tempfile
import numpy as np
import pytest
from mmdet.datasets import build_dataset
from mmrotate.datasets.dota import DOTADataset
def _create_dummy_results():
boxes = [
np.array([[4.3150e+02, 7.0600e+02, 6.7686e+01, 2.1990e+01, 2.9842e-02],
[5.6351e+02, 5.3575e+02, 1.0018e+02, 1.8971e+01, 5.5499e-02],
[5.7450e+02, 5.8450e+02, 9.5567e+01, 2.1094e+01,
8.4012e-02]])
]
return [boxes]
@pytest.mark.parametrize('angle_version', ['oc'])
def test_dota_dataset(angle_version):
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
data_config = dict(
type=DOTADataset,
version=angle_version,
ann_file='tests/data/labelTxt/',
img_prefix='tests/data/images/',
pipeline=train_pipeline)
dataset = build_dataset(data_config)
assert dataset.CLASSES == ('plane', 'baseball-diamond', 'bridge',
'ground-track-field', 'small-vehicle',
'large-vehicle', 'ship', 'tennis-court',
'basketball-court', 'storage-tank',
'soccer-ball-field', 'roundabout', 'harbor',
'swimming-pool', 'helicopter')
dataset.CLASSES = ('plane', )
fake_results = _create_dummy_results()
eval_results = dataset.evaluate(fake_results)
np.testing.assert_almost_equal(eval_results['mAP'], 0.7272727)
tmp_filename = osp.join(tempfile.gettempdir(), 'merge_results')
if osp.exists(tmp_filename):
shutil.rmtree(tmp_filename)
dataset.format_results(fake_results, submission_dir=tmp_filename)
shutil.rmtree(tmp_filename)
full_data_config = dict(
type=DOTADataset,
version=angle_version,
ann_file='tests/data/labelTxt/',
img_prefix='tests/data/images/',
pipeline=train_pipeline,
filter_empty_gt=False)
full_dataset = build_dataset(full_data_config)
assert len(dataset) == 1 and len(full_dataset) == 2
| true
| true
|
1c42ff8dc3c1f00d3bba87404fc084001a8de8d3
| 1,539
|
py
|
Python
|
misc/batch_sampler.py
|
dkkim93/pytorch-maml
|
039e7ecf9b3d0b7543ebceb31a6443cc5516779a
|
[
"MIT"
] | null | null | null |
misc/batch_sampler.py
|
dkkim93/pytorch-maml
|
039e7ecf9b3d0b7543ebceb31a6443cc5516779a
|
[
"MIT"
] | null | null | null |
misc/batch_sampler.py
|
dkkim93/pytorch-maml
|
039e7ecf9b3d0b7543ebceb31a6443cc5516779a
|
[
"MIT"
] | null | null | null |
import copy
import torch
import multiprocessing as mp
from misc.utils import make_env
from misc.batch_episode import BatchEpisode
from env.subproc_vec_env import SubprocVecEnv
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class BatchSampler(object):
def __init__(self, args):
self.args = args
self.num_workers = mp.cpu_count() - 1
if self.num_workers > args.n_traj:
self.num_workers = args.n_traj
self.queue = mp.Queue()
self.envs = SubprocVecEnv(
envs=[make_env(args.env_name, args.n_agent) for _ in range(self.num_workers)],
queue=self.queue, args=args)
# Set seed to envs
self.envs.seed(0)
def sample(self):
episode = BatchEpisode(1)
for i in range(1):
self.queue.put(i)
for _ in range(self.num_workers):
self.queue.put(None)
observations, batch_ids = self.envs.reset()
dones = [False]
while (not all(dones)) or (not self.queue.empty()):
actions = copy.deepcopy(observations)
new_observations, rewards, dones, new_batch_ids, _ = self.envs.step(actions)
episode.append(observations, actions, rewards, batch_ids)
observations, batch_ids = new_observations, new_batch_ids
episode.check_length()
return episode
def reset_task(self, task):
tasks = [task for _ in range(self.num_workers)]
reset = self.envs.reset_task(tasks)
return all(reset)
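# Hypothetical construction sketch (the env_name/n_agent/n_traj values are
# assumptions, not defaults shipped with this repo):
# from argparse import Namespace
# sampler = BatchSampler(Namespace(env_name="2DNavigation-v0", n_agent=1, n_traj=4))
# episode = sampler.sample()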
| 31.408163
| 91
| 0.638077
|
import copy
import torch
import multiprocessing as mp
from misc.utils import make_env
from misc.batch_episode import BatchEpisode
from env.subproc_vec_env import SubprocVecEnv
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class BatchSampler(object):
def __init__(self, args):
self.args = args
self.num_workers = mp.cpu_count() - 1
if self.num_workers > args.n_traj:
self.num_workers = args.n_traj
self.queue = mp.Queue()
self.envs = SubprocVecEnv(
envs=[make_env(args.env_name, args.n_agent) for _ in range(self.num_workers)],
queue=self.queue, args=args)
self.envs.seed(0)
def sample(self):
episode = BatchEpisode(1)
for i in range(1):
self.queue.put(i)
for _ in range(self.num_workers):
self.queue.put(None)
observations, batch_ids = self.envs.reset()
dones = [False]
while (not all(dones)) or (not self.queue.empty()):
actions = copy.deepcopy(observations)
new_observations, rewards, dones, new_batch_ids, _ = self.envs.step(actions)
episode.append(observations, actions, rewards, batch_ids)
observations, batch_ids = new_observations, new_batch_ids
episode.check_length()
return episode
def reset_task(self, task):
tasks = [task for _ in range(self.num_workers)]
reset = self.envs.reset_task(tasks)
return all(reset)
| true
| true
|
1c42ffc6656a8ee02b871d02dbb7cb8e0157fbfd
| 1,356
|
py
|
Python
|
music/migrations/0009_auto_20200627_1107.py
|
amin-da71/Benbb96
|
0c9e37425d0665e403ba6fecf0c4b17669c29ada
|
[
"MIT"
] | null | null | null |
music/migrations/0009_auto_20200627_1107.py
|
amin-da71/Benbb96
|
0c9e37425d0665e403ba6fecf0c4b17669c29ada
|
[
"MIT"
] | 13
|
2021-02-13T20:15:18.000Z
|
2022-03-11T23:57:07.000Z
|
music/migrations/0009_auto_20200627_1107.py
|
amin-da71/Benbb96
|
0c9e37425d0665e403ba6fecf0c4b17669c29ada
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.12 on 2020-06-27 09:07
from django.db import migrations, models
from slugify import slugify
def create_slug(apps, schema_editor):
Style = apps.get_model("music", "Style")
db_alias = schema_editor.connection.alias
for style in Style.objects.using(db_alias).all():
style.slug = slugify(style.nom)
style.save(update_fields=['slug'])
class Migration(migrations.Migration):
dependencies = [
('music', '0008_lien_date_validation'),
]
operations = [
migrations.AddField(
model_name='style',
name='slug',
field=models.SlugField(unique=True, null=True),
),
migrations.AlterField(
model_name='playlist',
name='slug',
field=models.SlugField(unique=True),
),
migrations.AddField(
model_name='style',
name='description',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='style',
name='lien_wiki',
field=models.URLField(blank=True),
),
migrations.RunPython(create_slug, migrations.RunPython.noop),
migrations.AlterField(
model_name='style',
name='slug',
field=models.SlugField(unique=True),
)
]
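# What create_slug writes for typical style names (a sketch using the same
# `slugify` helper imported above):
#   slugify("Hip Hop")  -> "hip-hop"
#   slugify("Électro")  -> "electro"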
| 27.673469
| 69
| 0.584808
|
from django.db import migrations, models
from slugify import slugify
def create_slug(apps, schema_editor):
Style = apps.get_model("music", "Style")
db_alias = schema_editor.connection.alias
for style in Style.objects.using(db_alias).all():
style.slug = slugify(style.nom)
style.save(update_fields=['slug'])
class Migration(migrations.Migration):
dependencies = [
('music', '0008_lien_date_validation'),
]
operations = [
migrations.AddField(
model_name='style',
name='slug',
field=models.SlugField(unique=True, null=True),
),
migrations.AlterField(
model_name='playlist',
name='slug',
field=models.SlugField(unique=True),
),
migrations.AddField(
model_name='style',
name='description',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='style',
name='lien_wiki',
field=models.URLField(blank=True),
),
migrations.RunPython(create_slug, migrations.RunPython.noop),
migrations.AlterField(
model_name='style',
name='slug',
field=models.SlugField(unique=True),
)
]
| true
| true
|
1c42ffd5207a56ae5d33a0921a3287201d66da0d
| 4,467
|
py
|
Python
|
uplink/converters/typing_.py
|
kamalgill/uplink
|
3ade04d230d578690ccf2c3833aedc4ac9d895c3
|
[
"MIT"
] | 918
|
2017-10-20T10:47:40.000Z
|
2022-03-27T19:10:21.000Z
|
uplink/converters/typing_.py
|
kamalgill/uplink
|
3ade04d230d578690ccf2c3833aedc4ac9d895c3
|
[
"MIT"
] | 248
|
2017-10-20T03:58:20.000Z
|
2022-03-13T18:39:16.000Z
|
uplink/converters/typing_.py
|
kamalgill/uplink
|
3ade04d230d578690ccf2c3833aedc4ac9d895c3
|
[
"MIT"
] | 66
|
2017-10-21T02:56:34.000Z
|
2022-02-15T08:27:50.000Z
|
# Standard library imports
import collections
from collections import abc
import functools
# Local imports
from uplink.converters import interfaces, register_default_converter_factory
__all__ = ["TypingConverter", "ListConverter", "DictConverter"]
class BaseTypeConverter(object):
Builder = collections.namedtuple("Builder", "build")
@classmethod
def freeze(cls, *args, **kwargs):
return cls.Builder(functools.partial(cls, *args, **kwargs))
class ListConverter(BaseTypeConverter, interfaces.Converter):
def __init__(self, elem_type):
self._elem_type = elem_type
self._elem_converter = None
def set_chain(self, chain):
self._elem_converter = chain(self._elem_type) or self._elem_type
def convert(self, value):
if isinstance(value, abc.Sequence):
return list(map(self._elem_converter, value))
else:
            # TODO: Handle the case where the value is not a sequence.
return [self._elem_converter(value)]
class DictConverter(BaseTypeConverter, interfaces.Converter):
def __init__(self, key_type, value_type):
self._key_type = key_type
self._value_type = value_type
self._key_converter = None
self._value_converter = None
def set_chain(self, chain):
self._key_converter = chain(self._key_type) or self._key_type
self._value_converter = chain(self._value_type) or self._value_type
def convert(self, value):
if isinstance(value, abc.Mapping):
key_c, val_c = self._key_converter, self._value_converter
return dict((key_c(k), val_c(value[k])) for k in value)
else:
# TODO: Handle the case where the value is not a mapping.
return self._value_converter(value)
class _TypeProxy(object):
def __init__(self, func):
self._func = func
def __getitem__(self, item):
items = item if isinstance(item, tuple) else (item,)
return self._func(*items)
def _get_types(try_typing=True):
if TypingConverter.typing and try_typing:
return TypingConverter.typing.List, TypingConverter.typing.Dict
else:
return (
_TypeProxy(ListConverter.freeze),
_TypeProxy(DictConverter.freeze),
)
@register_default_converter_factory
class TypingConverter(interfaces.Factory):
"""
    .. versionadded:: v0.5.0
An adapter that serializes and deserializes collection types from
the :py:mod:`typing` module, such as :py:class:`typing.List`.
Inner types of a collection are recursively resolved, using other
available converters if necessary. For instance, when resolving the
type hint :py:attr:`typing.Sequence[UserSchema]`, where
:py:attr:`UserSchema` is a custom :py:class:`marshmallow.Schema`
subclass, the converter will resolve the inner type using
:py:class:`uplink.converters.MarshmallowConverter`.
.. code-block:: python
@get("/users")
def get_users(self) -> typing.Sequence[UserSchema]:
'''Fetch all users.'''
Note:
The :py:mod:`typing` module is available in the standard library
starting from Python 3.5. For earlier versions of Python, there
is a port of the module available on PyPI.
However, you can utilize this converter without the
:py:mod:`typing` module by using one of the proxies defined by
:py:class:`uplink.returns` (e.g., :py:obj:`uplink.types.List`).
"""
try:
import typing
except ImportError: # pragma: no cover
typing = None
def _check_typing(self, t):
has_origin = hasattr(t, "__origin__")
has_args = hasattr(t, "__args__")
return self.typing and has_origin and has_args
def _base_converter(self, type_):
if isinstance(type_, BaseTypeConverter.Builder):
return type_.build()
elif self._check_typing(type_):
if issubclass(type_.__origin__, self.typing.Sequence):
return ListConverter(*type_.__args__)
elif issubclass(type_.__origin__, self.typing.Mapping):
return DictConverter(*type_.__args__)
def create_response_body_converter(self, type_, *args, **kwargs):
return self._base_converter(type_)
def create_request_body_converter(self, type_, *args, **kwargs):
return self._base_converter(type_)
TypingConverter.List, TypingConverter.Dict = _get_types()
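# Hand-rolled resolution sketch (no registry involved): when the chain finds
# no converter for the element type, ListConverter falls back to calling the
# type itself.
#
# conv = ListConverter(int)
# conv.set_chain(lambda t: None)  # nothing registered -> fall back to `int`
# conv.convert(["1", "2"])        # -> [1, 2]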
| 33.335821
| 76
| 0.67786
|
import collections
from collections import abc
import functools
from uplink.converters import interfaces, register_default_converter_factory
__all__ = ["TypingConverter", "ListConverter", "DictConverter"]
class BaseTypeConverter(object):
Builder = collections.namedtuple("Builder", "build")
@classmethod
def freeze(cls, *args, **kwargs):
return cls.Builder(functools.partial(cls, *args, **kwargs))
class ListConverter(BaseTypeConverter, interfaces.Converter):
def __init__(self, elem_type):
self._elem_type = elem_type
self._elem_converter = None
def set_chain(self, chain):
self._elem_converter = chain(self._elem_type) or self._elem_type
def convert(self, value):
if isinstance(value, abc.Sequence):
return list(map(self._elem_converter, value))
else:
return [self._elem_converter(value)]
class DictConverter(BaseTypeConverter, interfaces.Converter):
def __init__(self, key_type, value_type):
self._key_type = key_type
self._value_type = value_type
self._key_converter = None
self._value_converter = None
def set_chain(self, chain):
self._key_converter = chain(self._key_type) or self._key_type
self._value_converter = chain(self._value_type) or self._value_type
def convert(self, value):
if isinstance(value, abc.Mapping):
key_c, val_c = self._key_converter, self._value_converter
return dict((key_c(k), val_c(value[k])) for k in value)
else:
return self._value_converter(value)
class _TypeProxy(object):
def __init__(self, func):
self._func = func
def __getitem__(self, item):
items = item if isinstance(item, tuple) else (item,)
return self._func(*items)
def _get_types(try_typing=True):
if TypingConverter.typing and try_typing:
return TypingConverter.typing.List, TypingConverter.typing.Dict
else:
return (
_TypeProxy(ListConverter.freeze),
_TypeProxy(DictConverter.freeze),
)
@register_default_converter_factory
class TypingConverter(interfaces.Factory):
try:
import typing
except ImportError:
typing = None
def _check_typing(self, t):
has_origin = hasattr(t, "__origin__")
has_args = hasattr(t, "__args__")
return self.typing and has_origin and has_args
def _base_converter(self, type_):
if isinstance(type_, BaseTypeConverter.Builder):
return type_.build()
elif self._check_typing(type_):
if issubclass(type_.__origin__, self.typing.Sequence):
return ListConverter(*type_.__args__)
elif issubclass(type_.__origin__, self.typing.Mapping):
return DictConverter(*type_.__args__)
def create_response_body_converter(self, type_, *args, **kwargs):
return self._base_converter(type_)
def create_request_body_converter(self, type_, *args, **kwargs):
return self._base_converter(type_)
TypingConverter.List, TypingConverter.Dict = _get_types()
| true
| true
|
1c42ffe2341cdbc1c2a3b71c9787f8475747b71a
| 869
|
py
|
Python
|
ts/torch_handler/request_envelope/base.py
|
KYKong1997/serve
|
ce3348d8ba6ee2a02ec171ba1cd984c0cadcc4ac
|
[
"Apache-2.0"
] | null | null | null |
ts/torch_handler/request_envelope/base.py
|
KYKong1997/serve
|
ce3348d8ba6ee2a02ec171ba1cd984c0cadcc4ac
|
[
"Apache-2.0"
] | 1
|
2020-06-19T06:11:19.000Z
|
2020-06-19T06:11:19.000Z
|
ts/torch_handler/request_envelope/base.py
|
gunandrose4u/serve
|
7b25f2b5aff08fa53d656a61b6a2f5127736c9f2
|
[
"Apache-2.0"
] | null | null | null |
"""
Base class for all RequestEnvelope.
A request envelope reformats the inputs/outputs of a call to a handler.
It translates from formats specific to a model orchestrator like Seldon or
KFServing to a set of flat Python items, and vice versa.
"""
from abc import ABC, abstractmethod
class BaseEnvelope(ABC):
"""
Interface for all envelopes.
    Derive from this class, replacing the abstract methods.
"""
def __init__(self, handle_fn):
self._handle_fn = handle_fn
def handle(self, data, context):
if data:
data = self.parse_input(data)
results = self._handle_fn(data, context)
if results:
results = self.format_output(results)
return results
@abstractmethod
def parse_input(self, data):
pass
@abstractmethod
def format_output(self, data):
pass
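# A minimal illustrative subclass (a sketch, not one of the bundled
# envelopes): flatten TorchServe's request dicts on the way in, and hand
# predictions back one entry per request on the way out.
class _IdentityEnvelope(BaseEnvelope):
    def parse_input(self, data):
        # Each request row carries its payload under "data" or "body".
        return [row.get("data") or row.get("body") for row in data]
    def format_output(self, data):
        # Predictions are already a flat list; return them unchanged.
        return data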
| 23.486486
| 74
| 0.669735
|
from abc import ABC, abstractmethod
class BaseEnvelope(ABC):
def __init__(self, handle_fn):
self._handle_fn = handle_fn
def handle(self, data, context):
if data:
data = self.parse_input(data)
results = self._handle_fn(data, context)
if results:
results = self.format_output(results)
return results
@abstractmethod
def parse_input(self, data):
pass
@abstractmethod
def format_output(self, data):
pass
| true
| true
|
1c42fff8091fe2db9607f1770bc96be7dac389e4
| 48,256
|
py
|
Python
|
modeling.py
|
pingheng001/Cnn-Bert
|
d2be31634d693fbbe3b4bf2b28eb83af015cda72
|
[
"Apache-2.0"
] | null | null | null |
modeling.py
|
pingheng001/Cnn-Bert
|
d2be31634d693fbbe3b4bf2b28eb83af015cda72
|
[
"Apache-2.0"
] | null | null | null |
modeling.py
|
pingheng001/Cnn-Bert
|
d2be31634d693fbbe3b4bf2b28eb83af015cda72
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main BERT model and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
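# Round-trip sketch (tiny, made-up hyperparameters):
#
# config = BertConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
#                     num_attention_heads=2, intermediate_size=64)
# assert BertConfig.from_dict(config.to_dict()).hidden_size == 32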
class BertModel(object):
"""BERT model ("Bidirectional Encoder Representations from Transformers").
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = modeling.BertModel(config=config, is_training=True,
input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)
label_embeddings = tf.get_variable(...)
pooled_output = model.get_pooled_output()
logits = tf.matmul(pooled_output, label_embeddings)
...
```
"""
def __init__(self,
config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
use_one_hot_embeddings=False,
scope=None):
"""Constructor for BertModel.
Args:
config: `BertConfig` instance.
is_training: bool. true for training model, false for eval model. Controls
whether dropout will be applied.
input_ids: int32 Tensor of shape [batch_size, seq_length].
input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
embeddings or tf.embedding_lookup() for the word embeddings.
scope: (optional) variable scope. Defaults to "bert".
Raises:
ValueError: The config is invalid or one of the input tensor shapes
is invalid.
"""
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.variable_scope(scope, default_name="bert"):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.embedding_output, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.hidden_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
self.embedding_output = embedding_postprocessor(
input_tensor=self.embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob)
with tf.variable_scope("encoder"):
# This converts a 2D mask of shape [batch_size, seq_length] to a 3D
# mask of shape [batch_size, seq_length, seq_length] which is used
# for the attention scores.
attention_mask = create_attention_mask_from_input_mask(
input_ids, input_mask)
# Run the stacked transformer.
# `sequence_output` shape = [batch_size, seq_length, hidden_size].
self.all_encoder_layers = transformer_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True)
self.sequence_output = self.all_encoder_layers[-1]
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
with tf.variable_scope("pooler"):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(config.initializer_range))
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
"""Gets final hidden layer of encoder.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the final hidden of the transformer encoder.
"""
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_embedding_output(self):
"""Gets output of the embedding lookup (i.e., input to the transformer).
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the output of the embedding layer, after summing the word
embeddings with the positional embeddings and the token type embeddings,
then performing layer normalization. This is the input to the transformer.
"""
return self.embedding_output
def get_embedding_table(self):
return self.embedding_table
class CNNBertModel(object):
"""BERT model ("Bidirectional Encoder Representations from Transformers").
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
  model = modeling.CNNBertModel(config=config, is_training=True,
input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)
label_embeddings = tf.get_variable(...)
pooled_output = model.get_pooled_output()
logits = tf.matmul(pooled_output, label_embeddings)
...
```
"""
def __init__(self,
config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
use_one_hot_embeddings=False,
scope=None):
"""Constructor for BertModel.
Args:
config: `BertConfig` instance.
is_training: bool. true for training model, false for eval model. Controls
whether dropout will be applied.
input_ids: int32 Tensor of shape [batch_size, seq_length].
input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
embeddings or tf.embedding_lookup() for the word embeddings.
scope: (optional) variable scope. Defaults to "bert".
Raises:
ValueError: The config is invalid or one of the input tensor shapes
is invalid.
"""
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.variable_scope(scope, default_name="bert"):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.embedding_output, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.hidden_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
self.embedding_output = embedding_postprocessor(
input_tensor=self.embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob)
with tf.variable_scope("encoder"):
# This converts a 2D mask of shape [batch_size, seq_length] to a 3D
# mask of shape [batch_size, seq_length, seq_length] which is used
# for the attention scores.
attention_mask = create_attention_mask_from_input_mask(
input_ids, input_mask)
# Run the stacked transformer.
# `sequence_output` shape = [batch_size, seq_length, hidden_size].
self.all_encoder_layers = transformer_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True)
self.all_cnn_layers = cnn_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True)
self.sequence_output_transformer = self.all_encoder_layers[-1]
      self.sequence_output_cnn = self.all_cnn_layers[-1]
with tf.variable_scope("merge_cnn_transformer"):
merge_input = tf.concat([self.sequence_output_transformer, self.sequence_output_cnn], axis=-1)
self.sequence_output = tf.layers.dense(
merge_input,
config.hidden_size,
kernel_initializer=create_initializer(config.initializer_range))
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
with tf.variable_scope("pooler"):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(config.initializer_range))
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
"""Gets final hidden layer of encoder.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the final hidden of the transformer encoder.
"""
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_embedding_output(self):
"""Gets output of the embedding lookup (i.e., input to the transformer).
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the output of the embedding layer, after summing the word
embeddings with the positional embeddings and the token type embeddings,
then performing layer normalization. This is the input to the transformer.
"""
return self.embedding_output
def get_embedding_table(self):
return self.embedding_table
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
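# Numeric sanity check of the tanh approximation above (a sketch; numpy
# stands in for a TF session):
#
# import numpy as np
# g = lambda x: x * 0.5 * (1.0 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * x ** 3)))
# g(0.0) == 0.0; g(-10.0) is ~0.0 and g(10.0) is ~10.0, matching ReLU away from 0.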
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
# We assume that anything that"s not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if name not in name_to_variable:
continue
assignment_map[name] = name
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return (assignment_map, initialized_variable_names)
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
return output
def layer_norm(input_tensor, name=None):
"""Run layer normalization on the last dimension of the tensor."""
return tf.contrib.layers.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.truncated_normal_initializer(stddev=initializer_range)
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=False):
"""Looks up words embeddings for id tensor.
Args:
input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
ids.
vocab_size: int. Size of the embedding vocabulary.
embedding_size: int. Width of the word embeddings.
initializer_range: float. Embedding initialization range.
word_embedding_name: string. Name of the embedding table.
use_one_hot_embeddings: bool. If True, use one-hot method for word
embeddings. If False, use `tf.gather()`.
Returns:
float Tensor of shape [batch_size, seq_length, embedding_size].
"""
# This function assumes that the input is of shape [batch_size, seq_length,
# num_inputs].
#
# If the input is a 2D tensor of shape [batch_size, seq_length], we
# reshape to [batch_size, seq_length, 1].
if input_ids.shape.ndims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
embedding_table = tf.get_variable(
name=word_embedding_name,
shape=[vocab_size, embedding_size],
initializer=create_initializer(initializer_range))
flat_input_ids = tf.reshape(input_ids, [-1])
if use_one_hot_embeddings:
one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
output = tf.matmul(one_hot_input_ids, embedding_table)
else:
output = tf.gather(embedding_table, flat_input_ids)
input_shape = get_shape_list(input_ids)
output = tf.reshape(output,
input_shape[0:-1] + [input_shape[-1] * embedding_size])
return (output, embedding_table)
def embedding_postprocessor(input_tensor,
use_token_type=False,
token_type_ids=None,
token_type_vocab_size=16,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=0.02,
max_position_embeddings=512,
dropout_prob=0.1):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
dropout_prob: float. Dropout probability applied to the final output tensor.
Returns:
float tensor with same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
raise ValueError("`token_type_ids` must be specified if"
"`use_token_type` is True.")
token_type_table = tf.get_variable(
name=token_type_embedding_name,
shape=[token_type_vocab_size, width],
initializer=create_initializer(initializer_range))
# This vocab will be small so we always do one-hot here, since it is always
# faster for a small vocabulary.
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.get_variable(
name=position_embedding_name,
shape=[max_position_embeddings, width],
initializer=create_initializer(initializer_range))
# Since the position embedding table is a learned variable, we create it
# using a (long) sequence length `max_position_embeddings`. The actual
# sequence length might be shorter than this, for faster training of
# tasks that do not have long sequences.
#
# So `full_position_embeddings` is effectively an embedding table
# for position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
# perform a slice.
position_embeddings = tf.slice(full_position_embeddings, [0, 0],
[seq_length, -1])
num_dims = len(output.shape.as_list())
# Only the last two dimensions are relevant (`seq_length` and `width`), so
# we broadcast among the first dimensions, which is typically just
# the batch size.
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings,
position_broadcast_shape)
output += position_embeddings
output = layer_norm_and_dropout(output, dropout_prob)
return output
def create_attention_mask_from_input_mask(from_tensor, to_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_shape = get_shape_list(to_mask, expected_rank=2)
to_seq_length = to_shape[1]
to_mask = tf.cast(
tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)
# We don't assume that `from_tensor` is a mask (although it could be). We
# don't actually care if we attend *from* padding tokens (only *to* padding)
# tokens so we create a tensor of all ones.
#
# `broadcast_ones` = [batch_size, from_seq_length, 1]
broadcast_ones = tf.ones(
shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
# Here we broadcast along two dimensions to create the mask.
mask = broadcast_ones * to_mask
return mask
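# Shape sketch in plain numpy (batch_size=2, seq_length=3 are assumptions):
#
# import numpy as np
# to_mask = np.array([[1, 1, 1], [1, 1, 0]], np.float32)            # [B, T]
# mask = np.ones((2, 3, 1), np.float32) * to_mask.reshape(2, 1, 3)  # [B, F, T]
# mask[1, :, 2] is all zeros: no query may attend *to* the padded position.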
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=512,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
do_return_2d_tensor=False,
batch_size=None,
from_seq_length=None,
to_seq_length=None):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
This is an implementation of multi-headed attention based on "Attention
is all you Need". If `from_tensor` and `to_tensor` are the same, then
this is self-attention. Each timestep in `from_tensor` attends to the
  corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
  In practice, the multi-headed attention is done with transposes and
reshapes rather than actual separate tensors.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
* from_seq_length, num_attention_heads * size_per_head]. If False, the
output will be of shape [batch_size, from_seq_length, num_attention_heads
* size_per_head].
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length,
num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
true, this will be of shape [batch_size * from_seq_length,
num_attention_heads * size_per_head]).
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
seq_length, width):
output_tensor = tf.reshape(
input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
from_tensor_2d = reshape_to_matrix(from_tensor)
to_tensor_2d = reshape_to_matrix(to_tensor)
# `query_layer` = [B*F, N*H]
query_layer = tf.layers.dense(
from_tensor_2d,
num_attention_heads * size_per_head,
activation=query_act,
name="query",
kernel_initializer=create_initializer(initializer_range))
# `key_layer` = [B*T, N*H]
key_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=key_act,
name="key",
kernel_initializer=create_initializer(initializer_range))
# `value_layer` = [B*T, N*H]
value_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=value_act,
name="value",
kernel_initializer=create_initializer(initializer_range))
# `query_layer` = [B, N, F, H]
query_layer = transpose_for_scores(query_layer, batch_size,
num_attention_heads, from_seq_length,
size_per_head)
# `key_layer` = [B, N, T, H]
key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
to_seq_length, size_per_head)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `attention_scores` = [B, N, F, T]
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `value_layer` = [B, T, N, H]
value_layer = tf.reshape(
value_layer,
[batch_size, to_seq_length, num_attention_heads, size_per_head])
# `value_layer` = [B, N, T, H]
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
# `context_layer` = [B, N, F, H]
context_layer = tf.matmul(attention_probs, value_layer)
# `context_layer` = [B, F, N, H]
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
# `context_layer` = [B*F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
# `context_layer` = [B, F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
return context_layer
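# Illustrative sketch (not part of the original file; names here are
# hypothetical): 8-head self-attention over a [batch=2, seq=16, width=512]
# input, matching the shapes documented above. Assumes TF1.x graph mode.
def _attention_layer_demo():
  x = tf.ones([2, 16, 512])
  ctx = attention_layer(from_tensor=x, to_tensor=x,
                        num_attention_heads=8, size_per_head=64)
  return ctx  # [2, 16, 8 * 64] since do_return_2d_tensor defaults to False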
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
seq_length], with 1 for positions that can be attended to and 0 in
positions that should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: float. Dropout probability for the hidden layers.
attention_probs_dropout_prob: float. Dropout probability of the attention
probabilities.
initializer_range: float. Range of the initializer (stddev of truncated
normal).
do_return_all_layers: Whether to also return all layers or just the final
layer.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size], the final
hidden layer of the Transformer.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_width = input_shape[2]
# The Transformer performs sum residuals on all layers so the input needs
# to be the same as the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
# We keep the representation as a 2D tensor to avoid re-shaping it back and
# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
# the GPU/CPU but may not be free on the TPU, so we want to minimize them to
# help the optimizer.
prev_output = reshape_to_matrix(input_tensor)
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.variable_scope("layer_%d" % layer_idx):
layer_input = prev_output
with tf.variable_scope("attention"):
attention_heads = []
with tf.variable_scope("self"):
attention_head = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range,
do_return_2d_tensor=True,
batch_size=batch_size,
from_seq_length=seq_length,
to_seq_length=seq_length)
attention_heads.append(attention_head)
attention_output = None
if len(attention_heads) == 1:
attention_output = attention_heads[0]
else:
# In the case where we have other sequences, we just concatenate
# them to the self-attention head before the projection.
attention_output = tf.concat(attention_heads, axis=-1)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.variable_scope("output"):
attention_output = tf.layers.dense(
attention_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
attention_output = dropout(attention_output, hidden_dropout_prob)
attention_output = layer_norm(attention_output + layer_input)
# The activation is only applied to the "intermediate" hidden layer.
with tf.variable_scope("intermediate"):
intermediate_output = tf.layers.dense(
attention_output,
intermediate_size,
activation=intermediate_act_fn,
kernel_initializer=create_initializer(initializer_range))
# Down-project back to `hidden_size` then add the residual.
with tf.variable_scope("output"):
layer_output = tf.layers.dense(
intermediate_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
layer_output = dropout(layer_output, hidden_dropout_prob)
layer_output = layer_norm(layer_output + attention_output)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
final_outputs = []
for layer_output in all_layer_outputs:
final_output = reshape_from_matrix(layer_output, input_shape)
final_outputs.append(final_output)
return final_outputs
else:
final_output = reshape_from_matrix(prev_output, input_shape)
return final_output
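# Illustrative sketch (not part of the original file): a small 2-layer,
# 4-head encoder. hidden_size must be divisible by num_attention_heads and
# must equal the input width, per the checks above.
def _transformer_model_demo():
  x = tf.ones([2, 8, 64])
  return transformer_model(x, hidden_size=64, num_hidden_layers=2,
                           num_attention_heads=4, intermediate_size=128)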
def cnn_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False):
# if hidden_size % num_attention_heads != 0:
# raise ValueError(
# "The hidden size (%d) is not a multiple of the number of attention "
# "heads (%d)" % (hidden_size, num_attention_heads))
# attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_width = input_shape[2]
# The Transformer performs sum residuals on all layers so the input needs
# to be the same as the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
  # Unlike the transformer branch, `tf.layers.conv1d` expects a rank-3 input
  # of shape [batch, seq_length, channels], so the representation is kept 3D
  # throughout this function.
  prev_output = input_tensor
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.variable_scope("cnn_layer_%d" % layer_idx):
layer_input = prev_output
with tf.variable_scope("cnn_compute"):
        # Use `hidden_size` filters so the residual connection below
        # (`layer_output + cnn_output`) is shape-compatible; the kernel width
        # grows with depth (layer_idx + 1).
        cnn_output = tf.layers.conv1d(layer_input, hidden_size, layer_idx + 1,
                                      padding='same')
# The activation is only applied to the "intermediate" hidden layer.
with tf.variable_scope("intermediate"):
intermediate_output = tf.layers.dense(
cnn_output,
intermediate_size,
activation=intermediate_act_fn,
kernel_initializer=create_initializer(initializer_range))
# Down-project back to `hidden_size` then add the residual.
with tf.variable_scope("output"):
layer_output = tf.layers.dense(
intermediate_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
layer_output = dropout(layer_output, hidden_dropout_prob)
layer_output = layer_norm(layer_output + cnn_output)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
final_outputs = []
for layer_output in all_layer_outputs:
final_output = reshape_from_matrix(layer_output, input_shape)
final_outputs.append(final_output)
return final_outputs
else:
final_output = reshape_from_matrix(prev_output, input_shape)
return final_output
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
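# Illustrative sketch (not part of the original file): static dimensions come
# back as Python ints, dynamic ones as scalar Tensors.
def _get_shape_list_demo():
  x = tf.placeholder(tf.float32, shape=[None, 128])
  dims = get_shape_list(x, expected_rank=2)
  return dims  # [<batch-size Tensor>, 128]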
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
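# Illustrative sketch (not part of the original file): reshape_to_matrix and
# reshape_from_matrix round-trip a rank-3 tensor given its shape list.
def _reshape_round_trip_demo():
  x = tf.ones([2, 8, 64])
  flat = reshape_to_matrix(x)  # [16, 64]
  return reshape_from_matrix(flat, get_shape_list(x))  # back to [2, 8, 64]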
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
| 38.947538
| 104
| 0.69266
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
class BertConfig(object):
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
@classmethod
def from_dict(cls, json_object):
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
with tf.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
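# Illustrative sketch (not part of the original file): configs round-trip
# through their JSON form via to_json_string / from_dict.
def _bert_config_demo():
  cfg = BertConfig(vocab_size=32000, hidden_size=256, num_hidden_layers=4)
  restored = BertConfig.from_dict(json.loads(cfg.to_json_string()))
  return restored.hidden_size  # 256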
class BertModel(object):
def __init__(self,
config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
use_one_hot_embeddings=False,
scope=None):
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.variable_scope(scope, default_name="bert"):
with tf.variable_scope("embeddings"):
(self.embedding_output, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.hidden_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
self.embedding_output = embedding_postprocessor(
input_tensor=self.embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob)
with tf.variable_scope("encoder"):
attention_mask = create_attention_mask_from_input_mask(
input_ids, input_mask)
self.all_encoder_layers = transformer_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True)
self.sequence_output = self.all_encoder_layers[-1]
with tf.variable_scope("pooler"):
first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(config.initializer_range))
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_embedding_output(self):
return self.embedding_output
def get_embedding_table(self):
return self.embedding_table
class CNNBertModel(object):
def __init__(self,
config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
use_one_hot_embeddings=False,
scope=None):
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.variable_scope(scope, default_name="bert"):
with tf.variable_scope("embeddings"):
(self.embedding_output, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.hidden_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
self.embedding_output = embedding_postprocessor(
input_tensor=self.embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob)
with tf.variable_scope("encoder"):
attention_mask = create_attention_mask_from_input_mask(
input_ids, input_mask)
self.all_encoder_layers = transformer_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True)
self.all_cnn_layers = cnn_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True)
self.sequence_output_transformer = self.all_encoder_layers[-1]
      self.sequence_output_cnn = self.all_cnn_layers[-1]
with tf.variable_scope("merge_cnn_transformer"):
merge_input = tf.concat([self.sequence_output_transformer, self.sequence_output_cnn], axis=-1)
self.sequence_output = tf.layers.dense(
merge_input,
config.hidden_size,
kernel_initializer=create_initializer(config.initializer_range))
with tf.variable_scope("pooler"):
first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(config.initializer_range))
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_embedding_output(self):
return self.embedding_output
def get_embedding_table(self):
return self.embedding_table
def gelu(x):
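  # Gaussian Error Linear Unit, via the tanh approximation
  # 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3))).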
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
def get_activation(activation_string):
  # We assume that anything that's not a string is already an activation
  # function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if name not in name_to_variable:
continue
assignment_map[name] = name
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return (assignment_map, initialized_variable_names)
def dropout(input_tensor, dropout_prob):
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
return output
def layer_norm(input_tensor, name=None):
return tf.contrib.layers.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
output_tensor = layer_norm(input_tensor, name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def create_initializer(initializer_range=0.02):
return tf.truncated_normal_initializer(stddev=initializer_range)
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=False):
# This function assumes that the input is of shape [batch_size, seq_length,
# num_inputs].
#
# If the input is a 2D tensor of shape [batch_size, seq_length], we
# reshape to [batch_size, seq_length, 1].
if input_ids.shape.ndims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
embedding_table = tf.get_variable(
name=word_embedding_name,
shape=[vocab_size, embedding_size],
initializer=create_initializer(initializer_range))
flat_input_ids = tf.reshape(input_ids, [-1])
if use_one_hot_embeddings:
one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
output = tf.matmul(one_hot_input_ids, embedding_table)
else:
output = tf.gather(embedding_table, flat_input_ids)
input_shape = get_shape_list(input_ids)
output = tf.reshape(output,
input_shape[0:-1] + [input_shape[-1] * embedding_size])
return (output, embedding_table)
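# Illustrative sketch (not part of the original file): look up three ids in a
# 10 x 16 embedding table; the output is [1, 3, 16].
def _embedding_lookup_demo():
  ids = tf.constant([[1, 2, 3]])
  output, table = embedding_lookup(ids, vocab_size=10, embedding_size=16)
  return output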
def embedding_postprocessor(input_tensor,
use_token_type=False,
token_type_ids=None,
token_type_vocab_size=16,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=0.02,
max_position_embeddings=512,
dropout_prob=0.1):
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
      raise ValueError("`token_type_ids` must be specified if "
                       "`use_token_type` is True.")
token_type_table = tf.get_variable(
name=token_type_embedding_name,
shape=[token_type_vocab_size, width],
initializer=create_initializer(initializer_range))
# This vocab will be small so we always do one-hot here, since it is always
# faster for a small vocabulary.
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.get_variable(
name=position_embedding_name,
shape=[max_position_embeddings, width],
initializer=create_initializer(initializer_range))
# Since the position embedding table is a learned variable, we create it
# using a (long) sequence length `max_position_embeddings`. The actual
# sequence length might be shorter than this, for faster training of
# tasks that do not have long sequences.
#
# So `full_position_embeddings` is effectively an embedding table
# for position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
# perform a slice.
position_embeddings = tf.slice(full_position_embeddings, [0, 0],
[seq_length, -1])
num_dims = len(output.shape.as_list())
# Only the last two dimensions are relevant (`seq_length` and `width`), so
# we broadcast among the first dimensions, which is typically just
# the batch size.
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings,
position_broadcast_shape)
output += position_embeddings
output = layer_norm_and_dropout(output, dropout_prob)
return output
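# Illustrative sketch (not part of the original file): add token-type and
# position embeddings to a [2, 8, 16] embedding output.
def _embedding_postprocessor_demo():
  emb = tf.ones([2, 8, 16])
  ids = tf.zeros([2, 8], dtype=tf.int32)
  return embedding_postprocessor(emb, use_token_type=True, token_type_ids=ids,
                                 token_type_vocab_size=2,
                                 max_position_embeddings=8)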
def create_attention_mask_from_input_mask(from_tensor, to_mask):
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_shape = get_shape_list(to_mask, expected_rank=2)
to_seq_length = to_shape[1]
to_mask = tf.cast(
tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)
# We don't assume that `from_tensor` is a mask (although it could be). We
# don't actually care if we attend *from* padding tokens (only *to* padding)
# tokens so we create a tensor of all ones.
#
# `broadcast_ones` = [batch_size, from_seq_length, 1]
broadcast_ones = tf.ones(
shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
# Here we broadcast along two dimensions to create the mask.
mask = broadcast_ones * to_mask
return mask
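# Illustrative sketch (not part of the original file): broadcast a [2, 4]
# padding mask to the [batch, from_seq, to_seq] mask attention_layer consumes.
def _attention_mask_demo():
  ids = tf.zeros([2, 4], dtype=tf.int32)
  pad_mask = tf.constant([[1, 1, 0, 0], [1, 1, 1, 0]])
  return create_attention_mask_from_input_mask(ids, pad_mask)  # [2, 4, 4]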
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=512,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
do_return_2d_tensor=False,
batch_size=None,
from_seq_length=None,
to_seq_length=None):
def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
seq_length, width):
output_tensor = tf.reshape(
input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
from_tensor_2d = reshape_to_matrix(from_tensor)
to_tensor_2d = reshape_to_matrix(to_tensor)
# `query_layer` = [B*F, N*H]
query_layer = tf.layers.dense(
from_tensor_2d,
num_attention_heads * size_per_head,
activation=query_act,
name="query",
kernel_initializer=create_initializer(initializer_range))
# `key_layer` = [B*T, N*H]
key_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=key_act,
name="key",
kernel_initializer=create_initializer(initializer_range))
# `value_layer` = [B*T, N*H]
value_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=value_act,
name="value",
kernel_initializer=create_initializer(initializer_range))
# `query_layer` = [B, N, F, H]
query_layer = transpose_for_scores(query_layer, batch_size,
num_attention_heads, from_seq_length,
size_per_head)
# `key_layer` = [B, N, T, H]
key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
to_seq_length, size_per_head)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `attention_scores` = [B, N, F, T]
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `value_layer` = [B, T, N, H]
value_layer = tf.reshape(
value_layer,
[batch_size, to_seq_length, num_attention_heads, size_per_head])
# `value_layer` = [B, N, T, H]
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
# `context_layer` = [B, N, F, H]
context_layer = tf.matmul(attention_probs, value_layer)
# `context_layer` = [B, F, N, H]
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
# `context_layer` = [B*F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
# `context_layer` = [B, F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
return context_layer
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False):
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_width = input_shape[2]
# The Transformer performs sum residuals on all layers so the input needs
# to be the same as the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
# We keep the representation as a 2D tensor to avoid re-shaping it back and
# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
# the GPU/CPU but may not be free on the TPU, so we want to minimize them to
# help the optimizer.
prev_output = reshape_to_matrix(input_tensor)
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.variable_scope("layer_%d" % layer_idx):
layer_input = prev_output
with tf.variable_scope("attention"):
attention_heads = []
with tf.variable_scope("self"):
attention_head = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range,
do_return_2d_tensor=True,
batch_size=batch_size,
from_seq_length=seq_length,
to_seq_length=seq_length)
attention_heads.append(attention_head)
attention_output = None
if len(attention_heads) == 1:
attention_output = attention_heads[0]
else:
# In the case where we have other sequences, we just concatenate
# them to the self-attention head before the projection.
attention_output = tf.concat(attention_heads, axis=-1)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.variable_scope("output"):
attention_output = tf.layers.dense(
attention_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
attention_output = dropout(attention_output, hidden_dropout_prob)
attention_output = layer_norm(attention_output + layer_input)
# The activation is only applied to the "intermediate" hidden layer.
with tf.variable_scope("intermediate"):
intermediate_output = tf.layers.dense(
attention_output,
intermediate_size,
activation=intermediate_act_fn,
kernel_initializer=create_initializer(initializer_range))
# Down-project back to `hidden_size` then add the residual.
with tf.variable_scope("output"):
layer_output = tf.layers.dense(
intermediate_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
layer_output = dropout(layer_output, hidden_dropout_prob)
layer_output = layer_norm(layer_output + attention_output)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
final_outputs = []
for layer_output in all_layer_outputs:
final_output = reshape_from_matrix(layer_output, input_shape)
final_outputs.append(final_output)
return final_outputs
else:
final_output = reshape_from_matrix(prev_output, input_shape)
return final_output
def cnn_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False):
# if hidden_size % num_attention_heads != 0:
# raise ValueError(
# "The hidden size (%d) is not a multiple of the number of attention "
# "heads (%d)" % (hidden_size, num_attention_heads))
# attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_width = input_shape[2]
# The Transformer performs sum residuals on all layers so the input needs
# to be the same as the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
  # `tf.layers.conv1d` expects a rank-3 input [batch, seq_length, channels],
  # so the representation is kept 3D here.
  prev_output = input_tensor
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.variable_scope("cnn_layer_%d" % layer_idx):
layer_input = prev_output
with tf.variable_scope("cnn_compute"):
        # `hidden_size` filters keep the residual add below shape-compatible.
        cnn_output = tf.layers.conv1d(layer_input, hidden_size, layer_idx + 1,
                                      padding='same')
# The activation is only applied to the "intermediate" hidden layer.
with tf.variable_scope("intermediate"):
intermediate_output = tf.layers.dense(
cnn_output,
intermediate_size,
activation=intermediate_act_fn,
kernel_initializer=create_initializer(initializer_range))
# Down-project back to `hidden_size` then add the residual.
with tf.variable_scope("output"):
layer_output = tf.layers.dense(
intermediate_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
layer_output = dropout(layer_output, hidden_dropout_prob)
layer_output = layer_norm(layer_output + cnn_output)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
final_outputs = []
for layer_output in all_layer_outputs:
final_output = reshape_from_matrix(layer_output, input_shape)
final_outputs.append(final_output)
return final_outputs
else:
final_output = reshape_from_matrix(prev_output, input_shape)
return final_output
def get_shape_list(tensor, expected_rank=None, name=None):
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
def reshape_to_matrix(input_tensor):
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
| true
| true
|
1c4301397a55a47bf1f513bcb7d40a739fba14ec
| 2,984
|
py
|
Python
|
KNN/KNN.py
|
bserranoanton/Machine-learning
|
093f77a9317c68649c5b0aad32b9e53170700a11
|
[
"MIT"
] | null | null | null |
KNN/KNN.py
|
bserranoanton/Machine-learning
|
093f77a9317c68649c5b0aad32b9e53170700a11
|
[
"MIT"
] | null | null | null |
KNN/KNN.py
|
bserranoanton/Machine-learning
|
093f77a9317c68649c5b0aad32b9e53170700a11
|
[
"MIT"
] | null | null | null |
# k-nn algorithm
# Álvaro García Tenorio, Belén Serrano Antón
import operator
import math
class KNN:
    #This module contains the basic functions to implement the KNN algorithm
    # trainData represents the points used to train the algorithm; each point
    #is given as [coord1, ..., coordn, "class"]
    # k is the number of neighbors we are using
def __init__(self, trainData, k):
self.trainingData = trainData
self.k = k
    #Returns the Euclidean distance between two points, computed over their
    #first `length` coordinates
    def euclideanDistance(self, point1, point2, length):
distance = 0
for i in range(length):
distance += pow((point1[i] - point2[i]), 2)
return math.sqrt(distance)
    #Given a point, this function returns its k nearest neighbors
    #(the points themselves; the computed distances are dropped)
def getNeighbors(self, point):
        distances = [] #(trainPoint, distance) pairs between point and each
                       #point in the training data
        length = len(point)-1 #exclude the "class" element from the distance
for i in range(len(self.trainingData)):
dist = self.euclideanDistance(point, self.trainingData[i], length)
distances.append((self.trainingData[i], dist))
        distances.sort(key=operator.itemgetter(1)) #sort the pairs by
                                                   #distance, ascending
neighbors = [] #keeps the k nearest neighbors
for i in range(self.k):
neighbors.append(distances[i][0])
return neighbors
    #We calculate the class that appears most often in neighbors
    def calculateNeighborsClass(self, neighbors):
        count = {} #maps each class seen in neighbors to its count
        maxClassCount = 0 #number of times the most frequent class appears
        maxClass = 0 #class that appears most often in neighbors
        indexNeighborClass = len(neighbors[0])-1 #index of the element that
                                                 #keeps the class of a point
for i in range(self.k):
            if neighbors[i][indexNeighborClass] not in count:
                # The class at the ith index is not in the count dict;
                # initialize it to 1.
                count[neighbors[i][indexNeighborClass]] = 1
            else:
                # Found another item of class neighbors[i][indexNeighborClass];
                # increment its counter.
                count[neighbors[i][indexNeighborClass]] += 1
            if count[neighbors[i][indexNeighborClass]] > maxClassCount:
                maxClassCount = count[neighbors[i][indexNeighborClass]]
                maxClass = neighbors[i][indexNeighborClass]
        return maxClass
#Given a point we calculate its class
def classify(self, newPoint):
        neighbors = self.getNeighbors(newPoint)
        return self.calculateNeighborsClass(neighbors)
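#Illustrative example (not part of the original file): with k = 3 the point
#(0, 0) is labelled 'A', since two of its three nearest neighbors are 'A'
if __name__ == "__main__":
    trainSet = [[1, 1, 'A'], [1, 2, 'A'], [5, 5, 'B'], [6, 5, 'B']]
    knn = KNN(trainSet, 3)
    print(knn.classify([0, 0, '?'])) #prints A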
| 38.753247
| 77
| 0.586126
|
import operator
import math
class KNN:
def __init__(self, trainData, k):
self.trainingData = trainData
self.k = k
def euclideanDistance(self,point1, point2, length):
distance = 0
for i in range(length):
distance += pow((point1[i] - point2[i]), 2)
return math.sqrt(distance)
def getNeighbors(self, point):
distances = []
length = len(point)-1
for i in range(len(self.trainingData)):
dist = self.euclideanDistance(point, self.trainingData[i], length)
distances.append((self.trainingData[i], dist))
distances.sort(key=operator.itemgetter(1))
neighbors = []
for i in range(self.k):
neighbors.append(distances[i][0])
return neighbors
def calculateNeighborsClass(self, neighbors):
        count = {}
maxClassCount = 0
maxClass = 0
indexNeighborClass = len(neighbors[0])-1
for i in range(self.k):
            if neighbors[i][indexNeighborClass] not in count:
                count[neighbors[i][indexNeighborClass]] = 1
            else:
                count[neighbors[i][indexNeighborClass]] += 1
if(count[neighbors[i][indexNeighborClass]] > maxClassCount):
maxClassCount = count[neighbors[i][indexNeighborClass]]
maxClass = neighbors[i][indexNeighborClass]
        return maxClass
def classify(self, newPoint):
        neighbors = self.getNeighbors(newPoint)
        return self.calculateNeighborsClass(neighbors)
| true
| true
|
1c43014cf936d0e9e17435d3daa2068d346de02e
| 2,010
|
py
|
Python
|
ropper/common/abstract.py
|
cbayet/Ropper
|
66adeb0a1d4322ced69643c3be2552c057d116d2
|
[
"BSD-3-Clause"
] | 1,502
|
2015-01-07T09:11:08.000Z
|
2022-03-29T10:08:26.000Z
|
ropper/common/abstract.py
|
cbayet/Ropper
|
66adeb0a1d4322ced69643c3be2552c057d116d2
|
[
"BSD-3-Clause"
] | 126
|
2015-03-10T15:32:26.000Z
|
2022-03-03T08:30:10.000Z
|
ropper/common/abstract.py
|
cbayet/Ropper
|
66adeb0a1d4322ced69643c3be2552c057d116d2
|
[
"BSD-3-Clause"
] | 214
|
2015-03-10T00:17:16.000Z
|
2022-03-19T07:04:08.000Z
|
# coding=utf-8
# Copyright 2018 Sascha Schirra
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import *
class AbstractSingletonMeta(ABCMeta):
def __init__(self, name, bases, namespace):
super(AbstractSingletonMeta, self).__init__(name, bases, namespace)
self._instance = None
def __call__(self):
if not self._instance:
self._instance = super(AbstractSingletonMeta, self).__call__()
return self._instance
Abstract = ABCMeta('Abstract', (), {})
AbstractSingleton = AbstractSingletonMeta('AbstractSingleton', (), {})
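# Illustrative sketch (not part of the original file; the class name is
# hypothetical): subclasses of AbstractSingleton are instantiated at most
# once, so repeated calls return the same object.
class _SingletonExample(AbstractSingleton):
    pass
assert _SingletonExample() is _SingletonExample()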
| 42.765957
| 82
| 0.765672
|
from abc import *
class AbstractSingletonMeta(ABCMeta):
def __init__(self, name, bases, namespace):
super(AbstractSingletonMeta, self).__init__(name, bases, namespace)
self._instance = None
def __call__(self):
if not self._instance:
self._instance = super(AbstractSingletonMeta, self).__call__()
return self._instance
Abstract = ABCMeta('Abstract', (), {})
AbstractSingleton = AbstractSingletonMeta('AbstractSingleton', (), {})
| true
| true
|
1c430169866ca2e27445a302f4d0d86c027f6c3b
| 104
|
py
|
Python
|
Problems/Find even/task.py
|
gabrielizalo/jetbrains-academy-zookeeper
|
467b43da3cb81f82987daf6b063eb2078d476d4f
|
[
"MIT"
] | null | null | null |
Problems/Find even/task.py
|
gabrielizalo/jetbrains-academy-zookeeper
|
467b43da3cb81f82987daf6b063eb2078d476d4f
|
[
"MIT"
] | null | null | null |
Problems/Find even/task.py
|
gabrielizalo/jetbrains-academy-zookeeper
|
467b43da3cb81f82987daf6b063eb2078d476d4f
|
[
"MIT"
] | null | null | null |
user_number = int(input())
counter = 2
while counter < user_number:
print(counter)
counter += 2
| 17.333333
| 28
| 0.673077
|
user_number = int(input())
counter = 2
while counter < user_number:
print(counter)
counter += 2
| true
| true
|
1c4302180ab7ea072a9425eafffdf70403bb70e8
| 462
|
py
|
Python
|
1-100/61-70/62-uniquePath/uniquePath-dp.py
|
xuychen/Leetcode
|
c8bf33af30569177c5276ffcd72a8d93ba4c402a
|
[
"MIT"
] | null | null | null |
1-100/61-70/62-uniquePath/uniquePath-dp.py
|
xuychen/Leetcode
|
c8bf33af30569177c5276ffcd72a8d93ba4c402a
|
[
"MIT"
] | null | null | null |
1-100/61-70/62-uniquePath/uniquePath-dp.py
|
xuychen/Leetcode
|
c8bf33af30569177c5276ffcd72a8d93ba4c402a
|
[
"MIT"
] | null | null | null |
class Solution(object):
def uniquePaths(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
if not m or not n:
return 0
matrix = [[1] * n] + [[1] + [0] * (n - 1) for i in range(m-1)]
for i in range(1, m):
for j in range(1, n):
matrix[i][j] = matrix[i-1][j] + matrix[i][j-1]
return matrix[-1][-1]
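if __name__ == "__main__":
    # Sanity check (illustrative, not part of the original file): a 3 x 7
    # grid has C(3 + 7 - 2, 3 - 1) = C(8, 2) = 28 monotone lattice paths,
    # which the DP recurrence matrix[i][j] = matrix[i-1][j] + matrix[i][j-1]
    # reproduces.
    assert Solution().uniquePaths(3, 7) == 28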
| 25.666667
| 70
| 0.374459
|
class Solution(object):
def uniquePaths(self, m, n):
if not m or not n:
return 0
matrix = [[1] * n] + [[1] + [0] * (n - 1) for i in range(m-1)]
for i in range(1, m):
for j in range(1, n):
matrix[i][j] = matrix[i-1][j] + matrix[i][j-1]
return matrix[-1][-1]
| true
| true
|
1c4302c6b4699ff6d29882340803de02d9f56132
| 1,478
|
py
|
Python
|
tests/test_blink1.py
|
vmalloc/pytest-blink1
|
cd783203dac4ffa0b43d95ac8c448a92888a7744
|
[
"MIT"
] | 3
|
2017-04-21T19:38:55.000Z
|
2019-05-10T13:15:48.000Z
|
tests/test_blink1.py
|
vmalloc/pytest-blink1
|
cd783203dac4ffa0b43d95ac8c448a92888a7744
|
[
"MIT"
] | null | null | null |
tests/test_blink1.py
|
vmalloc/pytest-blink1
|
cd783203dac4ffa0b43d95ac8c448a92888a7744
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
def test_bar_fixture(testdir):
"""Make sure that pytest accepts our fixture."""
# create a temporary pytest test module
testdir.makepyfile("""
def test_sth(bar):
assert bar == "europython2015"
""")
# run pytest with the following cmd args
result = testdir.runpytest(
'--foo=europython2015',
'-v'
)
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
'*::test_sth PASSED',
])
    # make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
def test_help_message(testdir):
result = testdir.runpytest(
'--help',
)
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
'blink1:',
'*--foo=DEST_FOO*Set the value for the fixture "bar".',
])
def test_hello_ini_setting(testdir):
testdir.makeini("""
[pytest]
HELLO = world
""")
testdir.makepyfile("""
import pytest
@pytest.fixture
def hello(request):
return request.config.getini('HELLO')
def test_hello_world(hello):
assert hello == 'world'
""")
result = testdir.runpytest('-v')
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
'*::test_hello_world PASSED',
])
    # make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
| 22.738462
| 66
| 0.600135
|
def test_bar_fixture(testdir):
testdir.makepyfile("""
def test_sth(bar):
assert bar == "europython2015"
""")
result = testdir.runpytest(
'--foo=europython2015',
'-v'
)
result.stdout.fnmatch_lines([
'*::test_sth PASSED',
])
assert result.ret == 0
def test_help_message(testdir):
result = testdir.runpytest(
'--help',
)
result.stdout.fnmatch_lines([
'blink1:',
'*--foo=DEST_FOO*Set the value for the fixture "bar".',
])
def test_hello_ini_setting(testdir):
testdir.makeini("""
[pytest]
HELLO = world
""")
testdir.makepyfile("""
import pytest
@pytest.fixture
def hello(request):
return request.config.getini('HELLO')
def test_hello_world(hello):
assert hello == 'world'
""")
result = testdir.runpytest('-v')
result.stdout.fnmatch_lines([
'*::test_hello_world PASSED',
])
assert result.ret == 0
| true
| true
|
1c43035982caa5106ff95faf47433fe555b5c886
| 4,005
|
py
|
Python
|
pyACA/computeNoveltyFunction.py
|
RichardYang40148/pyACA-1
|
870d100ed232cca5a890570426116f70cd0736c8
|
[
"MIT"
] | null | null | null |
pyACA/computeNoveltyFunction.py
|
RichardYang40148/pyACA-1
|
870d100ed232cca5a890570426116f70cd0736c8
|
[
"MIT"
] | null | null | null |
pyACA/computeNoveltyFunction.py
|
RichardYang40148/pyACA-1
|
870d100ed232cca5a890570426116f70cd0736c8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
computeNoveltyFunction
computes the novelty function for onset detection
supported novelty measures are:
'Flux',
'Laroche',
'Hainsworth'
Args:
cNoveltyName: name of the novelty measure
afAudioData: array with floating point audio data.
f_s: sample rate
afWindow: FFT window of length iBlockLength (default: hann)
iBlockLength: internal block length (default: 4096 samples)
iHopLength: internal hop length (default: 2048 samples)
Returns:
d novelty function
t time stamps
iPeaks indices of picked onset times
"""
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import spectrogram
from scipy.signal import filtfilt
from scipy.signal import find_peaks
from ToolComputeHann import ToolComputeHann
def computeNoveltyFunction(cNoveltyName, afAudioData, f_s, afWindow=None, iBlockLength=4096, iHopLength=512):
# compute window function for FFT
if afWindow is None:
afWindow = ToolComputeHann(iBlockLength)
assert(afWindow.shape[0] == iBlockLength), "parameter error: invalid window dimension"
mypackage = __import__('Novelty' + cNoveltyName)
hNoveltyFunc = getattr(mypackage, 'Novelty' + cNoveltyName)
# initialization
fLengthLpInS = 0.3
iLengthLp = np.max([2, math.ceil(fLengthLpInS*f_s/iHopLength)])
# pre-processing: downmixing
if afAudioData.ndim > 1:
afAudioData = afAudioData.mean(axis=1)
# pre-processing: normalization
    fNorm = np.max(np.abs(afAudioData))
if fNorm != 0:
afAudioData = afAudioData/fNorm
# in the real world, we would do this block by block...
[f,t,X] = spectrogram( afAudioData,
f_s,
afWindow,
iBlockLength,
iBlockLength - iHopLength,
iBlockLength,
False,
True,
'spectrum')
# scale the same as for matlab
X = np.sqrt(X/2)
# novelty function
d = hNoveltyFunc(X,f_s)
# smooth novelty function
    b = np.ones(10) / 10
    d = filtfilt(b, 1, d)
    d[d < 0] = 0
# compute threshold
    b = np.ones(iLengthLp) / iLengthLp
    G_T = .5 * np.mean(d[np.arange(1, d.shape[0])]) + filtfilt(b, 1, d)
    # find local maxima above the threshold
    iPeaks = find_peaks(d - G_T, height=0)
return (d,t,iPeaks[0])
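# Illustrative sketch (not part of the original file): run the 'Flux' novelty
# measure, listed as supported above, on one second of synthetic audio with
# two clicks. Assumes a NoveltyFlux module is importable, as the dynamic
# __import__ above requires; the function name here is hypothetical.
def demoNoveltyFunction():
    f_s = 44100
    afAudioData = np.zeros(f_s)
    afAudioData[11025] = 1.0
    afAudioData[33075] = 1.0
    [d, t, iPeaks] = computeNoveltyFunction("Flux", afAudioData, f_s)
    return t[iPeaks]  # approximate onset times, near 0.25 s and 0.75 s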
def computeNoveltyFunctionCl(cPath, cNoveltyName):
from ToolReadAudio import ToolReadAudio
[f_s,afAudioData] = ToolReadAudio(cPath)
#afAudioData = np.sin(2*np.pi * np.arange(f_s*1)*440./f_s)
[d,t,iPeaks] = computeNoveltyFunction(cNoveltyName, afAudioData, f_s)
# plot feature output
if bPlotOutput:
plt.plot(t,d)
return (d,t,iPeaks)
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser(description='Compute the novelty function of a wav file')
parser.add_argument('--infile', metavar='path', required=False,
help='path to input audio file')
parser.add_argument('--noveltyname', metavar='string', required=False,
help='novelty measure name in the format NoveltyFlux')
parser.add_argument('--plotoutput', metavar='bool', required=False,
help='option to plot the output')
# retrieve command line args
cPath = parser.parse_args().infile
cNoveltyName = parser.parse_args().noveltyname
bPlotOutput = parser.parse_args().plotoutput
#only for debugging
if __debug__:
if not cPath:
cPath = "c:/temp/test.wav"
if not cNoveltyName:
cNoveltyName = "Laroche"
if not bPlotOutput:
bPlotOutput = True
# call the function
computeNoveltyFunctionCl(cPath, cNoveltyName)
| 29.88806
| 109
| 0.616729
|
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import spectrogram
from scipy.signal import filtfilt
from scipy.signal import find_peaks
from ToolComputeHann import ToolComputeHann
def computeNoveltyFunction(cNoveltyName, afAudioData, f_s, afWindow=None, iBlockLength=4096, iHopLength=512):
if afWindow is None:
afWindow = ToolComputeHann(iBlockLength)
assert(afWindow.shape[0] == iBlockLength), "parameter error: invalid window dimension"
mypackage = __import__('Novelty' + cNoveltyName)
hNoveltyFunc = getattr(mypackage, 'Novelty' + cNoveltyName)
fLengthLpInS = 0.3
iLengthLp = np.max([2, math.ceil(fLengthLpInS*f_s/iHopLength)])
if afAudioData.ndim > 1:
afAudioData = afAudioData.mean(axis=1)
    fNorm = np.max(np.abs(afAudioData))
if fNorm != 0:
afAudioData = afAudioData/fNorm
[f,t,X] = spectrogram( afAudioData,
f_s,
afWindow,
iBlockLength,
iBlockLength - iHopLength,
iBlockLength,
False,
True,
'spectrum')
X = np.sqrt(X/2)
d = hNoveltyFunc(X,f_s)
    b = np.ones(10) / 10
    d = filtfilt(b, 1, d)
    d[d < 0] = 0
    b = np.ones(iLengthLp) / iLengthLp
    G_T = .5 * np.mean(d[np.arange(1, d.shape[0])]) + filtfilt(b, 1, d)
    iPeaks = find_peaks(d - G_T, height=0)
return (d,t,iPeaks[0])
def computeNoveltyFunctionCl(cPath, cNoveltyName):
from ToolReadAudio import ToolReadAudio
[f_s,afAudioData] = ToolReadAudio(cPath)
[d,t,iPeaks] = computeNoveltyFunction(cNoveltyName, afAudioData, f_s)
if bPlotOutput:
plt.plot(t,d)
return (d,t,iPeaks)
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser(description='Compute the novelty function of a wav file')
parser.add_argument('--infile', metavar='path', required=False,
help='path to input audio file')
parser.add_argument('--noveltyname', metavar='string', required=False,
help='novelty measure name in the format NoveltyFlux')
parser.add_argument('--plotoutput', metavar='bool', required=False,
help='option to plot the output')
cPath = parser.parse_args().infile
cNoveltyName = parser.parse_args().noveltyname
bPlotOutput = parser.parse_args().plotoutput
if __debug__:
if not cPath:
cPath = "c:/temp/test.wav"
if not cNoveltyName:
cNoveltyName = "Laroche"
if not bPlotOutput:
bPlotOutput = True
computeNoveltyFunctionCl(cPath, cNoveltyName)
| true
| true
|
1c43037b1e1023ff562de4a07159cbdc5fd8f81b
| 335
|
py
|
Python
|
Prog4comp-SL-HPL-Extra/src/file_handling_numpy.py
|
computational-medicine/BMED360-2021
|
2c6052b9affedf1fee23c89d23941bf08eb2614c
|
[
"MIT"
] | 2
|
2021-04-19T23:22:17.000Z
|
2021-04-20T14:04:58.000Z
|
Prog4comp-SL-HPL-Extra/src/file_handling_numpy.py
|
computational-medicine/BMED360-2021
|
2c6052b9affedf1fee23c89d23941bf08eb2614c
|
[
"MIT"
] | null | null | null |
Prog4comp-SL-HPL-Extra/src/file_handling_numpy.py
|
computational-medicine/BMED360-2021
|
2c6052b9affedf1fee23c89d23941bf08eb2614c
|
[
"MIT"
] | 2
|
2020-03-26T17:15:13.000Z
|
2020-05-25T08:10:06.000Z
|
filename = 'tmp.dat'
import numpy
data = numpy.loadtxt(filename, comments='#')
x = data[:,0]
y = data[:,1]
data[:,1] = numpy.log(y) # insert transformed y back in array
filename = 'tmp_out.dat'
outfile = open(filename, 'w') # open file for writing
outfile.write('# x and y coordinates\n')
numpy.savetxt(outfile, data, fmt='%10.5f')
outfile.close() # close so the written rows are flushed to disk
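# Illustrative check (not part of the original script): read the file back to
# confirm the written columns parse as floats again.
check = numpy.loadtxt(filename, comments='#')
assert check.shape == data.shape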
| 27.916667
| 62
| 0.677612
|
filename = 'tmp.dat'
import numpy
data = numpy.loadtxt(filename, comments='#')
x = data[:,0]
y = data[:,1]
data[:,1] = numpy.log(y)
filename = 'tmp_out.dat'
outfile = open(filename, 'w')
outfile.write('# x and y coordinates\n')
numpy.savetxt(outfile, data, fmt='%10.5f')
outfile.close()
| true
| true
|
1c4304a00b57f2257d41f5489361bfdb9ff354fc
| 1,265
|
py
|
Python
|
test/python/converters/test_circuit_to_instruction.py
|
jagunnels/qiskit-sdk-py
|
153cdde972e65c0f23675bbe17c93e18be27bd51
|
[
"Apache-2.0"
] | null | null | null |
test/python/converters/test_circuit_to_instruction.py
|
jagunnels/qiskit-sdk-py
|
153cdde972e65c0f23675bbe17c93e18be27bd51
|
[
"Apache-2.0"
] | null | null | null |
test/python/converters/test_circuit_to_instruction.py
|
jagunnels/qiskit-sdk-py
|
153cdde972e65c0f23675bbe17c93e18be27bd51
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""Tests for the converters."""
import unittest
from qiskit.converters import circuit_to_instruction
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.test import QiskitTestCase
class TestCircuitToInstruction(QiskitTestCase):
"""Test Circuit to Instruction."""
def test_flatten_circuit_registers(self):
"""Check correct flattening"""
qr1 = QuantumRegister(4, 'qr1')
qr2 = QuantumRegister(3, 'qr2')
qr3 = QuantumRegister(3, 'qr3')
cr1 = ClassicalRegister(4, 'cr1')
cr2 = ClassicalRegister(1, 'cr2')
circ = QuantumCircuit(qr1, qr2, qr3, cr1, cr2)
circ.cx(qr1[1], qr2[2])
circ.measure(qr3[0], cr2[0])
inst = circuit_to_instruction(circ)
q = QuantumRegister(10, 'q')
c = ClassicalRegister(5, 'c')
self.assertEqual(inst.definition[0][1], [q[1], q[6]])
self.assertEqual(inst.definition[1][1], [q[7]])
self.assertEqual(inst.definition[1][2], [c[4]])
if __name__ == '__main__':
unittest.main(verbosity=2)
| 30.119048
| 77
| 0.658498
|
import unittest
from qiskit.converters import circuit_to_instruction
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.test import QiskitTestCase
class TestCircuitToInstruction(QiskitTestCase):
def test_flatten_circuit_registers(self):
qr1 = QuantumRegister(4, 'qr1')
qr2 = QuantumRegister(3, 'qr2')
qr3 = QuantumRegister(3, 'qr3')
cr1 = ClassicalRegister(4, 'cr1')
cr2 = ClassicalRegister(1, 'cr2')
circ = QuantumCircuit(qr1, qr2, qr3, cr1, cr2)
circ.cx(qr1[1], qr2[2])
circ.measure(qr3[0], cr2[0])
inst = circuit_to_instruction(circ)
q = QuantumRegister(10, 'q')
c = ClassicalRegister(5, 'c')
self.assertEqual(inst.definition[0][1], [q[1], q[6]])
self.assertEqual(inst.definition[1][1], [q[7]])
self.assertEqual(inst.definition[1][2], [c[4]])
if __name__ == '__main__':
unittest.main(verbosity=2)
| true
| true
|
1c4305e9885301fd71ab64074c7006c2a059a4e5
| 13,730
|
py
|
Python
|
esp-link/flash-tool/esptool-master/esptool-master/espressif/efuse/esp32c3/operations.py
|
km-tek/stm32_iot_link
|
4791dd6cdd544f145e1de9750a63918183b15dba
|
[
"MIT"
] | null | null | null |
esp-link/flash-tool/esptool-master/esptool-master/espressif/efuse/esp32c3/operations.py
|
km-tek/stm32_iot_link
|
4791dd6cdd544f145e1de9750a63918183b15dba
|
[
"MIT"
] | null | null | null |
esp-link/flash-tool/esptool-master/esptool-master/espressif/efuse/esp32c3/operations.py
|
km-tek/stm32_iot_link
|
4791dd6cdd544f145e1de9750a63918183b15dba
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# This file includes the operations with eFuses for ESP32-C3 chip
#
# Copyright (C) 2020 Espressif Systems (Shanghai) PTE LTD
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import division, print_function
import argparse
import os # noqa: F401. It is used in IDF scripts
import espsecure
import esptool
from . import fields
from .. import util
from ..base_operations import (ONLY_BURN_AT_END, add_common_commands, add_force_write_always, burn_bit, burn_block_data, # noqa: F401
burn_efuse, dump, read_protect_efuse, summary, write_protect_efuse) # noqa: F401
def protect_options(p):
p.add_argument('--no-write-protect', help='Disable write-protecting of the key. The key remains writable. '
'(The keys use the RS coding scheme that does not support post-write data changes. Forced write can damage RS encoding bits.)'
' The write-protecting of keypurposes does not depend on the option, it will be set anyway.', action='store_true')
    p.add_argument('--no-read-protect', help='Disable read-protecting of the key. The key remains readable to software. '
                   'The key with keypurpose[USER, RESERVED and *_DIGEST] will remain readable anyway. '
                   'For the rest of the keypurposes, read-protection will be defined by the option (read-protect by default).', action='store_true')
def add_commands(subparsers, efuses):
add_common_commands(subparsers, efuses)
burn_key = subparsers.add_parser('burn_key', help='Burn the key block with the specified name')
protect_options(burn_key)
add_force_write_always(burn_key)
burn_key.add_argument('block', help='Key block to burn', action='append', choices=efuses.BLOCKS_FOR_KEYS)
burn_key.add_argument('keyfile', help='File containing 256 bits of binary key data', action='append', type=argparse.FileType('rb'))
burn_key.add_argument('keypurpose', help='Purpose to set.', action='append', choices=fields.EfuseKeyPurposeField.KEY_PURPOSES_NAME)
for _ in efuses.BLOCKS_FOR_KEYS:
burn_key.add_argument('block', help='Key block to burn', nargs="?", action='append', metavar="BLOCK", choices=efuses.BLOCKS_FOR_KEYS)
burn_key.add_argument('keyfile', help='File containing 256 bits of binary key data', nargs="?", action='append', metavar="KEYFILE",
type=argparse.FileType('rb'))
burn_key.add_argument('keypurpose', help='Purpose to set.', nargs="?", action='append', metavar="KEYPURPOSE",
choices=fields.EfuseKeyPurposeField.KEY_PURPOSES_NAME)
burn_key_digest = subparsers.add_parser('burn_key_digest', help='Parse a RSA public key and burn the digest to key efuse block')
protect_options(burn_key_digest)
add_force_write_always(burn_key_digest)
burn_key_digest.add_argument('block', help='Key block to burn', action='append', choices=efuses.BLOCKS_FOR_KEYS)
burn_key_digest.add_argument('keyfile', help='Key file to digest (PEM format)', action='append', type=argparse.FileType('rb'))
burn_key_digest.add_argument('keypurpose', help='Purpose to set.', action='append', choices=fields.EfuseKeyPurposeField.DIGEST_KEY_PURPOSES)
for _ in efuses.BLOCKS_FOR_KEYS:
burn_key_digest.add_argument('block', help='Key block to burn', nargs="?", action='append', metavar="BLOCK", choices=efuses.BLOCKS_FOR_KEYS)
burn_key_digest.add_argument('keyfile', help='Key file to digest (PEM format)', nargs="?", action='append', metavar="KEYFILE",
type=argparse.FileType('rb'))
burn_key_digest.add_argument('keypurpose', help='Purpose to set.', nargs="?", action='append', metavar="KEYPURPOSE",
choices=fields.EfuseKeyPurposeField.DIGEST_KEY_PURPOSES)
p = subparsers.add_parser('set_flash_voltage',
help='Permanently set the internal flash voltage regulator to either 1.8V, 3.3V or OFF. '
'This means GPIO45 can be high or low at reset without changing the flash voltage.')
p.add_argument('voltage', help='Voltage selection', choices=['1.8V', '3.3V', 'OFF'])
p = subparsers.add_parser('burn_custom_mac', help='Burn a 48-bit Custom MAC Address to EFUSE BLOCK3.')
p.add_argument('mac', help='Custom MAC Address to burn given in hexadecimal format with bytes separated by colons'
' (e.g. AA:CD:EF:01:02:03).', type=fields.base_fields.CheckArgValue(efuses, "CUSTOM_MAC"))
add_force_write_always(p)
p = subparsers.add_parser('get_custom_mac', help='Prints the Custom MAC Address.')
def burn_custom_mac(esp, efuses, args):
efuses["CUSTOM_MAC"].save(args.mac)
if ONLY_BURN_AT_END:
return
efuses.burn_all()
get_custom_mac(esp, efuses, args)
def get_custom_mac(esp, efuses, args):
print("Custom MAC Address: {}".format(efuses["CUSTOM_MAC"].get()))
def set_flash_voltage(esp, efuses, args):
raise esptool.FatalError("set_flash_voltage is not supported!")
def adc_info(esp, efuses, args):
print("")
if efuses["BLOCK2_VERSION"].get() == 1:
print("Temperature Sensor Calibration = {}C".format(efuses["TEMP_SENSOR_CAL"].get()))
print("")
print("ADC1 readings stored in efuse BLOCK2:")
print(" MODE0 D1 reading (250mV): {}".format(efuses["ADC1_MODE0_D1"].get()))
print(" MODE0 D2 reading (600mV): {}".format(efuses["ADC1_MODE0_D2"].get()))
print(" MODE1 D1 reading (250mV): {}".format(efuses["ADC1_MODE1_D1"].get()))
print(" MODE1 D2 reading (800mV): {}".format(efuses["ADC1_MODE1_D2"].get()))
print(" MODE2 D1 reading (250mV): {}".format(efuses["ADC1_MODE2_D1"].get()))
print(" MODE2 D2 reading (1000mV): {}".format(efuses["ADC1_MODE2_D2"].get()))
print(" MODE3 D1 reading (250mV): {}".format(efuses["ADC1_MODE3_D1"].get()))
print(" MODE3 D2 reading (2000mV): {}".format(efuses["ADC1_MODE3_D2"].get()))
print("")
print("ADC2 readings stored in efuse BLOCK2:")
print(" MODE0 D1 reading (250mV): {}".format(efuses["ADC2_MODE0_D1"].get()))
print(" MODE0 D2 reading (600mV): {}".format(efuses["ADC2_MODE0_D2"].get()))
print(" MODE1 D1 reading (250mV): {}".format(efuses["ADC2_MODE1_D1"].get()))
print(" MODE1 D2 reading (800mV): {}".format(efuses["ADC2_MODE1_D2"].get()))
print(" MODE2 D1 reading (250mV): {}".format(efuses["ADC2_MODE2_D1"].get()))
print(" MODE2 D2 reading (1000mV): {}".format(efuses["ADC2_MODE2_D2"].get()))
print(" MODE3 D1 reading (250mV): {}".format(efuses["ADC2_MODE3_D1"].get()))
print(" MODE3 D2 reading (2000mV): {}".format(efuses["ADC2_MODE3_D2"].get()))
else:
print("BLOCK2_VERSION = {}".format(efuses["BLOCK2_VERSION"].get_meaning()))
def burn_key(esp, efuses, args, digest=None):
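    # Burn one or more key blocks. When 'digest' is given (see burn_key_digest), precomputed digests are burned instead of the raw key file contents.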
if digest is None:
datafile_list = args.keyfile[0:len([name for name in args.keyfile if name is not None]):]
else:
datafile_list = digest[0:len([name for name in digest if name is not None]):]
efuses.force_write_always = args.force_write_always
block_name_list = args.block[0:len([name for name in args.block if name is not None]):]
keypurpose_list = args.keypurpose[0:len([name for name in args.keypurpose if name is not None]):]
util.check_duplicate_name_in_list(block_name_list)
if len(block_name_list) != len(datafile_list) or len(block_name_list) != len(keypurpose_list):
raise esptool.FatalError("The number of blocks (%d), datafile (%d) and keypurpose (%d) should be the same." %
(len(block_name_list), len(datafile_list), len(keypurpose_list)))
print("Burn keys to blocks:")
for block_name, datafile, keypurpose in zip(block_name_list, datafile_list, keypurpose_list):
efuse = None
for block in efuses.blocks:
if block_name == block.name or block_name in block.alias:
efuse = efuses[block.name]
if efuse is None:
raise esptool.FatalError("Unknown block name - %s" % (block_name))
num_bytes = efuse.bit_len // 8
block_num = efuses.get_index_block_by_name(block_name)
block = efuses.blocks[block_num]
if digest is None:
data = datafile.read()
else:
data = datafile
print(" - %s" % (efuse.name), end=" ")
revers_msg = None
if efuses[block.key_purpose_name].need_reverse(keypurpose):
revers_msg = "\tReversing byte order for AES-XTS hardware peripheral"
data = data[::-1]
print("-> [%s]" % (util.hexify(data, " ")))
if revers_msg:
print(revers_msg)
if len(data) != num_bytes:
raise esptool.FatalError("Incorrect key file size %d. Key file must be %d bytes (%d bits) of raw binary key data." %
(len(data), num_bytes, num_bytes * 8))
if efuses[block.key_purpose_name].need_rd_protect(keypurpose):
read_protect = False if args.no_read_protect else True
else:
read_protect = False
write_protect = not args.no_write_protect
# using efuse instead of a block gives the advantage of checking it as the whole field.
efuse.save(data)
disable_wr_protect_key_purpose = False
if efuses[block.key_purpose_name].get() != keypurpose:
if efuses[block.key_purpose_name].is_writeable():
print("\t'%s': '%s' -> '%s'." % (block.key_purpose_name, efuses[block.key_purpose_name].get(), keypurpose))
efuses[block.key_purpose_name].save(keypurpose)
disable_wr_protect_key_purpose = True
else:
raise esptool.FatalError("It is not possible to change '%s' to '%s' because write protection bit is set." %
(block.key_purpose_name, keypurpose))
else:
print("\t'%s' is already '%s'." % (block.key_purpose_name, keypurpose))
if efuses[block.key_purpose_name].is_writeable():
disable_wr_protect_key_purpose = True
if disable_wr_protect_key_purpose:
print("\tDisabling write to '%s'." % block.key_purpose_name)
efuses[block.key_purpose_name].disable_write()
if read_protect:
print("\tDisabling read to key block")
efuse.disable_read()
if write_protect:
print("\tDisabling write to key block")
efuse.disable_write()
print("")
if not write_protect:
print("Keys will remain writeable (due to --no-write-protect)")
if args.no_read_protect:
print("Keys will remain readable (due to --no-read-protect)")
if ONLY_BURN_AT_END:
return
efuses.burn_all()
print("Successful")
def burn_key_digest(esp, efuses, args):
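    # Digest each RSA public key (PEM) and delegate the actual burning to burn_key().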
digest_list = []
datafile_list = args.keyfile[0:len([name for name in args.keyfile if name is not None]):]
block_list = args.block[0:len([block for block in args.block if block is not None]):]
for block_name, datafile in zip(block_list, datafile_list):
efuse = None
for block in efuses.blocks:
if block_name == block.name or block_name in block.alias:
efuse = efuses[block.name]
if efuse is None:
raise esptool.FatalError("Unknown block name - %s" % (block_name))
num_bytes = efuse.bit_len // 8
digest = espsecure._digest_rsa_public_key(datafile)
if len(digest) != num_bytes:
raise esptool.FatalError("Incorrect digest size %d. Digest must be %d bytes (%d bits) of raw binary key data." %
(len(digest), num_bytes, num_bytes * 8))
digest_list.append(digest)
burn_key(esp, efuses, args, digest=digest_list)
def espefuse(esp, efuses, args, command):
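    # Re-parse 'command' with the espefuse argument parser so scripts can invoke operations programmatically.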
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='operation')
add_commands(subparsers, efuses)
cmd_line_args = parser.parse_args(command.split())
# copy arguments from args to cmd_line_args
vars(cmd_line_args).update(vars(args))
if cmd_line_args.operation is None:
parser.print_help()
parser.exit(1)
operation_func = globals()[cmd_line_args.operation]
# each 'operation' is a module-level function of the same name
operation_func(esp, efuses, cmd_line_args)
def execute_scripts(esp, efuses, args):
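    # Run the given eFuse scripts with burning deferred (ONLY_BURN_AT_END), then burn all queued changes at once.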
del args.operation
scripts = args.scripts
del args.scripts
global ONLY_BURN_AT_END
ONLY_BURN_AT_END = True
for file in scripts:
with open(file.name, 'r') as file:
exec(file.read())
if args.debug:
for block in efuses.blocks:
data = block.get_bitstring(from_read=False)
block.print_block(data, "regs_for_burn", args.debug)
efuses.burn_all()
| 49.566787
| 148
| 0.658704
|
from __future__ import division, print_function
import argparse
import os
import espsecure
import esptool
from . import fields
from .. import util
from ..base_operations import (ONLY_BURN_AT_END, add_common_commands, add_force_write_always, burn_bit, burn_block_data,
burn_efuse, dump, read_protect_efuse, summary, write_protect_efuse)
def protect_options(p):
p.add_argument('--no-write-protect', help='Disable write-protecting of the key. The key remains writable. '
'(The keys use the RS coding scheme that does not support post-write data changes. Forced write can damage RS encoding bits.)'
' The write-protecting of keypurposes does not depend on the option, it will be set anyway.', action='store_true')
    p.add_argument('--no-read-protect', help='Disable read-protecting of the key. The key remains readable to software. '
                   'The key with keypurpose[USER, RESERVED and *_DIGEST] will remain readable anyway. '
                   'For the rest of the keypurposes, read-protection will be defined by the option (read-protect by default).', action='store_true')
def add_commands(subparsers, efuses):
add_common_commands(subparsers, efuses)
burn_key = subparsers.add_parser('burn_key', help='Burn the key block with the specified name')
protect_options(burn_key)
add_force_write_always(burn_key)
burn_key.add_argument('block', help='Key block to burn', action='append', choices=efuses.BLOCKS_FOR_KEYS)
burn_key.add_argument('keyfile', help='File containing 256 bits of binary key data', action='append', type=argparse.FileType('rb'))
burn_key.add_argument('keypurpose', help='Purpose to set.', action='append', choices=fields.EfuseKeyPurposeField.KEY_PURPOSES_NAME)
for _ in efuses.BLOCKS_FOR_KEYS:
burn_key.add_argument('block', help='Key block to burn', nargs="?", action='append', metavar="BLOCK", choices=efuses.BLOCKS_FOR_KEYS)
burn_key.add_argument('keyfile', help='File containing 256 bits of binary key data', nargs="?", action='append', metavar="KEYFILE",
type=argparse.FileType('rb'))
burn_key.add_argument('keypurpose', help='Purpose to set.', nargs="?", action='append', metavar="KEYPURPOSE",
choices=fields.EfuseKeyPurposeField.KEY_PURPOSES_NAME)
burn_key_digest = subparsers.add_parser('burn_key_digest', help='Parse a RSA public key and burn the digest to key efuse block')
protect_options(burn_key_digest)
add_force_write_always(burn_key_digest)
burn_key_digest.add_argument('block', help='Key block to burn', action='append', choices=efuses.BLOCKS_FOR_KEYS)
burn_key_digest.add_argument('keyfile', help='Key file to digest (PEM format)', action='append', type=argparse.FileType('rb'))
burn_key_digest.add_argument('keypurpose', help='Purpose to set.', action='append', choices=fields.EfuseKeyPurposeField.DIGEST_KEY_PURPOSES)
for _ in efuses.BLOCKS_FOR_KEYS:
burn_key_digest.add_argument('block', help='Key block to burn', nargs="?", action='append', metavar="BLOCK", choices=efuses.BLOCKS_FOR_KEYS)
burn_key_digest.add_argument('keyfile', help='Key file to digest (PEM format)', nargs="?", action='append', metavar="KEYFILE",
type=argparse.FileType('rb'))
burn_key_digest.add_argument('keypurpose', help='Purpose to set.', nargs="?", action='append', metavar="KEYPURPOSE",
choices=fields.EfuseKeyPurposeField.DIGEST_KEY_PURPOSES)
p = subparsers.add_parser('set_flash_voltage',
help='Permanently set the internal flash voltage regulator to either 1.8V, 3.3V or OFF. '
'This means GPIO45 can be high or low at reset without changing the flash voltage.')
p.add_argument('voltage', help='Voltage selection', choices=['1.8V', '3.3V', 'OFF'])
p = subparsers.add_parser('burn_custom_mac', help='Burn a 48-bit Custom MAC Address to EFUSE BLOCK3.')
p.add_argument('mac', help='Custom MAC Address to burn given in hexadecimal format with bytes separated by colons'
' (e.g. AA:CD:EF:01:02:03).', type=fields.base_fields.CheckArgValue(efuses, "CUSTOM_MAC"))
add_force_write_always(p)
p = subparsers.add_parser('get_custom_mac', help='Prints the Custom MAC Address.')
def burn_custom_mac(esp, efuses, args):
efuses["CUSTOM_MAC"].save(args.mac)
if ONLY_BURN_AT_END:
return
efuses.burn_all()
get_custom_mac(esp, efuses, args)
def get_custom_mac(esp, efuses, args):
print("Custom MAC Address: {}".format(efuses["CUSTOM_MAC"].get()))
def set_flash_voltage(esp, efuses, args):
raise esptool.FatalError("set_flash_voltage is not supported!")
def adc_info(esp, efuses, args):
print("")
if efuses["BLOCK2_VERSION"].get() == 1:
print("Temperature Sensor Calibration = {}C".format(efuses["TEMP_SENSOR_CAL"].get()))
print("")
print("ADC1 readings stored in efuse BLOCK2:")
print(" MODE0 D1 reading (250mV): {}".format(efuses["ADC1_MODE0_D1"].get()))
print(" MODE0 D2 reading (600mV): {}".format(efuses["ADC1_MODE0_D2"].get()))
print(" MODE1 D1 reading (250mV): {}".format(efuses["ADC1_MODE1_D1"].get()))
print(" MODE1 D2 reading (800mV): {}".format(efuses["ADC1_MODE1_D2"].get()))
print(" MODE2 D1 reading (250mV): {}".format(efuses["ADC1_MODE2_D1"].get()))
print(" MODE2 D2 reading (1000mV): {}".format(efuses["ADC1_MODE2_D2"].get()))
print(" MODE3 D1 reading (250mV): {}".format(efuses["ADC1_MODE3_D1"].get()))
print(" MODE3 D2 reading (2000mV): {}".format(efuses["ADC1_MODE3_D2"].get()))
print("")
print("ADC2 readings stored in efuse BLOCK2:")
print(" MODE0 D1 reading (250mV): {}".format(efuses["ADC2_MODE0_D1"].get()))
print(" MODE0 D2 reading (600mV): {}".format(efuses["ADC2_MODE0_D2"].get()))
print(" MODE1 D1 reading (250mV): {}".format(efuses["ADC2_MODE1_D1"].get()))
print(" MODE1 D2 reading (800mV): {}".format(efuses["ADC2_MODE1_D2"].get()))
print(" MODE2 D1 reading (250mV): {}".format(efuses["ADC2_MODE2_D1"].get()))
print(" MODE2 D2 reading (1000mV): {}".format(efuses["ADC2_MODE2_D2"].get()))
print(" MODE3 D1 reading (250mV): {}".format(efuses["ADC2_MODE3_D1"].get()))
print(" MODE3 D2 reading (2000mV): {}".format(efuses["ADC2_MODE3_D2"].get()))
else:
print("BLOCK2_VERSION = {}".format(efuses["BLOCK2_VERSION"].get_meaning()))
def burn_key(esp, efuses, args, digest=None):
if digest is None:
datafile_list = args.keyfile[0:len([name for name in args.keyfile if name is not None]):]
else:
datafile_list = digest[0:len([name for name in digest if name is not None]):]
efuses.force_write_always = args.force_write_always
block_name_list = args.block[0:len([name for name in args.block if name is not None]):]
keypurpose_list = args.keypurpose[0:len([name for name in args.keypurpose if name is not None]):]
util.check_duplicate_name_in_list(block_name_list)
if len(block_name_list) != len(datafile_list) or len(block_name_list) != len(keypurpose_list):
raise esptool.FatalError("The number of blocks (%d), datafile (%d) and keypurpose (%d) should be the same." %
(len(block_name_list), len(datafile_list), len(keypurpose_list)))
print("Burn keys to blocks:")
for block_name, datafile, keypurpose in zip(block_name_list, datafile_list, keypurpose_list):
efuse = None
for block in efuses.blocks:
if block_name == block.name or block_name in block.alias:
efuse = efuses[block.name]
if efuse is None:
raise esptool.FatalError("Unknown block name - %s" % (block_name))
num_bytes = efuse.bit_len // 8
block_num = efuses.get_index_block_by_name(block_name)
block = efuses.blocks[block_num]
if digest is None:
data = datafile.read()
else:
data = datafile
print(" - %s" % (efuse.name), end=" ")
revers_msg = None
if efuses[block.key_purpose_name].need_reverse(keypurpose):
revers_msg = "\tReversing byte order for AES-XTS hardware peripheral"
data = data[::-1]
print("-> [%s]" % (util.hexify(data, " ")))
if revers_msg:
print(revers_msg)
if len(data) != num_bytes:
raise esptool.FatalError("Incorrect key file size %d. Key file must be %d bytes (%d bits) of raw binary key data." %
(len(data), num_bytes, num_bytes * 8))
if efuses[block.key_purpose_name].need_rd_protect(keypurpose):
read_protect = False if args.no_read_protect else True
else:
read_protect = False
write_protect = not args.no_write_protect
efuse.save(data)
disable_wr_protect_key_purpose = False
if efuses[block.key_purpose_name].get() != keypurpose:
if efuses[block.key_purpose_name].is_writeable():
print("\t'%s': '%s' -> '%s'." % (block.key_purpose_name, efuses[block.key_purpose_name].get(), keypurpose))
efuses[block.key_purpose_name].save(keypurpose)
disable_wr_protect_key_purpose = True
else:
raise esptool.FatalError("It is not possible to change '%s' to '%s' because write protection bit is set." %
(block.key_purpose_name, keypurpose))
else:
print("\t'%s' is already '%s'." % (block.key_purpose_name, keypurpose))
if efuses[block.key_purpose_name].is_writeable():
disable_wr_protect_key_purpose = True
if disable_wr_protect_key_purpose:
print("\tDisabling write to '%s'." % block.key_purpose_name)
efuses[block.key_purpose_name].disable_write()
if read_protect:
print("\tDisabling read to key block")
efuse.disable_read()
if write_protect:
print("\tDisabling write to key block")
efuse.disable_write()
print("")
if not write_protect:
print("Keys will remain writeable (due to --no-write-protect)")
if args.no_read_protect:
print("Keys will remain readable (due to --no-read-protect)")
if ONLY_BURN_AT_END:
return
efuses.burn_all()
print("Successful")
def burn_key_digest(esp, efuses, args):
digest_list = []
datafile_list = args.keyfile[0:len([name for name in args.keyfile if name is not None]):]
block_list = args.block[0:len([block for block in args.block if block is not None]):]
for block_name, datafile in zip(block_list, datafile_list):
efuse = None
for block in efuses.blocks:
if block_name == block.name or block_name in block.alias:
efuse = efuses[block.name]
if efuse is None:
raise esptool.FatalError("Unknown block name - %s" % (block_name))
num_bytes = efuse.bit_len // 8
digest = espsecure._digest_rsa_public_key(datafile)
if len(digest) != num_bytes:
raise esptool.FatalError("Incorrect digest size %d. Digest must be %d bytes (%d bits) of raw binary key data." %
(len(digest), num_bytes, num_bytes * 8))
digest_list.append(digest)
burn_key(esp, efuses, args, digest=digest_list)
def espefuse(esp, efuses, args, command):
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='operation')
add_commands(subparsers, efuses)
cmd_line_args = parser.parse_args(command.split())
vars(cmd_line_args).update(vars(args))
if cmd_line_args.operation is None:
parser.print_help()
parser.exit(1)
operation_func = globals()[cmd_line_args.operation]
operation_func(esp, efuses, cmd_line_args)
def execute_scripts(esp, efuses, args):
del args.operation
scripts = args.scripts
del args.scripts
global ONLY_BURN_AT_END
ONLY_BURN_AT_END = True
for file in scripts:
with open(file.name, 'r') as file:
exec(file.read())
if args.debug:
for block in efuses.blocks:
data = block.get_bitstring(from_read=False)
block.print_block(data, "regs_for_burn", args.debug)
efuses.burn_all()
| true
| true
|
1c43060d143e8a9a2e3eea79632f0c6fe9dda0e6
| 8,173
|
py
|
Python
|
libs/utilities.py
|
CaptainBoggle/bakerbot
|
ef93c8e636b0f1ee514a0b1cef2ab43b315d974e
|
[
"MIT"
] | null | null | null |
libs/utilities.py
|
CaptainBoggle/bakerbot
|
ef93c8e636b0f1ee514a0b1cef2ab43b315d974e
|
[
"MIT"
] | 2
|
2021-06-19T11:09:02.000Z
|
2021-06-19T11:21:21.000Z
|
libs/utilities.py
|
CaptainBoggle/bakerbot
|
ef93c8e636b0f1ee514a0b1cef2ab43b315d974e
|
[
"MIT"
] | null | null | null |
from discord.ext import commands
import datetime as dt
import typing as t
import discord
import asyncio
import re
class Colours:
regular = 0xF5CC00 # Used for everything else.
success = 0x00C92C # Used for successful queries.
failure = 0xFF3300 # Used for error messages.
gaming = 0x0095FF # Used for game-related commands.
class Regexes:
# Used to detect URLs.
urls = re.compile(
r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))"
)
markdown = re.compile(r"([*_~`|>])")
@classmethod
def url(cls, string: str) -> bool:
# Return whether a given string is a URL or not.
return bool(re.match(cls.urls, string))
@classmethod
def escapemd(cls, string: str) -> str:
# Return a string with escaped markdown characters.
return cls.markdown.sub(r"\\\1", string)
class Icons:
tick = "https://upload.wikimedia.org/wikipedia/commons/thumb/7/73/Flat_tick_icon.svg/500px-Flat_tick_icon.svg.png"
cross = "https://upload.wikimedia.org/wikipedia/commons/thumb/8/8f/Flat_cross_icon.svg/500px-Flat_cross_icon.svg.png"
info = "https://icon-library.com/images/info-icon-svg/info-icon-svg-5.jpg"
illuminati = "https://upload.wikimedia.org/wikipedia/commons/a/a9/Illuminati_triangle_eye.png"
rfa = "https://upload.wikimedia.org/wikipedia/commons/4/40/Radio_Free_Asia_%28logo%29.png"
wikipedia = "https://upload.wikimedia.org/wikipedia/commons/thumb/8/80/Wikipedia-logo-v2.svg/500px-Wikipedia-logo-v2.svg.png"
class Embeds:
@staticmethod
def status(success: bool, desc: str) -> discord.Embed:
# Select colours/icon/title for success or failure embeds.
status = "Operation successful!" if success else "Operation failed!"
colour = Colours.success if success else Colours.failure
icon = Icons.tick if success else Icons.cross
# Create embed and set relevant data before returning.
embed = discord.Embed(colour=colour, timestamp=dt.datetime.utcnow())
embed.set_footer(text=status, icon_url=icon)
if desc is not None:
embed.description = desc
return embed
@staticmethod
def now() -> dt.datetime:
# Return current UTC time.
return dt.datetime.utcnow()
class Choices:
emojis = ["1️⃣", "2️⃣", "3️⃣", "4️⃣", "5️⃣", "6️⃣", "7️⃣", "8️⃣", "9️⃣"]
special = ["❌"]
@classmethod
async def prompt(
cls, ctx: commands.Context, embed: discord.Embed, n: int, author_only: bool
) -> t.Optional[int]:
# List of available reactions, including any special control emojis.
options = list(cls.emojis)[: min(n, len(cls.emojis))] + Choices.special
# Lambda check to ensure that the reaction/author/message is correct.
        # (Parenthesised: without the parentheses the conditional is parsed as
        # part of the first lambda's body, so the check would always pass.)
        check = (
            (lambda e, u: e.emoji in options
             and u == ctx.author
             and e.message.id == msg.id)
            if author_only
            else (lambda e, u: e.emoji in options and e.message.id == msg.id)
        )
# Send the embed and add reactions.
msg = await ctx.send(embed=embed)
for emoji in options:
await msg.add_reaction(emoji)
try: # Await a response from the user.
reaction, user = await ctx.bot.wait_for(
"reaction_add", timeout=30, check=check
)
except asyncio.TimeoutError:
fail = Embeds.status(success=False, desc="Timeout reached (30 seconds).")
await msg.clear_reactions()
await msg.edit(embed=fail)
return None
# Get the corresponding value.
await msg.delete()
if reaction.emoji == Choices.special[0]:
return None
return Choices.emojis.index(reaction.emoji)
class Paginator:
emojis = ["⏮", "◀", "▶", "⏭", "⏹"]
def __init__(
self,
embeds: t.Optional[t.List[discord.Embed]],
message: t.Optional[discord.Message],
) -> None:
self.embeds = embeds if embeds else []
self.message = message
self.index = 0
# Can't be initialised at startup.
self.users: t.List[discord.User] = None
self.template: discord.Embed = None
self.task: asyncio.Task = None
@property
def newembed(self) -> discord.Embed:
# Return a fresh template copy.
fresh = self.template.copy()
self.embeds.append(fresh)
return fresh
def add_description(self, line: str) -> None:
# Add lines while respecting the description character limit.
current = self.embeds[-1] if self.embeds else self.newembed
if len(current.description) + len(line) > 2048:
current = self.newembed
if current.description == discord.Embed.Empty:
current.description = line
else:
current.description += line
def add_field(self, name: str, value: str, inline: bool) -> None:
# Add fields while respecting the embed's character limit.
current = self.embeds[-1] if self.embeds else self.newembed
if len(current) + len(name) + len(value) > 6000 or len(current.fields) > 24:
current = self.newembed
current.add_field(
name=name,
value=value if len(value) < 1024 else f"{value[0:1021]}...",
inline=inline,
)
async def start(
self, ctx: commands.Context, users: t.Union[discord.User, t.List[discord.User]]
) -> None:
# Format our embeds before starting the paginator.
for index, embed in enumerate(self.embeds, 1):
if embed.footer.text != discord.Embed.Empty:
footer = f"{embed.footer.text} • "
else:
footer = ""
footer += f"Page {index}/{len(self.embeds)}"
embed.set_footer(text=footer, icon_url=embed.footer.icon_url)
# Makes sure self.message is a valid Message object.
if self.message is None:
self.message = await ctx.send(embed=self.embeds[0])
else:
await self.message.edit(embed=self.embeds[0])
# Create a task instead of using await so it doesn't block.
self.users = users if isinstance(users, list) else [users]
self.task = ctx.bot.loop.create_task(self.run(ctx=ctx))
async def stop(self) -> None:
# Cancel the paginator's task and (mostly) reset its internal state.
await self.message.clear_reactions()
self.task.cancel()
self.users = None
self.embeds = []
self.index = 0
async def run(self, ctx: commands.Context) -> None:
# Start the paginator. Only stops upon await self.stop() or a stop reaction.
check = (
lambda r, u: r.message.id == self.message.id
and r.emoji in Paginator.emojis
and u in self.users
)
for emoji in Paginator.emojis:
await self.message.add_reaction(emoji)
while True:
reaction, user = await ctx.bot.wait_for("reaction_add", check=check)
# Perform actions depending on reaction.
if reaction.emoji == Paginator.emojis[0]:
self.index = 0
elif reaction.emoji == Paginator.emojis[1]:
if 0 <= self.index - 1 < len(self.embeds):
self.index -= 1
elif reaction.emoji == Paginator.emojis[2]:
if 0 <= self.index + 1 < len(self.embeds):
self.index += 1
elif reaction.emoji == Paginator.emojis[3]:
self.index = len(self.embeds) - 1
elif reaction.emoji == Paginator.emojis[4]:
await self.message.clear_reactions()
embed = Embeds.status(success=True, desc="Paginator closed.")
return await self.message.edit(embed=embed)
# Edit the message to reflect any changes.
await self.message.edit(embed=self.embeds[self.index])
await self.message.remove_reaction(reaction, user)
| 37.490826
| 193
| 0.595253
|
from discord.ext import commands
import datetime as dt
import typing as t
import discord
import asyncio
import re
class Colours:
regular = 0xF5CC00
success = 0x00C92C
failure = 0xFF3300
gaming = 0x0095FF
class Regexes:
urls = re.compile(
r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))"
)
markdown = re.compile(r"([*_~`|>])")
@classmethod
def url(cls, string: str) -> bool:
# Return whether a given string is a URL or not.
return bool(re.match(cls.urls, string))
@classmethod
def escapemd(cls, string: str) -> str:
# Return a string with escaped markdown characters.
return cls.markdown.sub(r"\\\1", string)
class Icons:
tick = "https://upload.wikimedia.org/wikipedia/commons/thumb/7/73/Flat_tick_icon.svg/500px-Flat_tick_icon.svg.png"
cross = "https://upload.wikimedia.org/wikipedia/commons/thumb/8/8f/Flat_cross_icon.svg/500px-Flat_cross_icon.svg.png"
info = "https://icon-library.com/images/info-icon-svg/info-icon-svg-5.jpg"
illuminati = "https://upload.wikimedia.org/wikipedia/commons/a/a9/Illuminati_triangle_eye.png"
rfa = "https://upload.wikimedia.org/wikipedia/commons/4/40/Radio_Free_Asia_%28logo%29.png"
wikipedia = "https://upload.wikimedia.org/wikipedia/commons/thumb/8/80/Wikipedia-logo-v2.svg/500px-Wikipedia-logo-v2.svg.png"
class Embeds:
@staticmethod
def status(success: bool, desc: str) -> discord.Embed:
# Select colours/icon/title for success or failure embeds.
status = "Operation successful!" if success else "Operation failed!"
colour = Colours.success if success else Colours.failure
icon = Icons.tick if success else Icons.cross
# Create embed and set relevant data before returning.
embed = discord.Embed(colour=colour, timestamp=dt.datetime.utcnow())
embed.set_footer(text=status, icon_url=icon)
if desc is not None:
embed.description = desc
return embed
@staticmethod
def now() -> dt.datetime:
# Return current UTC time.
return dt.datetime.utcnow()
class Choices:
emojis = ["1️⃣", "2️⃣", "3️⃣", "4️⃣", "5️⃣", "6️⃣", "7️⃣", "8️⃣", "9️⃣"]
special = ["❌"]
@classmethod
async def prompt(
cls, ctx: commands.Context, embed: discord.Embed, n: int, author_only: bool
) -> t.Optional[int]:
# List of available reactions, including any special control emojis.
options = list(cls.emojis)[: min(n, len(cls.emojis))] + Choices.special
# Lambda check to ensure that the reaction/author/message is correct.
        check = (
            (lambda e, u: e.emoji in options
             and u == ctx.author
             and e.message.id == msg.id)
            if author_only
            else (lambda e, u: e.emoji in options and e.message.id == msg.id)
        )
# Send the embed and add reactions.
msg = await ctx.send(embed=embed)
for emoji in options:
await msg.add_reaction(emoji)
try: # Await a response from the user.
reaction, user = await ctx.bot.wait_for(
"reaction_add", timeout=30, check=check
)
except asyncio.TimeoutError:
fail = Embeds.status(success=False, desc="Timeout reached (30 seconds).")
await msg.clear_reactions()
await msg.edit(embed=fail)
return None
# Get the corresponding value.
await msg.delete()
if reaction.emoji == Choices.special[0]:
return None
return Choices.emojis.index(reaction.emoji)
class Paginator:
emojis = ["⏮", "◀", "▶", "⏭", "⏹"]
def __init__(
self,
embeds: t.Optional[t.List[discord.Embed]],
message: t.Optional[discord.Message],
) -> None:
self.embeds = embeds if embeds else []
self.message = message
self.index = 0
# Can't be initialised at startup.
self.users: t.List[discord.User] = None
self.template: discord.Embed = None
self.task: asyncio.Task = None
@property
def newembed(self) -> discord.Embed:
# Return a fresh template copy.
fresh = self.template.copy()
self.embeds.append(fresh)
return fresh
def add_description(self, line: str) -> None:
# Add lines while respecting the description character limit.
current = self.embeds[-1] if self.embeds else self.newembed
if len(current.description) + len(line) > 2048:
current = self.newembed
if current.description == discord.Embed.Empty:
current.description = line
else:
current.description += line
def add_field(self, name: str, value: str, inline: bool) -> None:
# Add fields while respecting the embed's character limit.
current = self.embeds[-1] if self.embeds else self.newembed
if len(current) + len(name) + len(value) > 6000 or len(current.fields) > 24:
current = self.newembed
current.add_field(
name=name,
value=value if len(value) < 1024 else f"{value[0:1021]}...",
inline=inline,
)
async def start(
self, ctx: commands.Context, users: t.Union[discord.User, t.List[discord.User]]
) -> None:
# Format our embeds before starting the paginator.
for index, embed in enumerate(self.embeds, 1):
if embed.footer.text != discord.Embed.Empty:
footer = f"{embed.footer.text} • "
else:
footer = ""
footer += f"Page {index}/{len(self.embeds)}"
embed.set_footer(text=footer, icon_url=embed.footer.icon_url)
# Makes sure self.message is a valid Message object.
if self.message is None:
self.message = await ctx.send(embed=self.embeds[0])
else:
await self.message.edit(embed=self.embeds[0])
# Create a task instead of using await so it doesn't block.
self.users = users if isinstance(users, list) else [users]
self.task = ctx.bot.loop.create_task(self.run(ctx=ctx))
async def stop(self) -> None:
# Cancel the paginator's task and (mostly) reset its internal state.
await self.message.clear_reactions()
self.task.cancel()
self.users = None
self.embeds = []
self.index = 0
async def run(self, ctx: commands.Context) -> None:
# Start the paginator. Only stops upon await self.stop() or a stop reaction.
check = (
lambda r, u: r.message.id == self.message.id
and r.emoji in Paginator.emojis
and u in self.users
)
for emoji in Paginator.emojis:
await self.message.add_reaction(emoji)
while True:
reaction, user = await ctx.bot.wait_for("reaction_add", check=check)
# Perform actions depending on reaction.
if reaction.emoji == Paginator.emojis[0]:
self.index = 0
elif reaction.emoji == Paginator.emojis[1]:
if 0 <= self.index - 1 < len(self.embeds):
self.index -= 1
elif reaction.emoji == Paginator.emojis[2]:
if 0 <= self.index + 1 < len(self.embeds):
self.index += 1
elif reaction.emoji == Paginator.emojis[3]:
self.index = len(self.embeds) - 1
elif reaction.emoji == Paginator.emojis[4]:
await self.message.clear_reactions()
embed = Embeds.status(success=True, desc="Paginator closed.")
return await self.message.edit(embed=embed)
# Edit the message to reflect any changes.
await self.message.edit(embed=self.embeds[self.index])
await self.message.remove_reaction(reaction, user)
| true
| true
|
1c4306ae7a77d99914614f2953aba5cc87f70902
| 49
|
py
|
Python
|
bumpv/client/logging/__init__.py
|
kylie-a/bumpversion
|
13a150daa02f29e7dd74b5240c54c7929ec176b8
|
[
"MIT"
] | null | null | null |
bumpv/client/logging/__init__.py
|
kylie-a/bumpversion
|
13a150daa02f29e7dd74b5240c54c7929ec176b8
|
[
"MIT"
] | null | null | null |
bumpv/client/logging/__init__.py
|
kylie-a/bumpversion
|
13a150daa02f29e7dd74b5240c54c7929ec176b8
|
[
"MIT"
] | 1
|
2019-11-24T15:36:19.000Z
|
2019-11-24T15:36:19.000Z
|
from .logging import get_logger_list, get_logger
| 24.5
| 48
| 0.857143
|
from .logging import get_logger_list, get_logger
| true
| true
|
1c4306e24fd9a2e2378f9454b22e4e8649dd41f6
| 6,543
|
py
|
Python
|
Segger/quaternion.py
|
gregdp/segger
|
d4c112fd43f0b088145e225f976335800874ebe5
|
[
"MIT"
] | 6
|
2019-03-27T22:53:12.000Z
|
2021-11-19T09:02:05.000Z
|
Segger/quaternion.py
|
gregdp/segger
|
d4c112fd43f0b088145e225f976335800874ebe5
|
[
"MIT"
] | 1
|
2017-03-07T16:52:30.000Z
|
2019-11-25T21:37:21.000Z
|
Segger/quaternion.py
|
gregdp/segger
|
d4c112fd43f0b088145e225f976335800874ebe5
|
[
"MIT"
] | 5
|
2019-05-30T19:10:01.000Z
|
2022-02-09T07:04:59.000Z
|
import chimera
import numpy
class Quaternion :
def __init__ ( self, s=1.0, v=chimera.Vector(0,0,0) ) :
self.s = s
self.v = v
def length (self) :
return numpy.sqrt ( (self.s*self.s) + self.v.sqlength() )
def rotation (self, angDegrees, axis) :
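        # set this quaternion to a rotation of angDegrees about the given axis (unit quaternion form)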
angRad = 0.5 * angDegrees * numpy.pi / 180.0
self.s = numpy.cos ( angRad )
self.v = axis * numpy.sin ( angRad )
def inverse ( self ) :
return Quaternion ( self.s, self.v * -1.0 )
def fromXform ( self, xf ) :
axis, angle = xf.getRotation ()
if angle >= -180.0 and angle <= 180.0 :
self.rotation ( angle, axis )
        elif angle < -180.0 :
            # 'blah' here was an undefined name (a debug trap); fall through to the axis-flipped rotation instead
            self.rotation ( angle, axis*-1.0 )
        else :
            self.rotation ( angle, axis*-1.0 )
m = numpy.reshape ( xf.getOpenGLMatrix(), (4,4) )
m = numpy.transpose ( m )
self.fromMatrix ( m )
def dot ( self, q ) :
return self.s * q.s + self.v * q.v
def angleTo ( self, q2 ) :
self.normalize()
q2.normalize()
return 2.0 * numpy.arccos ( self * q2 )
def normalize (self) :
l = self.length()
if (l > 1e-4) :
self.s = self.s / l
self.v = self.v / l
else :
raise ("quaternion normalization error")
def __mul__(self, x) :
if type(x) == type(1.0) or type(x) == numpy.float64 :
return Quaternion ( self.s*x, self.v*x )
else :
return self.dot ( x )
def __add__(self, x) :
return Quaternion ( self.s + x.s, self.v + x.v )
def __sub__(self, x) :
return Quaternion ( self.s - x.s, self.v - x.v )
def __copy__ (self) :
return Quaternion ( self.s, self.v.__copy__() )
def Xform (self) :
#self.normalize()
s = self.s
v = self.v
return chimera.Xform.xform (
1-2*v.y*v.y-2*v.z*v.z, 2*v.x*v.y-2*s*v.z, 2*v.x*v.z+2*s*v.y, 0,
2*v.x*v.y+2*s*v.z, 1-2*v.x*v.x-2*v.z*v.z, 2*v.y*v.z-2*s*v.x, 0,
2*v.x*v.z-2*s*v.y, 2*v.y*v.z+2*s*v.x, 1-2*v.x*v.x-2*v.y*v.y, 0
)
def matrix (self) :
#self.normalize()
s = self.s
v = self.v
return [
[1-2*v.y*v.y-2*v.z*v.z, 2*v.x*v.y-2*s*v.z, 2*v.x*v.z+2*s*v.y],
[2*v.x*v.y+2*s*v.z, 1-2*v.x*v.x-2*v.z*v.z, 2*v.y*v.z-2*s*v.x],
[2*v.x*v.z-2*s*v.y, 2*v.y*v.z+2*s*v.x, 1-2*v.x*v.x-2*v.y*v.y],
]
def fromMatrix ( self, rkRot ) :
# Algorithm in Ken Shoemake's article in 1987 SIGGRAPH course notes
# article "Quaternion Calculus and Fast Animation".
fTrace = rkRot[0,0] + rkRot[1,1] + rkRot[2,2]
fRoot = 0.0
if fTrace > 0.0 :
# |w| > 1/2, may as well choose w > 1/2
fRoot = numpy.sqrt (fTrace + 1.0) # 2w
self.s = 0.5 * fRoot;
fRoot = 0.5 / fRoot; # 1/(4w)
self.v[0] = (rkRot[2,1]-rkRot[1,2])*fRoot;
self.v[1] = (rkRot[0,2]-rkRot[2,0])*fRoot;
self.v[2] = (rkRot[1,0]-rkRot[0,1])*fRoot;
else :
# |w| <= 1/2
i = 0
if rkRot[1,1] > rkRot[0,0] :
i = 1
if rkRot[2,2] > rkRot[i,i] :
i = 2
j = (i + 1) % 3 # ms_iNext[i];
k = (j + 1) % 3 # ms_iNext[j];
fRoot = numpy.sqrt(rkRot[i,i]-rkRot[j,j]-rkRot[k,k]+1.0);
# Real* apfQuat[3] = { &m_afTuple[1], &m_afTuple[2], &m_afTuple[3] };
self.v[i] = 0.5 * fRoot # *apfQuat[i] = ((Real)0.5)*fRoot;
fRoot = 0.5 / fRoot
self.s = (rkRot[k,j]-rkRot[j,k])*fRoot
self.v[j] = (rkRot[j,i]+rkRot[i,j])*fRoot # *apfQuat[j]
self.v[k] = (rkRot[k,i]+rkRot[i,k])*fRoot # *apfQuat[k]
def mult (a, b) :
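    # Hamilton product of two quaternions a and b.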
return Quaternion (a.s*b.s - a.v*b.v, b.v*a.s + a.v*b.s + chimera.cross(a.v,b.v))
def slerp0 (p, q, t) :
cs = p.dot(q)
angle = numpy.arccos ( cs )
if abs (angle) > 0.0 :
sn = numpy.sin ( angle )
invSn = 1.0 / sn;
tAngle = t*angle;
c0 = numpy.sin(angle - tAngle)*invSn;
c1 = numpy.sin(tAngle)*invSn;
#mTuple[0] = coeff0*p.mTuple[0] + coeff1*q.mTuple[0];
#mTuple[1] = coeff0*p.mTuple[1] + coeff1*q.mTuple[1];
#mTuple[2] = coeff0*p.mTuple[2] + coeff1*q.mTuple[2];
#mTuple[3] = coeff0*p.mTuple[3] + coeff1*q.mTuple[3];
return Quaternion (p.s*c0+q.s*c1, p.v*c0 + q.v*c1)
else :
return Quaternion (p.s, chimera.Vector(p.v[0], p.v[1], p.v[2]))
def slerp (v0, v1, t) :
# http://number-none.com/product/Understanding%20Slerp,%20Then%20Not%20Using%20It/
#; Inputs are: unit vectors v0 and v1, scalar t
#; v0 and v1 are linearly independent
# Quaternion slerp(Quaternion const &v0, Quaternion const &v1, double t) {
# // v0 and v1 should be unit length or else
# // something broken will happen.
#
# // Compute the cosine of the angle between the two vectors.
# double dot = dot_product(v0, v1);
#
# const double DOT_THRESHOLD = 0.9995;
# if (dot > DOT_THRESHOLD) {
# // If the inputs are too close for comfort, linearly interpolate
# // and normalize the result.
#
# Quaternion result = v0 + t*(v1 - v0)
# result.normalize();
# return result;
# }
#
# Clamp(dot, -1, 1); // Robustness: Stay within domain of acos()
# double theta_0 = acos(dot); // theta_0 = angle between input vectors
# double theta = theta_0*t; // theta = angle between v0 and result
#
# Quaternion v2 = v1 - v0*dot
# v2.normalize(); // { v0, v2 } is now an orthonormal basis
#
# return v0*cos(theta) + v2*sin(theta);
dot = v0.dot(v1)
#print dot
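    # NOTE: 'if 1 or ...' short-circuits to normalized linear interpolation (nlerp); the true slerp path below is effectively dead code.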
if 1 or dot > 0.9995 :
r = v0 + (v1-v0) * t
r.normalize()
return r
if dot < -1.0 : dot = -1.0
if dot > 1.0 : dot = 1.0
theta_0 = numpy.arccos ( dot )
theta = theta_0*t
v2 = v1 - v0 * dot
v2.normalize()
r = v0 * numpy.cos(theta) + v2 * numpy.sin(theta)
if 0 :
# from http://graphics.cs.cmu.edu/nsp/course/15-464/Fall05/assignments/p245-shoemake.pdf
a0 = numpy.sin( (1-t) * theta_0 ) / numpy.sin(theta_0)
a1 = numpy.sin ( t * theta_0 ) / numpy.sin ( theta_0 )
r = v0 * a0 + v1 * a1
return r
| 29.740909
| 96
| 0.494116
|
import chimera
import numpy
class Quaternion :
def __init__ ( self, s=1.0, v=chimera.Vector(0,0,0) ) :
self.s = s
self.v = v
def length (self) :
return numpy.sqrt ( (self.s*self.s) + self.v.sqlength() )
def rotation (self, angDegrees, axis) :
angRad = 0.5 * angDegrees * numpy.pi / 180.0
self.s = numpy.cos ( angRad )
self.v = axis * numpy.sin ( angRad )
def inverse ( self ) :
return Quaternion ( self.s, self.v * -1.0 )
def fromXform ( self, xf ) :
axis, angle = xf.getRotation ()
if angle >= -180.0 and angle <= 180.0 :
self.rotation ( angle, axis )
        elif angle < -180.0 :
            self.rotation ( angle, axis*-1.0 )
        else :
            self.rotation ( angle, axis*-1.0 )
m = numpy.reshape ( xf.getOpenGLMatrix(), (4,4) )
m = numpy.transpose ( m )
self.fromMatrix ( m )
def dot ( self, q ) :
return self.s * q.s + self.v * q.v
def angleTo ( self, q2 ) :
self.normalize()
q2.normalize()
return 2.0 * numpy.arccos ( self * q2 )
def normalize (self) :
l = self.length()
if (l > 1e-4) :
self.s = self.s / l
self.v = self.v / l
else :
raise ("quaternion normalization error")
def __mul__(self, x) :
if type(x) == type(1.0) or type(x) == numpy.float64 :
return Quaternion ( self.s*x, self.v*x )
else :
return self.dot ( x )
def __add__(self, x) :
return Quaternion ( self.s + x.s, self.v + x.v )
def __sub__(self, x) :
return Quaternion ( self.s - x.s, self.v - x.v )
def __copy__ (self) :
return Quaternion ( self.s, self.v.__copy__() )
def Xform (self) :
s = self.s
v = self.v
return chimera.Xform.xform (
1-2*v.y*v.y-2*v.z*v.z, 2*v.x*v.y-2*s*v.z, 2*v.x*v.z+2*s*v.y, 0,
2*v.x*v.y+2*s*v.z, 1-2*v.x*v.x-2*v.z*v.z, 2*v.y*v.z-2*s*v.x, 0,
2*v.x*v.z-2*s*v.y, 2*v.y*v.z+2*s*v.x, 1-2*v.x*v.x-2*v.y*v.y, 0
)
def matrix (self) :
s = self.s
v = self.v
return [
[1-2*v.y*v.y-2*v.z*v.z, 2*v.x*v.y-2*s*v.z, 2*v.x*v.z+2*s*v.y],
[2*v.x*v.y+2*s*v.z, 1-2*v.x*v.x-2*v.z*v.z, 2*v.y*v.z-2*s*v.x],
[2*v.x*v.z-2*s*v.y, 2*v.y*v.z+2*s*v.x, 1-2*v.x*v.x-2*v.y*v.y],
]
def fromMatrix ( self, rkRot ) :
# article "Quaternion Calculus and Fast Animation".
fTrace = rkRot[0,0] + rkRot[1,1] + rkRot[2,2]
fRoot = 0.0
if fTrace > 0.0 :
# |w| > 1/2, may as well choose w > 1/2
fRoot = numpy.sqrt (fTrace + 1.0) # 2w
self.s = 0.5 * fRoot;
fRoot = 0.5 / fRoot; # 1/(4w)
self.v[0] = (rkRot[2,1]-rkRot[1,2])*fRoot;
self.v[1] = (rkRot[0,2]-rkRot[2,0])*fRoot;
self.v[2] = (rkRot[1,0]-rkRot[0,1])*fRoot;
else :
# |w| <= 1/2
i = 0
if rkRot[1,1] > rkRot[0,0] :
i = 1
if rkRot[2,2] > rkRot[i,i] :
i = 2
j = (i + 1) % 3 # ms_iNext[i];
k = (j + 1) % 3 # ms_iNext[j];
fRoot = numpy.sqrt(rkRot[i,i]-rkRot[j,j]-rkRot[k,k]+1.0);
# Real* apfQuat[3] = { &m_afTuple[1], &m_afTuple[2], &m_afTuple[3] };
self.v[i] = 0.5 * fRoot # *apfQuat[i] = ((Real)0.5)*fRoot;
fRoot = 0.5 / fRoot
self.s = (rkRot[k,j]-rkRot[j,k])*fRoot
self.v[j] = (rkRot[j,i]+rkRot[i,j])*fRoot # *apfQuat[j]
self.v[k] = (rkRot[k,i]+rkRot[i,k])*fRoot # *apfQuat[k]
def mult (a, b) :
return Quaternion (a.s*b.s - a.v*b.v, b.v*a.s + a.v*b.s + chimera.cross(a.v,b.v))
def slerp0 (p, q, t) :
cs = p.dot(q)
angle = numpy.arccos ( cs )
if abs (angle) > 0.0 :
sn = numpy.sin ( angle )
invSn = 1.0 / sn;
tAngle = t*angle;
c0 = numpy.sin(angle - tAngle)*invSn;
c1 = numpy.sin(tAngle)*invSn;
#mTuple[0] = coeff0*p.mTuple[0] + coeff1*q.mTuple[0];
#mTuple[1] = coeff0*p.mTuple[1] + coeff1*q.mTuple[1];
#mTuple[2] = coeff0*p.mTuple[2] + coeff1*q.mTuple[2];
#mTuple[3] = coeff0*p.mTuple[3] + coeff1*q.mTuple[3];
return Quaternion (p.s*c0+q.s*c1, p.v*c0 + q.v*c1)
else :
return Quaternion (p.s, chimera.Vector(p.v[0], p.v[1], p.v[2]))
def slerp (v0, v1, t) :
# http://number-none.com/product/Understanding%20Slerp,%20Then%20Not%20Using%20It/
#; Inputs are: unit vectors v0 and v1, scalar t
#; v0 and v1 are linearly independent
# Quaternion slerp(Quaternion const &v0, Quaternion const &v1, double t) {
# // v0 and v1 should be unit length or else
# // something broken will happen.
#
# // Compute the cosine of the angle between the two vectors.
# double dot = dot_product(v0, v1);
#
# const double DOT_THRESHOLD = 0.9995;
# if (dot > DOT_THRESHOLD) {
# // If the inputs are too close for comfort, linearly interpolate
# // and normalize the result.
#
# Quaternion result = v0 + t*(v1 - v0)
# result.normalize();
# return result;
# }
#
# Clamp(dot, -1, 1); // Robustness: Stay within domain of acos()
# double theta_0 = acos(dot); // theta_0 = angle between input vectors
# double theta = theta_0*t; // theta = angle between v0 and result
#
# Quaternion v2 = v1 - v0*dot
# v2.normalize(); // { v0, v2 } is now an orthonormal basis
#
# return v0*cos(theta) + v2*sin(theta);
dot = v0.dot(v1)
#print dot
if 1 or dot > 0.9995 :
r = v0 + (v1-v0) * t
r.normalize()
return r
if dot < -1.0 : dot = -1.0
if dot > 1.0 : dot = 1.0
theta_0 = numpy.arccos ( dot )
theta = theta_0*t
v2 = v1 - v0 * dot
v2.normalize()
r = v0 * numpy.cos(theta) + v2 * numpy.sin(theta)
if 0 :
# from http://graphics.cs.cmu.edu/nsp/course/15-464/Fall05/assignments/p245-shoemake.pdf
a0 = numpy.sin( (1-t) * theta_0 ) / numpy.sin(theta_0)
a1 = numpy.sin ( t * theta_0 ) / numpy.sin ( theta_0 )
r = v0 * a0 + v1 * a1
return r
| true
| true
|
1c430821f923be31c49db28013f3a7692f993b15
| 7,542
|
py
|
Python
|
pychron/core/fits/measurement_fits_selector.py
|
ael-noblegas/pychron
|
6ebbbb1f66a614972b62b7a9be4c784ae61b5d62
|
[
"Apache-2.0"
] | null | null | null |
pychron/core/fits/measurement_fits_selector.py
|
ael-noblegas/pychron
|
6ebbbb1f66a614972b62b7a9be4c784ae61b5d62
|
[
"Apache-2.0"
] | 80
|
2018-07-17T20:10:20.000Z
|
2021-08-17T15:38:24.000Z
|
pychron/core/fits/measurement_fits_selector.py
|
UManPychron/pychron
|
b84c9fd70072f9cbda30abe2c471e64fe3dd75d8
|
[
"Apache-2.0"
] | null | null | null |
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import ast
import os
import yaml
from traits.api import Str, Button, List
from traitsui.api import HGroup, UItem, VGroup, Item
from traitsui.extras.checkbox_column import CheckboxColumn
from traitsui.handler import Controller
from traitsui.table_column import ObjectColumn
from pychron.core.fits.filter_fit_selector import FilterFitSelector
from pychron.core.fits.fit import FilterFit
from pychron.core.helpers.filetools import add_extension, glob_list_directory
from pychron.core.helpers.iterfuncs import partition
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.core.ui.enum_editor import myEnumEditor
from pychron.core.ui.table_editor import myTableEditor
from pychron.core.yaml import yload
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.paths import paths
class MeasurementFit(FilterFit):
is_baseline = False
ATTRS = ['fit', 'error_type', 'name', 'filter_outliers', 'filter_iterations', 'filter_std_devs']
class MeasurementFitsSelector(FilterFitSelector):
fit_klass = MeasurementFit
name = Str(auto_set=False, enter_set=True)
available_names = List
def __init__(self, *args, **kw):
super(MeasurementFitsSelector, self).__init__(*args, **kw)
self._load_available_names()
def _name_changed(self, new):
if new:
self._load_name(new)
def _load_name(self, name):
self.load(os.path.join(paths.fits_dir, add_extension(name, '.yaml')))
def duplicate(self):
self.save()
self._load_available_names()
self._load_name(self.name)
def open(self, script_path):
dfp = self._extract_default_fits_file(script_path)
if dfp:
self.load(os.path.join(paths.fits_dir, add_extension(dfp, '.yaml')))
def save(self, name=None):
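        # partition fits into baseline/signal groups and write them to <fits_dir>/<name>.yaml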
if name is None:
name = self.name
bfs, sfs = partition(self.fits, lambda x: x.is_baseline)
yd = {'signal': self._dump(sfs),
'baseline': self._dump(bfs)}
p = os.path.join(paths.fits_dir, '{}.yaml'.format(name))
with open(p, 'w') as wfile:
yaml.dump(yd, wfile, default_flow_style=False)
def load(self, p):
if not os.path.isfile(p):
return
yd = yload(p)
fits = self._load_fits(yd['signal'])
fits.extend(self._load_fits(yd['baseline'], is_baseline=True))
self.fits = fits
h, _ = os.path.splitext(os.path.basename(p))
self.name = h
def _load_available_names(self):
ps = glob_list_directory(paths.fits_dir, extension='.yaml', remove_extension=True)
self.available_names = ps
def _extract_default_fits_file(self, path):
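        # read the measurement script, parse its module docstring as YAML, and return the 'default_fits' entry if present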
with open(path, 'r') as rfile:
m = ast.parse(rfile.read())
docstr = ast.get_docstring(m)
yd = yload(docstr)
if yd:
return yd.get('default_fits', None)
def _dump(self, fs):
ys = []
for fi in fs:
d = {ai: getattr(fi, ai) for ai in ATTRS}
ys.append(d)
return ys
def _load_fits(self, fs, is_baseline=False):
fits = []
for fi in fs:
d = {ai: fi[ai] for ai in ATTRS}
f = MeasurementFit(is_baseline=is_baseline, **d)
fits.append(f)
return fits
class MeasurementFitsSelectorView(Controller):
duplicate_button = Button
def _duplicate_button_fired(self):
info = self.model.edit_traits(view=okcancel_view(Item('name'),
title='Enter a new name',
width=300,
kind='modal'))
if info.result:
self.model.duplicate()
def closed(self, info, is_ok):
if is_ok:
self.model.save()
def _get_toggle_group(self):
g = HGroup(
UItem('filter_all_button'), )
return g
def _get_auto_group(self):
return HGroup(UItem('global_fit', editor=myEnumEditor(name='fit_types')),
UItem('global_error_type', editor=myEnumEditor(name='error_types')))
def _get_fit_group(self):
cols = [ObjectColumn(name='name', editable=False,
tooltip='If name is an isotope e.g Ar40 '
'fit is for a signal, if name is a detector e.g H1 fit is for a baseline'),
ObjectColumn(name='fit',
editor=myEnumEditor(name='fit_types'),
width=75),
ObjectColumn(name='error_type',
editor=myEnumEditor(name='error_types'),
label='Error',
width=75),
CheckboxColumn(name='filter_outliers', label='Out.'),
ObjectColumn(name='filter_iterations', label='Iter.'),
ObjectColumn(name='filter_std_devs', label='NSigma'),
CheckboxColumn(name='use_standard_deviation_filtering', label='Use SD'),
CheckboxColumn(name='use_iqr_filtering', label='Use IQR')
]
editor = myTableEditor(columns=cols,
selected='selected',
selection_mode='rows',
sortable=False,
edit_on_first_click=False,
clear_selection_on_dclicked=True,
on_command_key=self._update_command_key, )
grp = UItem('fits',
style='custom',
editor=editor)
return grp
def traits_view(self):
name_grp = HGroup(
UItem('name', editor=myEnumEditor(name='available_names')),
icon_button_editor('controller.duplicate_button', 'duplicate'))
v = okcancel_view(VGroup(name_grp,
self._get_toggle_group(),
self._get_auto_group(),
self._get_fit_group()),
height=400,
title='Edit Default Fits')
return v
if __name__ == '__main__':
# build_directories(paths)
m = MeasurementFitsSelector()
# keys = ['Ar40', 'Ar39']
# detectors=['H1','AX']
# fits = [('linear', 'SEM', {}),
# ('linear', 'SEM', {})]
t = os.path.join(paths.fits_dir, 'test.yaml')
m.load(t)
a = MeasurementFitsSelectorView(model=m)
a.configure_traits()
# ============= EOF =============================================
| 36.086124
| 112
| 0.570936
|
from __future__ import absolute_import
import ast
import os
import yaml
from traits.api import Str, Button, List
from traitsui.api import HGroup, UItem, VGroup, Item
from traitsui.extras.checkbox_column import CheckboxColumn
from traitsui.handler import Controller
from traitsui.table_column import ObjectColumn
from pychron.core.fits.filter_fit_selector import FilterFitSelector
from pychron.core.fits.fit import FilterFit
from pychron.core.helpers.filetools import add_extension, glob_list_directory
from pychron.core.helpers.iterfuncs import partition
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.core.ui.enum_editor import myEnumEditor
from pychron.core.ui.table_editor import myTableEditor
from pychron.core.yaml import yload
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.paths import paths
class MeasurementFit(FilterFit):
is_baseline = False
ATTRS = ['fit', 'error_type', 'name', 'filter_outliers', 'filter_iterations', 'filter_std_devs']
class MeasurementFitsSelector(FilterFitSelector):
fit_klass = MeasurementFit
name = Str(auto_set=False, enter_set=True)
available_names = List
def __init__(self, *args, **kw):
super(MeasurementFitsSelector, self).__init__(*args, **kw)
self._load_available_names()
def _name_changed(self, new):
if new:
self._load_name(new)
def _load_name(self, name):
self.load(os.path.join(paths.fits_dir, add_extension(name, '.yaml')))
def duplicate(self):
self.save()
self._load_available_names()
self._load_name(self.name)
def open(self, script_path):
dfp = self._extract_default_fits_file(script_path)
if dfp:
self.load(os.path.join(paths.fits_dir, add_extension(dfp, '.yaml')))
def save(self, name=None):
if name is None:
name = self.name
bfs, sfs = partition(self.fits, lambda x: x.is_baseline)
yd = {'signal': self._dump(sfs),
'baseline': self._dump(bfs)}
p = os.path.join(paths.fits_dir, '{}.yaml'.format(name))
with open(p, 'w') as wfile:
yaml.dump(yd, wfile, default_flow_style=False)
def load(self, p):
if not os.path.isfile(p):
return
yd = yload(p)
fits = self._load_fits(yd['signal'])
fits.extend(self._load_fits(yd['baseline'], is_baseline=True))
self.fits = fits
h, _ = os.path.splitext(os.path.basename(p))
self.name = h
def _load_available_names(self):
ps = glob_list_directory(paths.fits_dir, extension='.yaml', remove_extension=True)
self.available_names = ps
def _extract_default_fits_file(self, path):
with open(path, 'r') as rfile:
m = ast.parse(rfile.read())
docstr = ast.get_docstring(m)
yd = yload(docstr)
if yd:
return yd.get('default_fits', None)
def _dump(self, fs):
ys = []
for fi in fs:
d = {ai: getattr(fi, ai) for ai in ATTRS}
ys.append(d)
return ys
def _load_fits(self, fs, is_baseline=False):
fits = []
for fi in fs:
d = {ai: fi[ai] for ai in ATTRS}
f = MeasurementFit(is_baseline=is_baseline, **d)
fits.append(f)
return fits
class MeasurementFitsSelectorView(Controller):
duplicate_button = Button
def _duplicate_button_fired(self):
info = self.model.edit_traits(view=okcancel_view(Item('name'),
title='Enter a new name',
width=300,
kind='modal'))
if info.result:
self.model.duplicate()
def closed(self, info, is_ok):
if is_ok:
self.model.save()
def _get_toggle_group(self):
g = HGroup(
UItem('filter_all_button'), )
return g
def _get_auto_group(self):
return HGroup(UItem('global_fit', editor=myEnumEditor(name='fit_types')),
UItem('global_error_type', editor=myEnumEditor(name='error_types')))
def _get_fit_group(self):
cols = [ObjectColumn(name='name', editable=False,
                             tooltip='If name is an isotope, e.g. Ar40, '
                                     'the fit is for a signal; if name is a detector, e.g. H1, the fit is for a baseline'),
ObjectColumn(name='fit',
editor=myEnumEditor(name='fit_types'),
width=75),
ObjectColumn(name='error_type',
editor=myEnumEditor(name='error_types'),
label='Error',
width=75),
CheckboxColumn(name='filter_outliers', label='Out.'),
ObjectColumn(name='filter_iterations', label='Iter.'),
ObjectColumn(name='filter_std_devs', label='NSigma'),
CheckboxColumn(name='use_standard_deviation_filtering', label='Use SD'),
CheckboxColumn(name='use_iqr_filtering', label='Use IQR')
]
editor = myTableEditor(columns=cols,
selected='selected',
selection_mode='rows',
sortable=False,
edit_on_first_click=False,
clear_selection_on_dclicked=True,
on_command_key=self._update_command_key, )
grp = UItem('fits',
style='custom',
editor=editor)
return grp
def traits_view(self):
name_grp = HGroup(
UItem('name', editor=myEnumEditor(name='available_names')),
icon_button_editor('controller.duplicate_button', 'duplicate'))
v = okcancel_view(VGroup(name_grp,
self._get_toggle_group(),
self._get_auto_group(),
self._get_fit_group()),
height=400,
title='Edit Default Fits')
return v
if __name__ == '__main__':
m = MeasurementFitsSelector()
t = os.path.join(paths.fits_dir, 'test.yaml')
m.load(t)
a = MeasurementFitsSelectorView(model=m)
a.configure_traits()
| true
| true
|
1c4308f87fe5f938447cd5436faba60db58264e4
| 1,275
|
py
|
Python
|
analysis/clustering-initial-attempts/clustering-attempt-1/sort_clusters.py
|
BogDAAAMN/dark-patterns
|
0335a1ad88316a05a9243e6a77ab79a0c2d06f12
|
[
"Apache-2.0"
] | 98
|
2018-12-20T15:04:38.000Z
|
2022-03-08T05:08:47.000Z
|
analysis/clustering-initial-attempts/clustering-attempt-1/sort_clusters.py
|
BogDAAAMN/dark-patterns
|
0335a1ad88316a05a9243e6a77ab79a0c2d06f12
|
[
"Apache-2.0"
] | 35
|
2018-07-27T16:09:46.000Z
|
2019-01-31T16:09:14.000Z
|
analysis/clustering-initial-attempts/clustering-attempt-1/sort_clusters.py
|
TheCGO/dark-patterns
|
f458f19c4814419acd691f2842d7e1123f14097c
|
[
"Apache-2.0"
] | 19
|
2018-12-20T15:04:41.000Z
|
2021-11-09T13:53:24.000Z
|
from __future__ import print_function
from tqdm import tqdm
import json
import os.path
import sys
usage = 'Usage: python %s CLUSTERS-FILE OUT-FILE' % __file__
help_message = '''Sorts clusters in the provided file by size, with largest first.
Clusters should be formatted in the same way as accepted by cluster_browser.py. Specify
the name of the output file as OUT-FILE.'''
if __name__ == '__main__':
# Check usage
if len(sys.argv[1:]) != 2:
print(usage)
print()
print(help_message)
sys.exit(1)
clusters_file = sys.argv[1]
out_file = sys.argv[2]
if not os.path.isfile(clusters_file):
print('Error: Clusters file not found: %s' % clusters_file)
sys.exit(1)
if os.path.isfile(out_file):
print('Error: output file already exists. Exiting to avoid overwriting: %s' % out_file)
sys.exit(1)
print('Reading in clusters...')
clusters = []
with open(clusters_file, 'r') as f:
for line in tqdm(f):
c = json.loads(line)
clusters.append(c)
print('Sorting clusters...')
    # Sort largest-first by member count. The key-based form below works on both
    # Python 2 and 3; the original used the Python-2-only cmp= argument.
    clusters_sort = sorted(clusters, key=lambda c: len(c[list(c.keys())[0]]), reverse=True)
print('Writing sorted clusters to file...')
with open(out_file, 'w') as f:
for c in tqdm(clusters_sort):
f.write(json.dumps(c) + '\n')
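# --- Editor's illustration (hypothetical data, not part of the original script) ---
# The clusters file is assumed to be JSON Lines: one single-key object per line,
# mapping a cluster id to the list of its members, e.g.
#     {"cluster_7": ["a.example", "b.example", "c.example"]}
#     {"cluster_2": ["d.example"]}
# With that input, the sorted output writes cluster_7 (3 members) before
# cluster_2 (1 member).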
| 29.651163
| 94
| 0.675294
|
from __future__ import print_function
from tqdm import tqdm
import json
import os.path
import sys
usage = 'Usage: python %s CLUSTERS-FILE OUT-FILE' % __file__
help_message = '''Sorts clusters in the provided file by size, with largest first.
Clusters should be formatted in the same way as accepted by cluster_browser.py. Specify
the name of the output file as OUT-FILE.'''
if __name__ == '__main__':
if len(sys.argv[1:]) != 2:
print(usage)
print()
print(help_message)
sys.exit(1)
clusters_file = sys.argv[1]
out_file = sys.argv[2]
if not os.path.isfile(clusters_file):
print('Error: Clusters file not found: %s' % clusters_file)
sys.exit(1)
if os.path.isfile(out_file):
print('Error: output file already exists. Exiting to avoid overwriting: %s' % out_file)
sys.exit(1)
print('Reading in clusters...')
clusters = []
with open(clusters_file, 'r') as f:
for line in tqdm(f):
c = json.loads(line)
clusters.append(c)
print('Sorting clusters...')
    clusters_sort = sorted(clusters, key=lambda c: len(c[list(c.keys())[0]]), reverse=True)
print('Writing sorted clusters to file...')
with open(out_file, 'w') as f:
for c in tqdm(clusters_sort):
f.write(json.dumps(c) + '\n')
| true
| true
|
1c43093fa85de4f6e1de23a0ecc3b43530f42260
| 126
|
py
|
Python
|
sourcecode/GAN/FID/__init__.py
|
toufeeqahamedns/GeneratingHumanFaces
|
93048bf5f6ae99424f918b0d0fea46d21abee0cb
|
[
"MIT"
] | null | null | null |
sourcecode/GAN/FID/__init__.py
|
toufeeqahamedns/GeneratingHumanFaces
|
93048bf5f6ae99424f918b0d0fea46d21abee0cb
|
[
"MIT"
] | null | null | null |
sourcecode/GAN/FID/__init__.py
|
toufeeqahamedns/GeneratingHumanFaces
|
93048bf5f6ae99424f918b0d0fea46d21abee0cb
|
[
"MIT"
] | null | null | null |
""" Package has implementation for the FID score calculation
"""
from GAN.FID import fid_score
from GAN.FID import inception
| 21
| 60
| 0.785714
|
from GAN.FID import fid_score
from GAN.FID import inception
| true
| true
|
1c4309663a6b321e33289d53fa1cdd98849e4918
| 42
|
py
|
Python
|
docnetdb/examples/__init__.py
|
fsabre/DocNetDB
|
c749a345e644b63219bd544967bed563299fd42c
|
[
"MIT"
] | 4
|
2020-01-27T13:10:58.000Z
|
2020-09-12T12:10:22.000Z
|
docnetdb/examples/__init__.py
|
fsabre/DocNetDB
|
c749a345e644b63219bd544967bed563299fd42c
|
[
"MIT"
] | null | null | null |
docnetdb/examples/__init__.py
|
fsabre/DocNetDB
|
c749a345e644b63219bd544967bed563299fd42c
|
[
"MIT"
] | null | null | null |
"""This package defines some examples."""
| 21
| 41
| 0.714286
| true
| true
|
|
1c430a1d102e128e139d8c74d14944d8d32fb967
| 4,458
|
py
|
Python
|
Main/AlphaZero/DistributedSelfPlay/SelfPlay.py
|
ikaroszhang96/Convex-AlphaZero
|
d96c9790529e48ff4e2ec34649bdc312a0abcc53
|
[
"MIT"
] | null | null | null |
Main/AlphaZero/DistributedSelfPlay/SelfPlay.py
|
ikaroszhang96/Convex-AlphaZero
|
d96c9790529e48ff4e2ec34649bdc312a0abcc53
|
[
"MIT"
] | null | null | null |
Main/AlphaZero/DistributedSelfPlay/SelfPlay.py
|
ikaroszhang96/Convex-AlphaZero
|
d96c9790529e48ff4e2ec34649bdc312a0abcc53
|
[
"MIT"
] | null | null | null |
from Main.AlphaZero.DistributedSelfPlay import Constants
from Main.Training.Connect4 import MemoryBuffers
from Main import Hyperparameters
import multiprocessing as mp
import numpy as np
import time
'''
Listen for data from the Remote Worker and forward it to the Replay Watcher.
Every worker will continue to work until the pre-determined number of games has been collected.
After the Remote Workers have been aborted by the Replay Watcher, they will message the listener and the listener quits.
'''
def _waitForWorker(connection, dumpPipe):
gamesCollected = 0
collectingDataFromWorker = True
while (collectingDataFromWorker):
msg, data = connection.readMessage()
dumpPipe.put((msg, data))
if (msg == Constants.RemoteProtocol.DUMP_VISITED_STATES_TO_OVERLORD):
collectingDataFromWorker = False
elif (msg == Constants.RemoteProtocol.DUMP_REPLAY_DATA_TO_OVERLORD):
amountOfGames = data[0]
gamesCollected += amountOfGames
print("Worker Finished: {} Amount of Games: {}".format(connection.id, gamesCollected))
def _stopRemoteWorkers(connections):
print("Aborting remoteWorkers")
for c in connections:
c.sendMessage(Constants.RemoteProtocol.OVERLORD_REPLAY_BUFFER_FULL, ("",))
# Collect data from all listeners and upon reaching a pre-determined number of games abort all Remote Workers
# As the main data is stored at the Looping Trainer we clear the Replay Buffer at the start
def _replayWatcher(connections, dumpPipe):
print("Starting replay watcher")
collectedGamesThisCycle = 0
MemoryBuffers.clearReplayBuffer()
startTimeSelfPlay = time.time()
while (True):
msg, data = dumpPipe.get() # Data passed from a listener
if (msg == Constants.RemoteProtocol.DUMP_REPLAY_DATA_TO_OVERLORD):
amountOfGames, states, evals, polices, weights = data
MemoryBuffers.addLabelsToReplayBuffer(states, evals, polices)
collectedGamesThisCycle += amountOfGames
# Display a formatted message
cycleProgressMsg = "{} / {}".format(collectedGamesThisCycle, Hyperparameters.AMOUNT_OF_NEW_GAMES_PER_CYCLE)
elapsedTime = np.around(time.time() - startTimeSelfPlay, 3)
elapsedTimeMsg = "Time: {}".format(elapsedTime)
gamesPerSecondMsg = "Games/Sec: {}".format(np.around(collectedGamesThisCycle / elapsedTime, 3))
print(cycleProgressMsg + "\t\t" + elapsedTimeMsg + "\t\t" + gamesPerSecondMsg)
            # Upon receiving a sufficient number of games, we send a message to all Remote Workers to abort
if (collectedGamesThisCycle >= Hyperparameters.AMOUNT_OF_NEW_GAMES_PER_CYCLE):
_stopRemoteWorkers(connections)
return
'''
*** CURRENTLY INACTIVE ***
The argmax schedule decides at what point in a game we start playing deterministically according to the policy.
'''
def _getCurrentArgMaxLevel(modelGeneration):
for a in Hyperparameters.ARG_MAX_SCHEDULE:
cycleNumber, argMaxLevel = a
if (modelGeneration < cycleNumber):
return argMaxLevel
_, finalArgMaxLevel = Hyperparameters.ARG_MAX_SCHEDULE[-1]
return finalArgMaxLevel
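# --- Editor's illustration (hypothetical values, not part of the original source) ---
# Hyperparameters.ARG_MAX_SCHEDULE is assumed to be a list of
# (cycleNumber, argMaxLevel) pairs in ascending cycle order, e.g.
#     ARG_MAX_SCHEDULE = [(10, 30), (50, 15), (100, 5)]
# Under that schedule, _getCurrentArgMaxLevel(25) returns 15 (since 25 < 50),
# and any model generation >= 100 falls through to the final level, 5.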
'''
Broadcast the current: (Network Parameters, MCTS simulations per move, ArgMax schedule) to all Remote Workers.
Then start a listener for every worker that collects game data.
These listeners forward the collected data to the Replay Watcher.
The cycle finishes after a fixed number of games.
'''
def selfPlay(workerConnections, modelAsBytes, modelGeneration):
t1 = time.time() # Only used for displaying elapsed time to the user
argMaxLevel = _getCurrentArgMaxLevel(modelGeneration)
workerCounter = 0
for c in workerConnections:
c.sendMessage(Constants.RemoteProtocol.START_SELF_PLAY,
(workerCounter, modelAsBytes, Hyperparameters.MCTS_SIMULATIONS_PER_MOVE, argMaxLevel))
workerCounter += 1
print("Sending out models finished:", time.time() - t1)
# Start a listener for every remote worker
dumpPipe = mp.Queue()
procs = [mp.Process(target=_waitForWorker, args=(c, dumpPipe)) for c in workerConnections]
for p in procs:
p.start()
# Wait until all listeners have reported that they have finished, then stop all Remote Workers
_replayWatcher(workerConnections, dumpPipe)
print("Self-Play finished: {}".format(time.time() - t1))
| 40.527273
| 119
| 0.721624
|
from Main.AlphaZero.DistributedSelfPlay import Constants
from Main.Training.Connect4 import MemoryBuffers
from Main import Hyperparameters
import multiprocessing as mp
import numpy as np
import time
def _waitForWorker(connection, dumpPipe):
gamesCollected = 0
collectingDataFromWorker = True
while (collectingDataFromWorker):
msg, data = connection.readMessage()
dumpPipe.put((msg, data))
if (msg == Constants.RemoteProtocol.DUMP_VISITED_STATES_TO_OVERLORD):
collectingDataFromWorker = False
elif (msg == Constants.RemoteProtocol.DUMP_REPLAY_DATA_TO_OVERLORD):
amountOfGames = data[0]
gamesCollected += amountOfGames
print("Worker Finished: {} Amount of Games: {}".format(connection.id, gamesCollected))
def _stopRemoteWorkers(connections):
print("Aborting remoteWorkers")
for c in connections:
c.sendMessage(Constants.RemoteProtocol.OVERLORD_REPLAY_BUFFER_FULL, ("",))
def _replayWatcher(connections, dumpPipe):
print("Starting replay watcher")
collectedGamesThisCycle = 0
MemoryBuffers.clearReplayBuffer()
startTimeSelfPlay = time.time()
while (True):
msg, data = dumpPipe.get()
if (msg == Constants.RemoteProtocol.DUMP_REPLAY_DATA_TO_OVERLORD):
amountOfGames, states, evals, polices, weights = data
MemoryBuffers.addLabelsToReplayBuffer(states, evals, polices)
collectedGamesThisCycle += amountOfGames
cycleProgressMsg = "{} / {}".format(collectedGamesThisCycle, Hyperparameters.AMOUNT_OF_NEW_GAMES_PER_CYCLE)
elapsedTime = np.around(time.time() - startTimeSelfPlay, 3)
elapsedTimeMsg = "Time: {}".format(elapsedTime)
gamesPerSecondMsg = "Games/Sec: {}".format(np.around(collectedGamesThisCycle / elapsedTime, 3))
print(cycleProgressMsg + "\t\t" + elapsedTimeMsg + "\t\t" + gamesPerSecondMsg)
if (collectedGamesThisCycle >= Hyperparameters.AMOUNT_OF_NEW_GAMES_PER_CYCLE):
_stopRemoteWorkers(connections)
return
def _getCurrentArgMaxLevel(modelGeneration):
for a in Hyperparameters.ARG_MAX_SCHEDULE:
cycleNumber, argMaxLevel = a
if (modelGeneration < cycleNumber):
return argMaxLevel
_, finalArgMaxLevel = Hyperparameters.ARG_MAX_SCHEDULE[-1]
return finalArgMaxLevel
def selfPlay(workerConnections, modelAsBytes, modelGeneration):
t1 = time.time()
argMaxLevel = _getCurrentArgMaxLevel(modelGeneration)
workerCounter = 0
for c in workerConnections:
c.sendMessage(Constants.RemoteProtocol.START_SELF_PLAY,
(workerCounter, modelAsBytes, Hyperparameters.MCTS_SIMULATIONS_PER_MOVE, argMaxLevel))
workerCounter += 1
print("Sending out models finished:", time.time() - t1)
dumpPipe = mp.Queue()
procs = [mp.Process(target=_waitForWorker, args=(c, dumpPipe)) for c in workerConnections]
for p in procs:
p.start()
_replayWatcher(workerConnections, dumpPipe)
print("Self-Play finished: {}".format(time.time() - t1))
| true
| true
|
1c430ac1e16c40ccad73fdb23ae4dc5bca695e2c
| 344,018
|
py
|
Python
|
ns-allinone-3.29/ns-3.29/src/core/bindings/modulegen__gcc_ILP32.py
|
tayoon/My-NS-3
|
e39bd778fe31397e048f770533c5154761bbbcb5
|
[
"MIT"
] | null | null | null |
ns-allinone-3.29/ns-3.29/src/core/bindings/modulegen__gcc_ILP32.py
|
tayoon/My-NS-3
|
e39bd778fe31397e048f770533c5154761bbbcb5
|
[
"MIT"
] | null | null | null |
ns-allinone-3.29/ns-3.29/src/core/bindings/modulegen__gcc_ILP32.py
|
tayoon/My-NS-3
|
e39bd778fe31397e048f770533c5154761bbbcb5
|
[
"MIT"
] | null | null | null |
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.core', cpp_namespace='::ns3')
return root_module
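## Editor's note (illustrative, not part of the generated bindings): scripts of
## this form are typically driven by a main() that chains the registration
## passes defined below and emits the C++ wrapper code, roughly:
##     root_module = module_init()
##     register_types(root_module)
##     register_methods(root_module)
##     root_module.generate(FileCodeSink(sys.stdout))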
def register_types(module):
root_module = module.get_root()
## log.h (module 'core'): ns3::LogLevel [enumeration]
module.add_enum('LogLevel', ['LOG_NONE', 'LOG_ERROR', 'LOG_LEVEL_ERROR', 'LOG_WARN', 'LOG_LEVEL_WARN', 'LOG_DEBUG', 'LOG_LEVEL_DEBUG', 'LOG_INFO', 'LOG_LEVEL_INFO', 'LOG_FUNCTION', 'LOG_LEVEL_FUNCTION', 'LOG_LOGIC', 'LOG_LEVEL_LOGIC', 'LOG_ALL', 'LOG_LEVEL_ALL', 'LOG_PREFIX_FUNC', 'LOG_PREFIX_TIME', 'LOG_PREFIX_NODE', 'LOG_PREFIX_LEVEL', 'LOG_PREFIX_ALL'])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', outer_class=root_module['ns3::AttributeConstructionList'])
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator', u'ns3::AttributeConstructionList::CIterator')
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator*', u'ns3::AttributeConstructionList::CIterator*')
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator&', u'ns3::AttributeConstructionList::CIterator&')
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase')
## command-line.h (module 'core'): ns3::CommandLine [class]
module.add_class('CommandLine', allow_subclassing=True)
## system-mutex.h (module 'core'): ns3::CriticalSection [class]
module.add_class('CriticalSection')
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor> [struct]
module.add_class('DefaultDeleter', template_parameters=['ns3::AttributeAccessor'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker> [struct]
module.add_class('DefaultDeleter', template_parameters=['ns3::AttributeChecker'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue> [struct]
module.add_class('DefaultDeleter', template_parameters=['ns3::AttributeValue'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase> [struct]
module.add_class('DefaultDeleter', template_parameters=['ns3::CallbackImplBase'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl> [struct]
module.add_class('DefaultDeleter', template_parameters=['ns3::EventImpl'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation> [struct]
module.add_class('DefaultDeleter', template_parameters=['ns3::Hash::Implementation'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::SystemThread> [struct]
module.add_class('DefaultDeleter', template_parameters=['ns3::SystemThread'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor> [struct]
module.add_class('DefaultDeleter', template_parameters=['ns3::TraceSourceAccessor'])
## event-garbage-collector.h (module 'core'): ns3::EventGarbageCollector [class]
module.add_class('EventGarbageCollector')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId')
## global-value.h (module 'core'): ns3::GlobalValue [class]
module.add_class('GlobalValue')
typehandlers.add_type_alias(u'std::vector< ns3::GlobalValue * > const_iterator', u'ns3::GlobalValue::Iterator')
typehandlers.add_type_alias(u'std::vector< ns3::GlobalValue * > const_iterator*', u'ns3::GlobalValue::Iterator*')
typehandlers.add_type_alias(u'std::vector< ns3::GlobalValue * > const_iterator&', u'ns3::GlobalValue::Iterator&')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher')
## int-to-type.h (module 'core'): ns3::IntToType<0> [struct]
module.add_class('IntToType', template_parameters=['0'])
## int-to-type.h (module 'core'): ns3::IntToType<0>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 0 >'])
## int-to-type.h (module 'core'): ns3::IntToType<1> [struct]
module.add_class('IntToType', template_parameters=['1'])
## int-to-type.h (module 'core'): ns3::IntToType<1>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 1 >'])
## int-to-type.h (module 'core'): ns3::IntToType<2> [struct]
module.add_class('IntToType', template_parameters=['2'])
## int-to-type.h (module 'core'): ns3::IntToType<2>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 2 >'])
## int-to-type.h (module 'core'): ns3::IntToType<3> [struct]
module.add_class('IntToType', template_parameters=['3'])
## int-to-type.h (module 'core'): ns3::IntToType<3>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 3 >'])
## int-to-type.h (module 'core'): ns3::IntToType<4> [struct]
module.add_class('IntToType', template_parameters=['4'])
## int-to-type.h (module 'core'): ns3::IntToType<4>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 4 >'])
## int-to-type.h (module 'core'): ns3::IntToType<5> [struct]
module.add_class('IntToType', template_parameters=['5'])
## int-to-type.h (module 'core'): ns3::IntToType<5>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 5 >'])
## int-to-type.h (module 'core'): ns3::IntToType<6> [struct]
module.add_class('IntToType', template_parameters=['6'])
## int-to-type.h (module 'core'): ns3::IntToType<6>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 6 >'])
## log.h (module 'core'): ns3::LogComponent [class]
module.add_class('LogComponent')
typehandlers.add_type_alias(u'std::map< std::string, ns3::LogComponent * >', u'ns3::LogComponent::ComponentList')
typehandlers.add_type_alias(u'std::map< std::string, ns3::LogComponent * >*', u'ns3::LogComponent::ComponentList*')
typehandlers.add_type_alias(u'std::map< std::string, ns3::LogComponent * >&', u'ns3::LogComponent::ComponentList&')
## names.h (module 'core'): ns3::Names [class]
module.add_class('Names')
## non-copyable.h (module 'core'): ns3::NonCopyable [class]
module.add_class('NonCopyable', destructor_visibility='protected')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True)
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory')
## log.h (module 'core'): ns3::ParameterLogger [class]
module.add_class('ParameterLogger')
## random-variable-stream-helper.h (module 'core'): ns3::RandomVariableStreamHelper [class]
module.add_class('RandomVariableStreamHelper')
## rng-seed-manager.h (module 'core'): ns3::RngSeedManager [class]
module.add_class('RngSeedManager')
## rng-stream.h (module 'core'): ns3::RngStream [class]
module.add_class('RngStream')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator.h (module 'core'): ns3::Simulator [class]
module.add_class('Simulator', destructor_visibility='private')
## simulator.h (module 'core'): ns3::Simulator [enumeration]
module.add_enum('', ['NO_CONTEXT'], outer_class=root_module['ns3::Simulator'])
## singleton.h (module 'core'): ns3::Singleton<ns3::DesMetrics> [class]
module.add_class('Singleton', template_parameters=['ns3::DesMetrics'], parent=root_module['ns3::NonCopyable'])
## system-condition.h (module 'core'): ns3::SystemCondition [class]
module.add_class('SystemCondition')
## system-mutex.h (module 'core'): ns3::SystemMutex [class]
module.add_class('SystemMutex')
## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs [class]
module.add_class('SystemWallClockMs')
## nstime.h (module 'core'): ns3::TimeWithUnit [class]
module.add_class('TimeWithUnit')
## timer.h (module 'core'): ns3::Timer [class]
module.add_class('Timer')
## timer.h (module 'core'): ns3::Timer::DestroyPolicy [enumeration]
module.add_enum('DestroyPolicy', ['CANCEL_ON_DESTROY', 'REMOVE_ON_DESTROY', 'CHECK_ON_DESTROY'], outer_class=root_module['ns3::Timer'])
## timer.h (module 'core'): ns3::Timer::State [enumeration]
module.add_enum('State', ['RUNNING', 'EXPIRED', 'SUSPENDED'], outer_class=root_module['ns3::Timer'])
## timer-impl.h (module 'core'): ns3::TimerImpl [class]
module.add_class('TimerImpl', allow_subclassing=True)
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration]
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', outer_class=root_module['ns3::TypeId'])
typehandlers.add_type_alias(u'uint32_t', u'ns3::TypeId::hash_t')
typehandlers.add_type_alias(u'uint32_t*', u'ns3::TypeId::hash_t*')
typehandlers.add_type_alias(u'uint32_t&', u'ns3::TypeId::hash_t&')
## vector.h (module 'core'): ns3::Vector2D [class]
module.add_class('Vector2D')
## vector.h (module 'core'): ns3::Vector3D [class]
module.add_class('Vector3D')
## watchdog.h (module 'core'): ns3::Watchdog [class]
module.add_class('Watchdog')
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty')
## int64x64-128.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t')
## int64x64-128.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'])
## des-metrics.h (module 'core'): ns3::DesMetrics [class]
module.add_class('DesMetrics', parent=root_module['ns3::Singleton< ns3::DesMetrics >'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', outer_class=root_module['ns3::Object'])
## random-variable-stream.h (module 'core'): ns3::RandomVariableStream [class]
module.add_class('RandomVariableStream', parent=root_module['ns3::Object'])
## scheduler.h (module 'core'): ns3::Scheduler [class]
module.add_class('Scheduler', parent=root_module['ns3::Object'])
## scheduler.h (module 'core'): ns3::Scheduler::Event [struct]
module.add_class('Event', outer_class=root_module['ns3::Scheduler'])
## scheduler.h (module 'core'): ns3::Scheduler::EventKey [struct]
module.add_class('EventKey', outer_class=root_module['ns3::Scheduler'])
## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable [class]
module.add_class('SequentialRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::FdReader', 'ns3::empty', 'ns3::DefaultDeleter<ns3::FdReader>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::RefCountBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::RefCountBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::SystemThread', 'ns3::empty', 'ns3::DefaultDeleter<ns3::SystemThread>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator-impl.h (module 'core'): ns3::SimulatorImpl [class]
module.add_class('SimulatorImpl', parent=root_module['ns3::Object'])
## synchronizer.h (module 'core'): ns3::Synchronizer [class]
module.add_class('Synchronizer', parent=root_module['ns3::Object'])
## system-thread.h (module 'core'): ns3::SystemThread [class]
module.add_class('SystemThread', parent=root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
typehandlers.add_type_alias(u'pthread_t', u'ns3::SystemThread::ThreadId')
typehandlers.add_type_alias(u'pthread_t*', u'ns3::SystemThread::ThreadId*')
typehandlers.add_type_alias(u'pthread_t&', u'ns3::SystemThread::ThreadId&')
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'])
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )', u'ns3::Time::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )*', u'ns3::Time::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )&', u'ns3::Time::TracedCallback&')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable [class]
module.add_class('TriangularRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable [class]
module.add_class('UniformRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## wall-clock-synchronizer.h (module 'core'): ns3::WallClockSynchronizer [class]
module.add_class('WallClockSynchronizer', parent=root_module['ns3::Synchronizer'])
## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable [class]
module.add_class('WeibullRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable [class]
module.add_class('ZetaRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable [class]
module.add_class('ZipfRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## boolean.h (module 'core'): ns3::BooleanChecker [class]
module.add_class('BooleanChecker', parent=root_module['ns3::AttributeChecker'])
## boolean.h (module 'core'): ns3::BooleanValue [class]
module.add_class('BooleanValue', parent=root_module['ns3::AttributeValue'])
## calendar-scheduler.h (module 'core'): ns3::CalendarScheduler [class]
module.add_class('CalendarScheduler', parent=root_module['ns3::Scheduler'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable [class]
module.add_class('ConstantRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## default-simulator-impl.h (module 'core'): ns3::DefaultSimulatorImpl [class]
module.add_class('DefaultSimulatorImpl', parent=root_module['ns3::SimulatorImpl'])
## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable [class]
module.add_class('DeterministicRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## double.h (module 'core'): ns3::DoubleValue [class]
module.add_class('DoubleValue', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable [class]
module.add_class('EmpiricalRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class]
module.add_class('EmptyAttributeAccessor', parent=root_module['ns3::AttributeAccessor'])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class]
module.add_class('EmptyAttributeChecker', parent=root_module['ns3::AttributeChecker'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', parent=root_module['ns3::AttributeValue'])
## enum.h (module 'core'): ns3::EnumChecker [class]
module.add_class('EnumChecker', parent=root_module['ns3::AttributeChecker'])
## enum.h (module 'core'): ns3::EnumValue [class]
module.add_class('EnumValue', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable [class]
module.add_class('ErlangRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable [class]
module.add_class('ExponentialRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## unix-fd-reader.h (module 'core'): ns3::FdReader [class]
module.add_class('FdReader', parent=root_module['ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >'])
## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable [class]
module.add_class('GammaRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## heap-scheduler.h (module 'core'): ns3::HeapScheduler [class]
module.add_class('HeapScheduler', parent=root_module['ns3::Scheduler'])
## integer.h (module 'core'): ns3::IntegerValue [class]
module.add_class('IntegerValue', parent=root_module['ns3::AttributeValue'])
## list-scheduler.h (module 'core'): ns3::ListScheduler [class]
module.add_class('ListScheduler', parent=root_module['ns3::Scheduler'])
## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable [class]
module.add_class('LogNormalRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## map-scheduler.h (module 'core'): ns3::MapScheduler [class]
module.add_class('MapScheduler', parent=root_module['ns3::Scheduler'])
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable [class]
module.add_class('NormalRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', parent=root_module['ns3::AttributeValue'])
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerAccessor [class]
module.add_class('ObjectPtrContainerAccessor', parent=root_module['ns3::AttributeAccessor'])
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerChecker [class]
module.add_class('ObjectPtrContainerChecker', parent=root_module['ns3::AttributeChecker'])
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerValue [class]
module.add_class('ObjectPtrContainerValue', parent=root_module['ns3::AttributeValue'])
typehandlers.add_type_alias(u'std::map< unsigned long long, ns3::Ptr< ns3::Object > > const_iterator', u'ns3::ObjectPtrContainerValue::Iterator')
typehandlers.add_type_alias(u'std::map< unsigned long long, ns3::Ptr< ns3::Object > > const_iterator*', u'ns3::ObjectPtrContainerValue::Iterator*')
typehandlers.add_type_alias(u'std::map< unsigned long long, ns3::Ptr< ns3::Object > > const_iterator&', u'ns3::ObjectPtrContainerValue::Iterator&')
## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable [class]
module.add_class('ParetoRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## pointer.h (module 'core'): ns3::PointerChecker [class]
module.add_class('PointerChecker', parent=root_module['ns3::AttributeChecker'])
## pointer.h (module 'core'): ns3::PointerValue [class]
module.add_class('PointerValue', parent=root_module['ns3::AttributeValue'])
## realtime-simulator-impl.h (module 'core'): ns3::RealtimeSimulatorImpl [class]
module.add_class('RealtimeSimulatorImpl', parent=root_module['ns3::SimulatorImpl'])
## realtime-simulator-impl.h (module 'core'): ns3::RealtimeSimulatorImpl::SynchronizationMode [enumeration]
module.add_enum('SynchronizationMode', ['SYNC_BEST_EFFORT', 'SYNC_HARD_LIMIT'], outer_class=root_module['ns3::RealtimeSimulatorImpl'])
## ref-count-base.h (module 'core'): ns3::RefCountBase [class]
module.add_class('RefCountBase', parent=root_module['ns3::SimpleRefCount< ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >'])
## string.h (module 'core'): ns3::StringChecker [class]
module.add_class('StringChecker', parent=root_module['ns3::AttributeChecker'])
## string.h (module 'core'): ns3::StringValue [class]
module.add_class('StringValue', parent=root_module['ns3::AttributeValue'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', parent=root_module['ns3::AttributeValue'])
## uinteger.h (module 'core'): ns3::UintegerValue [class]
module.add_class('UintegerValue', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector2DChecker [class]
module.add_class('Vector2DChecker', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector2DValue [class]
module.add_class('Vector2DValue', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector3DChecker [class]
module.add_class('Vector3DChecker', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector3DValue [class]
module.add_class('Vector3DValue', parent=root_module['ns3::AttributeValue'])
## callback.h (module 'core'): ns3::CallbackImpl<bool, std::basic_string<char>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', template_parameters=['bool', 'std::basic_string<char>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', template_parameters=['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
## callback.h (module 'core'): ns3::CallbackImpl<void, unsigned char *, long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', template_parameters=['void', 'unsigned char *', 'long', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_container('std::vector< std::string >', 'std::string', container_type=u'vector')
module.add_container('std::map< std::string, ns3::LogComponent * >', ('std::string', 'ns3::LogComponent *'), container_type=u'map')
typehandlers.add_type_alias(u'ns3::Vector3D', u'ns3::Vector')
typehandlers.add_type_alias(u'ns3::Vector3D*', u'ns3::Vector*')
typehandlers.add_type_alias(u'ns3::Vector3D&', u'ns3::Vector&')
module.add_typedef(root_module['ns3::Vector3D'], 'Vector')
typehandlers.add_type_alias(u'ns3::Vector3DValue', u'ns3::VectorValue')
typehandlers.add_type_alias(u'ns3::Vector3DValue*', u'ns3::VectorValue*')
typehandlers.add_type_alias(u'ns3::Vector3DValue&', u'ns3::VectorValue&')
module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue')
typehandlers.add_type_alias(u'ns3::Vector3DChecker', u'ns3::VectorChecker')
typehandlers.add_type_alias(u'ns3::Vector3DChecker*', u'ns3::VectorChecker*')
typehandlers.add_type_alias(u'ns3::Vector3DChecker&', u'ns3::VectorChecker&')
module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')
typehandlers.add_type_alias(u'ns3::RngSeedManager', u'ns3::SeedManager')
typehandlers.add_type_alias(u'ns3::RngSeedManager*', u'ns3::SeedManager*')
typehandlers.add_type_alias(u'ns3::RngSeedManager&', u'ns3::SeedManager&')
module.add_typedef(root_module['ns3::RngSeedManager'], 'SeedManager')
typehandlers.add_type_alias(u'ns3::ObjectPtrContainerValue', u'ns3::ObjectVectorValue')
typehandlers.add_type_alias(u'ns3::ObjectPtrContainerValue*', u'ns3::ObjectVectorValue*')
typehandlers.add_type_alias(u'ns3::ObjectPtrContainerValue&', u'ns3::ObjectVectorValue&')
module.add_typedef(root_module['ns3::ObjectPtrContainerValue'], 'ObjectVectorValue')
typehandlers.add_type_alias(u'ns3::ObjectPtrContainerValue', u'ns3::ObjectMapValue')
typehandlers.add_type_alias(u'ns3::ObjectPtrContainerValue*', u'ns3::ObjectMapValue*')
typehandlers.add_type_alias(u'ns3::ObjectPtrContainerValue&', u'ns3::ObjectMapValue&')
module.add_typedef(root_module['ns3::ObjectPtrContainerValue'], 'ObjectMapValue')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )', u'ns3::LogTimePrinter')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )*', u'ns3::LogTimePrinter*')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )&', u'ns3::LogTimePrinter&')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )', u'ns3::LogNodePrinter')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )*', u'ns3::LogNodePrinter*')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )&', u'ns3::LogNodePrinter&')
## Register a nested module for the namespace CommandLineHelper
nested_module = module.add_cpp_namespace('CommandLineHelper')
register_types_ns3_CommandLineHelper(nested_module)
## Register a nested module for the namespace Config
nested_module = module.add_cpp_namespace('Config')
register_types_ns3_Config(nested_module)
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
## Register a nested module for the namespace SystemPath
nested_module = module.add_cpp_namespace('SystemPath')
register_types_ns3_SystemPath(nested_module)
## Register a nested module for the namespace TracedValueCallback
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module)
## Register a nested module for the namespace internal
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module)
## Register a nested module for the namespace tests
nested_module = module.add_cpp_namespace('tests')
register_types_ns3_tests(nested_module)
def register_types_ns3_CommandLineHelper(module):
root_module = module.get_root()
def register_types_ns3_Config(module):
root_module = module.get_root()
## config.h (module 'core'): ns3::Config::MatchContainer [class]
module.add_class('MatchContainer')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Object > > const_iterator', u'ns3::Config::MatchContainer::Iterator')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Object > > const_iterator*', u'ns3::Config::MatchContainer::Iterator*')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Object > > const_iterator&', u'ns3::Config::MatchContainer::Iterator&')
module.add_container('std::vector< ns3::Ptr< ns3::Object > >', 'ns3::Ptr< ns3::Object >', container_type=u'vector')
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, std::size_t const )', u'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, std::size_t const )*', u'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, std::size_t const )&', u'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, std::size_t const )', u'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, std::size_t const )*', u'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, std::size_t const )&', u'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', parent=root_module['ns3::Hash::Implementation'])
def register_types_ns3_SystemPath(module):
root_module = module.get_root()
module.add_container('std::list< std::string >', 'std::string', container_type=u'list')
def register_types_ns3_TracedValueCallback(module):
root_module = module.get_root()
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time )', u'ns3::TracedValueCallback::Time')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time )*', u'ns3::TracedValueCallback::Time*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time )&', u'ns3::TracedValueCallback::Time&')
typehandlers.add_type_alias(u'void ( * ) ( bool, bool )', u'ns3::TracedValueCallback::Bool')
typehandlers.add_type_alias(u'void ( * ) ( bool, bool )*', u'ns3::TracedValueCallback::Bool*')
typehandlers.add_type_alias(u'void ( * ) ( bool, bool )&', u'ns3::TracedValueCallback::Bool&')
typehandlers.add_type_alias(u'void ( * ) ( int8_t, int8_t )', u'ns3::TracedValueCallback::Int8')
typehandlers.add_type_alias(u'void ( * ) ( int8_t, int8_t )*', u'ns3::TracedValueCallback::Int8*')
typehandlers.add_type_alias(u'void ( * ) ( int8_t, int8_t )&', u'ns3::TracedValueCallback::Int8&')
typehandlers.add_type_alias(u'void ( * ) ( uint8_t, uint8_t )', u'ns3::TracedValueCallback::Uint8')
typehandlers.add_type_alias(u'void ( * ) ( uint8_t, uint8_t )*', u'ns3::TracedValueCallback::Uint8*')
typehandlers.add_type_alias(u'void ( * ) ( uint8_t, uint8_t )&', u'ns3::TracedValueCallback::Uint8&')
typehandlers.add_type_alias(u'void ( * ) ( int16_t, int16_t )', u'ns3::TracedValueCallback::Int16')
typehandlers.add_type_alias(u'void ( * ) ( int16_t, int16_t )*', u'ns3::TracedValueCallback::Int16*')
typehandlers.add_type_alias(u'void ( * ) ( int16_t, int16_t )&', u'ns3::TracedValueCallback::Int16&')
typehandlers.add_type_alias(u'void ( * ) ( uint16_t, uint16_t )', u'ns3::TracedValueCallback::Uint16')
typehandlers.add_type_alias(u'void ( * ) ( uint16_t, uint16_t )*', u'ns3::TracedValueCallback::Uint16*')
typehandlers.add_type_alias(u'void ( * ) ( uint16_t, uint16_t )&', u'ns3::TracedValueCallback::Uint16&')
typehandlers.add_type_alias(u'void ( * ) ( int32_t, int32_t )', u'ns3::TracedValueCallback::Int32')
typehandlers.add_type_alias(u'void ( * ) ( int32_t, int32_t )*', u'ns3::TracedValueCallback::Int32*')
typehandlers.add_type_alias(u'void ( * ) ( int32_t, int32_t )&', u'ns3::TracedValueCallback::Int32&')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )', u'ns3::TracedValueCallback::Uint32')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )*', u'ns3::TracedValueCallback::Uint32*')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )&', u'ns3::TracedValueCallback::Uint32&')
typehandlers.add_type_alias(u'void ( * ) ( double, double )', u'ns3::TracedValueCallback::Double')
typehandlers.add_type_alias(u'void ( * ) ( double, double )*', u'ns3::TracedValueCallback::Double*')
typehandlers.add_type_alias(u'void ( * ) ( double, double )&', u'ns3::TracedValueCallback::Double&')
typehandlers.add_type_alias(u'void ( * ) ( )', u'ns3::TracedValueCallback::Void')
typehandlers.add_type_alias(u'void ( * ) ( )*', u'ns3::TracedValueCallback::Void*')
typehandlers.add_type_alias(u'void ( * ) ( )&', u'ns3::TracedValueCallback::Void&')
def register_types_ns3_internal(module):
root_module = module.get_root()
def register_types_ns3_tests(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3CommandLine_methods(root_module, root_module['ns3::CommandLine'])
register_Ns3CriticalSection_methods(root_module, root_module['ns3::CriticalSection'])
register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, root_module['ns3::DefaultDeleter< ns3::EventImpl >'])
register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
register_Ns3DefaultDeleter__Ns3SystemThread_methods(root_module, root_module['ns3::DefaultDeleter< ns3::SystemThread >'])
register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
register_Ns3EventGarbageCollector_methods(root_module, root_module['ns3::EventGarbageCollector'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3GlobalValue_methods(root_module, root_module['ns3::GlobalValue'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3IntToType__0_methods(root_module, root_module['ns3::IntToType< 0 >'])
register_Ns3IntToType__1_methods(root_module, root_module['ns3::IntToType< 1 >'])
register_Ns3IntToType__2_methods(root_module, root_module['ns3::IntToType< 2 >'])
register_Ns3IntToType__3_methods(root_module, root_module['ns3::IntToType< 3 >'])
register_Ns3IntToType__4_methods(root_module, root_module['ns3::IntToType< 4 >'])
register_Ns3IntToType__5_methods(root_module, root_module['ns3::IntToType< 5 >'])
register_Ns3IntToType__6_methods(root_module, root_module['ns3::IntToType< 6 >'])
register_Ns3LogComponent_methods(root_module, root_module['ns3::LogComponent'])
register_Ns3Names_methods(root_module, root_module['ns3::Names'])
register_Ns3NonCopyable_methods(root_module, root_module['ns3::NonCopyable'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3ParameterLogger_methods(root_module, root_module['ns3::ParameterLogger'])
register_Ns3RandomVariableStreamHelper_methods(root_module, root_module['ns3::RandomVariableStreamHelper'])
register_Ns3RngSeedManager_methods(root_module, root_module['ns3::RngSeedManager'])
register_Ns3RngStream_methods(root_module, root_module['ns3::RngStream'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
register_Ns3Singleton__Ns3DesMetrics_methods(root_module, root_module['ns3::Singleton< ns3::DesMetrics >'])
register_Ns3SystemCondition_methods(root_module, root_module['ns3::SystemCondition'])
register_Ns3SystemMutex_methods(root_module, root_module['ns3::SystemMutex'])
register_Ns3SystemWallClockMs_methods(root_module, root_module['ns3::SystemWallClockMs'])
register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
register_Ns3Timer_methods(root_module, root_module['ns3::Timer'])
register_Ns3TimerImpl_methods(root_module, root_module['ns3::TimerImpl'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D'])
register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D'])
register_Ns3Watchdog_methods(root_module, root_module['ns3::Watchdog'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3DesMetrics_methods(root_module, root_module['ns3::DesMetrics'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
register_Ns3Scheduler_methods(root_module, root_module['ns3::Scheduler'])
register_Ns3SchedulerEvent_methods(root_module, root_module['ns3::Scheduler::Event'])
register_Ns3SchedulerEventKey_methods(root_module, root_module['ns3::Scheduler::EventKey'])
register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3FdReader_Ns3Empty_Ns3DefaultDeleter__lt__ns3FdReader__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3RefCountBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3RefCountBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >'])
register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3SimulatorImpl_methods(root_module, root_module['ns3::SimulatorImpl'])
register_Ns3Synchronizer_methods(root_module, root_module['ns3::Synchronizer'])
register_Ns3SystemThread_methods(root_module, root_module['ns3::SystemThread'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
register_Ns3WallClockSynchronizer_methods(root_module, root_module['ns3::WallClockSynchronizer'])
register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3BooleanChecker_methods(root_module, root_module['ns3::BooleanChecker'])
register_Ns3BooleanValue_methods(root_module, root_module['ns3::BooleanValue'])
register_Ns3CalendarScheduler_methods(root_module, root_module['ns3::CalendarScheduler'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
register_Ns3DefaultSimulatorImpl_methods(root_module, root_module['ns3::DefaultSimulatorImpl'])
register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
register_Ns3DoubleValue_methods(root_module, root_module['ns3::DoubleValue'])
register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
register_Ns3FdReader_methods(root_module, root_module['ns3::FdReader'])
register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
register_Ns3HeapScheduler_methods(root_module, root_module['ns3::HeapScheduler'])
register_Ns3IntegerValue_methods(root_module, root_module['ns3::IntegerValue'])
register_Ns3ListScheduler_methods(root_module, root_module['ns3::ListScheduler'])
register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
register_Ns3MapScheduler_methods(root_module, root_module['ns3::MapScheduler'])
register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3ObjectPtrContainerAccessor_methods(root_module, root_module['ns3::ObjectPtrContainerAccessor'])
register_Ns3ObjectPtrContainerChecker_methods(root_module, root_module['ns3::ObjectPtrContainerChecker'])
register_Ns3ObjectPtrContainerValue_methods(root_module, root_module['ns3::ObjectPtrContainerValue'])
register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
register_Ns3PointerChecker_methods(root_module, root_module['ns3::PointerChecker'])
register_Ns3PointerValue_methods(root_module, root_module['ns3::PointerValue'])
register_Ns3RealtimeSimulatorImpl_methods(root_module, root_module['ns3::RealtimeSimulatorImpl'])
register_Ns3RefCountBase_methods(root_module, root_module['ns3::RefCountBase'])
register_Ns3StringChecker_methods(root_module, root_module['ns3::StringChecker'])
register_Ns3StringValue_methods(root_module, root_module['ns3::StringValue'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker'])
register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue'])
register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker'])
register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue'])
register_Ns3CallbackImpl__Bool_StdBasic_string__lt__char__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< bool, std::basic_string<char>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Unsigned_char___star___Long_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, unsigned char *, long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3ConfigMatchContainer_methods(root_module, root_module['ns3::Config::MatchContainer'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
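# register_methods() is a pure dispatcher: each line looks a wrapped class up in
# the root-module registry by its fully qualified C++ name and hands it to the
# matching register_Ns3*_methods() helper defined below. One entry expands to
# roughly:
#
#   cls = root_module['ns3::EventId']            # CppClass created during type registration
#   register_Ns3EventId_methods(root_module, cls)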
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'ns3::AttributeConstructionList::CIterator',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'ns3::AttributeConstructionList::CIterator',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
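# Each cls.add_method() call above mirrors one C++ member function: the method
# name, the return type as a C++ type string, a list of param() descriptors, and
# keyword flags such as is_const=True for cv-qualifiers. The Find() registration,
# for example, corresponds to the declaration quoted in its '##' comment:
#
#   ns3::Ptr<ns3::AttributeValue> Find (ns3::Ptr<const ns3::AttributeChecker> checker) const;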
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
return
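# The visibility='protected' flag reproduces the access level of the C++
# constructor: pybindgen keeps it off the public Python wrapper and only
# reachable through the generated helper (subclassing) machinery.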
def register_Ns3CommandLine_methods(root_module, cls):
cls.add_output_stream_operator()
## command-line.h (module 'core'): ns3::CommandLine::CommandLine() [constructor]
cls.add_constructor([])
## command-line.h (module 'core'): ns3::CommandLine::CommandLine(ns3::CommandLine const & cmd) [constructor]
cls.add_constructor([param('ns3::CommandLine const &', 'cmd')])
## command-line.h (module 'core'): void ns3::CommandLine::AddValue(std::string const & name, std::string const & help, ns3::Callback<bool, std::basic_string<char>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddValue',
'void',
[param('std::string const &', 'name'), param('std::string const &', 'help'), param('ns3::Callback< bool, std::basic_string< char >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
## command-line.h (module 'core'): void ns3::CommandLine::AddValue(std::string const & name, std::string const & attributePath) [member function]
cls.add_method('AddValue',
'void',
[param('std::string const &', 'name'), param('std::string const &', 'attributePath')])
## command-line.h (module 'core'): std::string ns3::CommandLine::GetExtraNonOption(std::size_t i) const [member function]
cls.add_method('GetExtraNonOption',
'std::string',
[param('std::size_t', 'i')],
is_const=True)
## command-line.h (module 'core'): std::size_t ns3::CommandLine::GetNExtraNonOptions() const [member function]
cls.add_method('GetNExtraNonOptions',
'std::size_t',
[],
is_const=True)
## command-line.h (module 'core'): std::string ns3::CommandLine::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## command-line.h (module 'core'): void ns3::CommandLine::Parse(int argc, char * * argv) [member function]
cls.add_method('Parse',
'void',
[param('int', 'argc'), param('char * *', 'argv')])
## command-line.h (module 'core'): void ns3::CommandLine::Parse(std::vector<std::basic_string<char>, std::allocator<std::basic_string<char> > > args) [member function]
cls.add_method('Parse',
'void',
[param('std::vector< std::string >', 'args')])
## command-line.h (module 'core'): void ns3::CommandLine::PrintHelp(std::ostream & os) const [member function]
cls.add_method('PrintHelp',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## command-line.h (module 'core'): void ns3::CommandLine::Usage(std::string const usage) [member function]
cls.add_method('Usage',
'void',
[param('std::string const', 'usage')])
return
def register_Ns3CriticalSection_methods(root_module, cls):
## system-mutex.h (module 'core'): ns3::CriticalSection::CriticalSection(ns3::SystemMutex & mutex) [constructor]
cls.add_constructor([param('ns3::SystemMutex &', 'mutex')])
## system-mutex.h (module 'core'): ns3::CriticalSection::CriticalSection(ns3::CriticalSection const & arg0) [constructor]
cls.add_constructor([param('ns3::CriticalSection const &', 'arg0')])
return
def register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeAccessor> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeAccessor > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeAccessor>::Delete(ns3::AttributeAccessor * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeAccessor *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeChecker> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeChecker > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeChecker>::Delete(ns3::AttributeChecker * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeChecker *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeValue> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeValue > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeValue>::Delete(ns3::AttributeValue * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeValue *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase>::DefaultDeleter(ns3::DefaultDeleter<ns3::CallbackImplBase> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::CallbackImplBase > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::CallbackImplBase>::Delete(ns3::CallbackImplBase * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::CallbackImplBase *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl>::DefaultDeleter(ns3::DefaultDeleter<ns3::EventImpl> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::EventImpl > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::EventImpl>::Delete(ns3::EventImpl * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::EventImpl *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation>::DefaultDeleter(ns3::DefaultDeleter<ns3::Hash::Implementation> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::Hash::Implementation > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::Hash::Implementation>::Delete(ns3::Hash::Implementation * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Hash::Implementation *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3SystemThread_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::SystemThread>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::SystemThread>::DefaultDeleter(ns3::DefaultDeleter<ns3::SystemThread> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::SystemThread > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::SystemThread>::Delete(ns3::SystemThread * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::SystemThread *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor>::DefaultDeleter(ns3::DefaultDeleter<ns3::TraceSourceAccessor> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::TraceSourceAccessor > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::TraceSourceAccessor>::Delete(ns3::TraceSourceAccessor * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::TraceSourceAccessor *', 'object')],
is_static=True)
return
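# The eight register_Ns3DefaultDeleter__*_methods() functions above are all one
# pattern: every explicit instantiation of the C++ template ns3::DefaultDeleter<T>
# becomes its own wrapped class with a default constructor, a copy constructor,
# and a static Delete(T *) method. Only the type T varies between them.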
def register_Ns3EventGarbageCollector_methods(root_module, cls):
## event-garbage-collector.h (module 'core'): ns3::EventGarbageCollector::EventGarbageCollector() [constructor]
cls.add_constructor([])
## event-garbage-collector.h (module 'core'): void ns3::EventGarbageCollector::Track(ns3::EventId event) [member function]
cls.add_method('Track',
'void',
[param('ns3::EventId', 'event')])
## event-garbage-collector.h (module 'core'): ns3::EventGarbageCollector::EventGarbageCollector(ns3::EventGarbageCollector const & arg0) [constructor]
cls.add_constructor([param('ns3::EventGarbageCollector const &', 'arg0')])
return
def register_Ns3EventId_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [constructor]
cls.add_constructor([param('ns3::EventId const &', 'arg0')])
## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
cls.add_constructor([])
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
cls.add_method('GetTs',
'uint64_t',
[],
is_const=True)
## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
cls.add_method('GetUid',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
cls.add_method('IsExpired',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
cls.add_method('IsRunning',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
cls.add_method('PeekEventImpl',
'ns3::EventImpl *',
[],
is_const=True)
return
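# add_binary_comparison_operator('==') / ('!=') / ('<') generate the Python
# rich-comparison slots and forward them to the matching C++ operators, so
# wrapped EventId instances can be compared and ordered directly from Python.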
def register_Ns3GlobalValue_methods(root_module, cls):
## global-value.h (module 'core'): ns3::GlobalValue::GlobalValue(ns3::GlobalValue const & arg0) [constructor]
cls.add_constructor([param('ns3::GlobalValue const &', 'arg0')])
## global-value.h (module 'core'): ns3::GlobalValue::GlobalValue(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeChecker> checker) [constructor]
cls.add_constructor([param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## global-value.h (module 'core'): static ns3::GlobalValue::Iterator ns3::GlobalValue::Begin() [member function]
cls.add_method('Begin',
'ns3::GlobalValue::Iterator',
[],
is_static=True)
## global-value.h (module 'core'): static void ns3::GlobalValue::Bind(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Bind',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')],
is_static=True)
## global-value.h (module 'core'): static bool ns3::GlobalValue::BindFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('BindFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')],
is_static=True)
## global-value.h (module 'core'): static ns3::GlobalValue::Iterator ns3::GlobalValue::End() [member function]
cls.add_method('End',
'ns3::GlobalValue::Iterator',
[],
is_static=True)
## global-value.h (module 'core'): ns3::Ptr<const ns3::AttributeChecker> ns3::GlobalValue::GetChecker() const [member function]
cls.add_method('GetChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[],
is_const=True)
## global-value.h (module 'core'): std::string ns3::GlobalValue::GetHelp() const [member function]
cls.add_method('GetHelp',
'std::string',
[],
is_const=True)
## global-value.h (module 'core'): std::string ns3::GlobalValue::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## global-value.h (module 'core'): void ns3::GlobalValue::GetValue(ns3::AttributeValue & value) const [member function]
cls.add_method('GetValue',
'void',
[param('ns3::AttributeValue &', 'value')],
is_const=True)
## global-value.h (module 'core'): static void ns3::GlobalValue::GetValueByName(std::string name, ns3::AttributeValue & value) [member function]
cls.add_method('GetValueByName',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_static=True)
## global-value.h (module 'core'): static bool ns3::GlobalValue::GetValueByNameFailSafe(std::string name, ns3::AttributeValue & value) [member function]
cls.add_method('GetValueByNameFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_static=True)
## global-value.h (module 'core'): void ns3::GlobalValue::ResetInitialValue() [member function]
cls.add_method('ResetInitialValue',
'void',
[])
## global-value.h (module 'core'): bool ns3::GlobalValue::SetValue(ns3::AttributeValue const & value) [member function]
cls.add_method('SetValue',
'bool',
[param('ns3::AttributeValue const &', 'value')])
return
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
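# GetHash32() and GetHash64() are each registered twice with different parameter
# lists; pybindgen resolves such overloads at call time by trying each registered
# signature in turn, so Python callers may pass either (buffer, size) or a single
# string.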
def register_Ns3IntToType__0_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<0>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<0>::IntToType(ns3::IntToType<0> const & arg0) [constructor]
cls.add_constructor([param('ns3::IntToType< 0 > const &', 'arg0')])
return
def register_Ns3IntToType__1_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<1>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<1>::IntToType(ns3::IntToType<1> const & arg0) [constructor]
cls.add_constructor([param('ns3::IntToType< 1 > const &', 'arg0')])
return
def register_Ns3IntToType__2_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<2>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<2>::IntToType(ns3::IntToType<2> const & arg0) [constructor]
cls.add_constructor([param('ns3::IntToType< 2 > const &', 'arg0')])
return
def register_Ns3IntToType__3_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<3>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<3>::IntToType(ns3::IntToType<3> const & arg0) [constructor]
cls.add_constructor([param('ns3::IntToType< 3 > const &', 'arg0')])
return
def register_Ns3IntToType__4_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<4>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<4>::IntToType(ns3::IntToType<4> const & arg0) [constructor]
cls.add_constructor([param('ns3::IntToType< 4 > const &', 'arg0')])
return
def register_Ns3IntToType__5_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<5>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<5>::IntToType(ns3::IntToType<5> const & arg0) [constructor]
cls.add_constructor([param('ns3::IntToType< 5 > const &', 'arg0')])
return
def register_Ns3IntToType__6_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<6>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<6>::IntToType(ns3::IntToType<6> const & arg0) [constructor]
cls.add_constructor([param('ns3::IntToType< 6 > const &', 'arg0')])
return
def register_Ns3LogComponent_methods(root_module, cls):
## log.h (module 'core'): ns3::LogComponent::LogComponent(ns3::LogComponent const & arg0) [constructor]
cls.add_constructor([param('ns3::LogComponent const &', 'arg0')])
## log.h (module 'core'): ns3::LogComponent::LogComponent(std::string const & name, std::string const & file, ns3::LogLevel const mask=::ns3::LogLevel::LOG_NONE) [constructor]
cls.add_constructor([param('std::string const &', 'name'), param('std::string const &', 'file'), param('ns3::LogLevel const', 'mask', default_value='::ns3::LogLevel::LOG_NONE')])
## log.h (module 'core'): void ns3::LogComponent::Disable(ns3::LogLevel const level) [member function]
cls.add_method('Disable',
'void',
[param('ns3::LogLevel const', 'level')])
## log.h (module 'core'): void ns3::LogComponent::Enable(ns3::LogLevel const level) [member function]
cls.add_method('Enable',
'void',
[param('ns3::LogLevel const', 'level')])
## log.h (module 'core'): std::string ns3::LogComponent::File() const [member function]
cls.add_method('File',
'std::string',
[],
is_const=True)
## log.h (module 'core'): static ns3::LogComponent::ComponentList * ns3::LogComponent::GetComponentList() [member function]
cls.add_method('GetComponentList',
'ns3::LogComponent::ComponentList *',
[],
is_static=True)
## log.h (module 'core'): static std::string ns3::LogComponent::GetLevelLabel(ns3::LogLevel const level) [member function]
cls.add_method('GetLevelLabel',
'std::string',
[param('ns3::LogLevel const', 'level')],
is_static=True)
## log.h (module 'core'): bool ns3::LogComponent::IsEnabled(ns3::LogLevel const level) const [member function]
cls.add_method('IsEnabled',
'bool',
[param('ns3::LogLevel const', 'level')],
is_const=True)
## log.h (module 'core'): bool ns3::LogComponent::IsNoneEnabled() const [member function]
cls.add_method('IsNoneEnabled',
'bool',
[],
is_const=True)
## log.h (module 'core'): char const * ns3::LogComponent::Name() const [member function]
cls.add_method('Name',
'char const *',
[],
is_const=True)
## log.h (module 'core'): void ns3::LogComponent::SetMask(ns3::LogLevel const level) [member function]
cls.add_method('SetMask',
'void',
[param('ns3::LogLevel const', 'level')])
return
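# The default_value keyword on the LogComponent constructor's 'mask' parameter
# reproduces the C++ default argument quoted in its '##' comment, so the wrapper
# should accept both two- and three-argument construction from Python.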
def register_Ns3Names_methods(root_module, cls):
## names.h (module 'core'): ns3::Names::Names() [constructor]
cls.add_constructor([])
## names.h (module 'core'): ns3::Names::Names(ns3::Names const & arg0) [constructor]
cls.add_constructor([param('ns3::Names const &', 'arg0')])
## names.h (module 'core'): static void ns3::Names::Add(std::string name, ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h (module 'core'): static void ns3::Names::Add(std::string path, std::string name, ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'path'), param('std::string', 'name'), param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h (module 'core'): static void ns3::Names::Add(ns3::Ptr<ns3::Object> context, std::string name, ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::Object >', 'context'), param('std::string', 'name'), param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h (module 'core'): static void ns3::Names::Clear() [member function]
cls.add_method('Clear',
'void',
[],
is_static=True)
## names.h (module 'core'): static std::string ns3::Names::FindName(ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('FindName',
'std::string',
[param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h (module 'core'): static std::string ns3::Names::FindPath(ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('FindPath',
'std::string',
[param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h (module 'core'): static void ns3::Names::Rename(std::string oldpath, std::string newname) [member function]
cls.add_method('Rename',
'void',
[param('std::string', 'oldpath'), param('std::string', 'newname')],
is_static=True)
## names.h (module 'core'): static void ns3::Names::Rename(std::string path, std::string oldname, std::string newname) [member function]
cls.add_method('Rename',
'void',
[param('std::string', 'path'), param('std::string', 'oldname'), param('std::string', 'newname')],
is_static=True)
## names.h (module 'core'): static void ns3::Names::Rename(ns3::Ptr<ns3::Object> context, std::string oldname, std::string newname) [member function]
cls.add_method('Rename',
'void',
[param('ns3::Ptr< ns3::Object >', 'context'), param('std::string', 'oldname'), param('std::string', 'newname')],
is_static=True)
return
def register_Ns3NonCopyable_methods(root_module, cls):
## non-copyable.h (module 'core'): ns3::NonCopyable::NonCopyable() [constructor]
cls.add_constructor([],
visibility='protected')
return
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
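# The flag combinations above encode the C++ declarations faithfully:
# is_pure_virtual=True together with is_virtual=True marks GetInstanceTypeId()
# as abstract (overridable only through the generated helper subclass), while
# visibility='protected' keeps ConstructSelf() and NotifyConstructionCompleted()
# off the public wrapper, matching the header.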
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3ObjectFactory_methods(root_module, cls):
cls.add_output_stream_operator()
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
cls.add_constructor([param('std::string', 'typeId')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('ns3::TypeId', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('char const *', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('std::string', 'tid')])
return
def register_Ns3ParameterLogger_methods(root_module, cls):
## log.h (module 'core'): ns3::ParameterLogger::ParameterLogger(ns3::ParameterLogger const & arg0) [constructor]
cls.add_constructor([param('ns3::ParameterLogger const &', 'arg0')])
## log.h (module 'core'): ns3::ParameterLogger::ParameterLogger(std::ostream & os) [constructor]
cls.add_constructor([param('std::ostream &', 'os')])
return
def register_Ns3RandomVariableStreamHelper_methods(root_module, cls):
## random-variable-stream-helper.h (module 'core'): ns3::RandomVariableStreamHelper::RandomVariableStreamHelper() [constructor]
cls.add_constructor([])
## random-variable-stream-helper.h (module 'core'): ns3::RandomVariableStreamHelper::RandomVariableStreamHelper(ns3::RandomVariableStreamHelper const & arg0) [constructor]
cls.add_constructor([param('ns3::RandomVariableStreamHelper const &', 'arg0')])
## random-variable-stream-helper.h (module 'core'): static int64_t ns3::RandomVariableStreamHelper::AssignStreams(std::string path, int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('std::string', 'path'), param('int64_t', 'stream')],
is_static=True)
return
def register_Ns3RngSeedManager_methods(root_module, cls):
## rng-seed-manager.h (module 'core'): ns3::RngSeedManager::RngSeedManager() [constructor]
cls.add_constructor([])
## rng-seed-manager.h (module 'core'): ns3::RngSeedManager::RngSeedManager(ns3::RngSeedManager const & arg0) [constructor]
cls.add_constructor([param('ns3::RngSeedManager const &', 'arg0')])
## rng-seed-manager.h (module 'core'): static uint64_t ns3::RngSeedManager::GetNextStreamIndex() [member function]
cls.add_method('GetNextStreamIndex',
'uint64_t',
[],
is_static=True)
## rng-seed-manager.h (module 'core'): static uint64_t ns3::RngSeedManager::GetRun() [member function]
cls.add_method('GetRun',
'uint64_t',
[],
is_static=True)
## rng-seed-manager.h (module 'core'): static uint32_t ns3::RngSeedManager::GetSeed() [member function]
cls.add_method('GetSeed',
'uint32_t',
[],
is_static=True)
## rng-seed-manager.h (module 'core'): static void ns3::RngSeedManager::SetRun(uint64_t run) [member function]
cls.add_method('SetRun',
'void',
[param('uint64_t', 'run')],
is_static=True)
## rng-seed-manager.h (module 'core'): static void ns3::RngSeedManager::SetSeed(uint32_t seed) [member function]
cls.add_method('SetSeed',
'void',
[param('uint32_t', 'seed')],
is_static=True)
return
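# Methods registered with is_static=True surface as staticmethods on the Python
# wrapper, so they are called on the class itself. A minimal usage sketch,
# assuming the built bindings are importable as ns.core:
#
#   import ns.core
#   ns.core.RngSeedManager.SetSeed(3)   # fix the RNG seed for reproducible runs
#   ns.core.RngSeedManager.SetRun(7)    # select an independent substream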
def register_Ns3RngStream_methods(root_module, cls):
## rng-stream.h (module 'core'): ns3::RngStream::RngStream(uint32_t seed, uint64_t stream, uint64_t substream) [constructor]
cls.add_constructor([param('uint32_t', 'seed'), param('uint64_t', 'stream'), param('uint64_t', 'substream')])
## rng-stream.h (module 'core'): ns3::RngStream::RngStream(ns3::RngStream const & r) [constructor]
cls.add_constructor([param('ns3::RngStream const &', 'r')])
## rng-stream.h (module 'core'): double ns3::RngStream::RandU01() [member function]
cls.add_method('RandU01',
'double',
[])
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
return
def register_Ns3Simulator_methods(root_module, cls):
## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [constructor]
cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function]
cls.add_method('GetImplementation',
'ns3::Ptr< ns3::SimulatorImpl >',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function]
cls.add_method('IsFinished',
'bool',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function]
cls.add_method('SetImplementation',
'void',
[param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & delay) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'delay')],
is_static=True)
return
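# Simulator is wrapped almost entirely as staticmethods (plus the generated copy
# constructor). Note that Stop() is registered twice -- once with no arguments
# and once taking a Time delay -- and pybindgen dispatches between the two forms
# at call time based on the arguments supplied.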
def register_Ns3Singleton__Ns3DesMetrics_methods(root_module, cls):
## singleton.h (module 'core'): static ns3::DesMetrics * ns3::Singleton<ns3::DesMetrics>::Get() [member function]
cls.add_method('Get',
'ns3::DesMetrics *',
[],
is_static=True)
## singleton.h (module 'core'): ns3::Singleton<ns3::DesMetrics>::Singleton() [constructor]
cls.add_constructor([])
return
def register_Ns3SystemCondition_methods(root_module, cls):
## system-condition.h (module 'core'): ns3::SystemCondition::SystemCondition(ns3::SystemCondition const & arg0) [constructor]
cls.add_constructor([param('ns3::SystemCondition const &', 'arg0')])
## system-condition.h (module 'core'): ns3::SystemCondition::SystemCondition() [constructor]
cls.add_constructor([])
## system-condition.h (module 'core'): void ns3::SystemCondition::Broadcast() [member function]
cls.add_method('Broadcast',
'void',
[])
## system-condition.h (module 'core'): bool ns3::SystemCondition::GetCondition() [member function]
cls.add_method('GetCondition',
'bool',
[])
## system-condition.h (module 'core'): void ns3::SystemCondition::SetCondition(bool condition) [member function]
cls.add_method('SetCondition',
'void',
[param('bool', 'condition')])
## system-condition.h (module 'core'): void ns3::SystemCondition::Signal() [member function]
cls.add_method('Signal',
'void',
[])
## system-condition.h (module 'core'): bool ns3::SystemCondition::TimedWait(uint64_t ns) [member function]
cls.add_method('TimedWait',
'bool',
[param('uint64_t', 'ns')])
## system-condition.h (module 'core'): void ns3::SystemCondition::Wait() [member function]
cls.add_method('Wait',
'void',
[])
return
def register_Ns3SystemMutex_methods(root_module, cls):
## system-mutex.h (module 'core'): ns3::SystemMutex::SystemMutex(ns3::SystemMutex const & arg0) [constructor]
cls.add_constructor([param('ns3::SystemMutex const &', 'arg0')])
## system-mutex.h (module 'core'): ns3::SystemMutex::SystemMutex() [constructor]
cls.add_constructor([])
## system-mutex.h (module 'core'): void ns3::SystemMutex::Lock() [member function]
cls.add_method('Lock',
'void',
[])
## system-mutex.h (module 'core'): void ns3::SystemMutex::Unlock() [member function]
cls.add_method('Unlock',
'void',
[])
return
def register_Ns3SystemWallClockMs_methods(root_module, cls):
## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs::SystemWallClockMs(ns3::SystemWallClockMs const & arg0) [constructor]
cls.add_constructor([param('ns3::SystemWallClockMs const &', 'arg0')])
## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs::SystemWallClockMs() [constructor]
cls.add_constructor([])
## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::End() [member function]
cls.add_method('End',
'int64_t',
[])
## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedReal() const [member function]
cls.add_method('GetElapsedReal',
'int64_t',
[],
is_const=True)
## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedSystem() const [member function]
cls.add_method('GetElapsedSystem',
'int64_t',
[],
is_const=True)
## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedUser() const [member function]
cls.add_method('GetElapsedUser',
'int64_t',
[],
is_const=True)
## system-wall-clock-ms.h (module 'core'): void ns3::SystemWallClockMs::Start() [member function]
cls.add_method('Start',
'void',
[])
return
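# Usage sketch: SystemWallClockMs measures elapsed wall-clock time in
# milliseconds between Start() and End(). Assuming `ns.core` is importable:
#
#   clock = ns.core.SystemWallClockMs()
#   clock.Start()
#   # ... run some work ...
#   elapsed_ms = clock.End()            # wall-clock milliseconds since Start()
#   real_ms = clock.GetElapsedReal()    # same figure, queryable again later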
def register_Ns3TimeWithUnit_methods(root_module, cls):
cls.add_output_stream_operator()
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [constructor]
cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
return
def register_Ns3Timer_methods(root_module, cls):
## timer.h (module 'core'): ns3::Timer::Timer(ns3::Timer const & arg0) [constructor]
cls.add_constructor([param('ns3::Timer const &', 'arg0')])
## timer.h (module 'core'): ns3::Timer::Timer() [constructor]
cls.add_constructor([])
## timer.h (module 'core'): ns3::Timer::Timer(ns3::Timer::DestroyPolicy destroyPolicy) [constructor]
cls.add_constructor([param('ns3::Timer::DestroyPolicy', 'destroyPolicy')])
## timer.h (module 'core'): void ns3::Timer::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## timer.h (module 'core'): ns3::Time ns3::Timer::GetDelay() const [member function]
cls.add_method('GetDelay',
'ns3::Time',
[],
is_const=True)
## timer.h (module 'core'): ns3::Time ns3::Timer::GetDelayLeft() const [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[],
is_const=True)
## timer.h (module 'core'): ns3::Timer::State ns3::Timer::GetState() const [member function]
cls.add_method('GetState',
'ns3::Timer::State',
[],
is_const=True)
## timer.h (module 'core'): bool ns3::Timer::IsExpired() const [member function]
cls.add_method('IsExpired',
'bool',
[],
is_const=True)
## timer.h (module 'core'): bool ns3::Timer::IsRunning() const [member function]
cls.add_method('IsRunning',
'bool',
[],
is_const=True)
## timer.h (module 'core'): bool ns3::Timer::IsSuspended() const [member function]
cls.add_method('IsSuspended',
'bool',
[],
is_const=True)
## timer.h (module 'core'): void ns3::Timer::Remove() [member function]
cls.add_method('Remove',
'void',
[])
## timer.h (module 'core'): void ns3::Timer::Resume() [member function]
cls.add_method('Resume',
'void',
[])
## timer.h (module 'core'): void ns3::Timer::Schedule() [member function]
cls.add_method('Schedule',
'void',
[])
## timer.h (module 'core'): void ns3::Timer::Schedule(ns3::Time delay) [member function]
cls.add_method('Schedule',
'void',
[param('ns3::Time', 'delay')])
## timer.h (module 'core'): void ns3::Timer::SetDelay(ns3::Time const & delay) [member function]
cls.add_method('SetDelay',
'void',
[param('ns3::Time const &', 'delay')])
## timer.h (module 'core'): void ns3::Timer::Suspend() [member function]
cls.add_method('Suspend',
'void',
[])
return
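# Usage sketch for the Timer bindings above (illustrative; assumes `ns.core`
# is importable and that a callback has been attached via the template-based
# SetFunction machinery, which is bound elsewhere):
#
#   timer = ns.core.Timer()
#   timer.SetDelay(ns.core.Seconds(2.0))
#   timer.Schedule()                    # fire after the configured delay
#   if timer.IsRunning():
#       print('time left:', timer.GetDelayLeft().GetSeconds())
#   timer.Cancel()                      # cancel the pending event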
def register_Ns3TimerImpl_methods(root_module, cls):
## timer-impl.h (module 'core'): ns3::TimerImpl::TimerImpl() [constructor]
cls.add_constructor([])
## timer-impl.h (module 'core'): ns3::TimerImpl::TimerImpl(ns3::TimerImpl const & arg0) [constructor]
cls.add_constructor([param('ns3::TimerImpl const &', 'arg0')])
## timer-impl.h (module 'core'): void ns3::TimerImpl::Invoke() [member function]
cls.add_method('Invoke',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## timer-impl.h (module 'core'): ns3::EventId ns3::TimerImpl::Schedule(ns3::Time const & delay) [member function]
cls.add_method('Schedule',
'ns3::EventId',
[param('ns3::Time const &', 'delay')],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<const ns3::TraceSourceAccessor> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
deprecated=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<const ns3::TraceSourceAccessor> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(std::size_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(std::size_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::hash_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'ns3::TypeId::hash_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint16_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint16_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint16_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint16_t',
[],
is_static=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
cls.add_method('GetSize',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(std::size_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(ns3::TypeId::hash_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(ns3::TypeId::hash_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(std::size_t i, ns3::Ptr<const ns3::AttributeValue> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('std::size_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
cls.add_method('SetSize',
'ns3::TypeId',
[param('std::size_t', 'size')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'uid')])
return
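# Usage sketch for the TypeId bindings above: the static lookups and const
# accessors make the type metadata browsable from Python. Assuming `ns.core`
# is importable:
#
#   tid = ns.core.TypeId.LookupByName('ns3::RandomVariableStream')
#   print(tid.GetName(), 'has', tid.GetAttributeN(), 'attributes')
#   for i in range(tid.GetAttributeN()):
#       info = tid.GetAttribute(i)
#       print(' ', info.name, '-', info.help)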
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
cls.add_instance_attribute('callback', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3Vector2D_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<')
cls.add_binary_numeric_operator('-', root_module['ns3::Vector2D'], root_module['ns3::Vector2D'], param('ns3::Vector2D const &', u'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::Vector2D'], root_module['ns3::Vector2D'], param('ns3::Vector2D const &', u'right'))
## vector.h (module 'core'): ns3::Vector2D::Vector2D(ns3::Vector2D const & arg0) [constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D(double _x, double _y) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): double ns3::Vector2D::GetLength() const [member function]
cls.add_method('GetLength',
'double',
[],
is_const=True)
## vector.h (module 'core'): ns3::Vector2D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector2D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
return
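# Usage sketch: the binary '+'/'-' operators and GetLength() registered above
# give Vector2D natural arithmetic in Python (assuming `ns.core` imports):
#
#   a = ns.core.Vector2D(3.0, 4.0)
#   b = ns.core.Vector2D(1.0, 1.0)
#   print((a - b).GetLength())   # Euclidean length of the difference
#   print(a.x, a.y)              # instance attributes bound as plain fields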
def register_Ns3Vector3D_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<')
cls.add_binary_numeric_operator('-', root_module['ns3::Vector3D'], root_module['ns3::Vector3D'], param('ns3::Vector3D const &', u'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::Vector3D'], root_module['ns3::Vector3D'], param('ns3::Vector3D const &', u'right'))
## vector.h (module 'core'): ns3::Vector3D::Vector3D(ns3::Vector3D const & arg0) [constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D(double _x, double _y, double _z) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): double ns3::Vector3D::GetLength() const [member function]
cls.add_method('GetLength',
'double',
[],
is_const=True)
## vector.h (module 'core'): ns3::Vector3D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::z [variable]
cls.add_instance_attribute('z', 'double', is_const=False)
return
def register_Ns3Watchdog_methods(root_module, cls):
## watchdog.h (module 'core'): ns3::Watchdog::Watchdog(ns3::Watchdog const & arg0) [constructor]
cls.add_constructor([param('ns3::Watchdog const &', 'arg0')])
## watchdog.h (module 'core'): ns3::Watchdog::Watchdog() [constructor]
cls.add_constructor([])
## watchdog.h (module 'core'): void ns3::Watchdog::Ping(ns3::Time delay) [member function]
cls.add_method('Ping',
'void',
[param('ns3::Time', 'delay')])
return
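# Usage sketch for the Watchdog binding above: each Ping(delay) pushes the
# expiration further out, so the watchdog only fires if pings stop arriving.
# Illustrative, assuming `ns.core` is importable:
#
#   dog = ns.core.Watchdog()
#   dog.Ping(ns.core.Seconds(1.0))   # must be pinged again within 1 s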
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('>=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right'))
cls.add_unary_numeric_operator('-')
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(double const value) [constructor]
cls.add_constructor([param('double const', 'value')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long double const value) [constructor]
cls.add_constructor([param('long double const', 'value')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(int const v) [constructor]
cls.add_constructor([param('int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long int const v) [constructor]
cls.add_constructor([param('long int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int const v) [constructor]
cls.add_constructor([param('long long int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int const v) [constructor]
cls.add_constructor([param('unsigned int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int const v) [constructor]
cls.add_constructor([param('long unsigned int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int const v) [constructor]
cls.add_constructor([param('long long unsigned int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t const hi, uint64_t const lo) [constructor]
cls.add_constructor([param('int64_t const', 'hi'), param('uint64_t const', 'lo')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-128.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-128.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-128.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-128.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t const v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t const', 'v')],
is_static=True)
## int64x64-128.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::implementation [variable]
cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
return
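# Usage sketch: int64x64_t is a Q64.64 fixed-point number, and the operators
# registered above make it behave like an ordinary numeric type in Python
# (assuming `ns.core` imports):
#
#   x = ns.core.int64x64_t(1.5)
#   y = ns.core.int64x64_t(2, 0)          # hi/lo constructor: integer part 2
#   print((x * y).GetDouble())            # -> 3.0
#   print(x.GetHigh(), hex(x.GetLow()))   # integer word and fractional word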
def register_Ns3DesMetrics_methods(root_module, cls):
## des-metrics.h (module 'core'): void ns3::DesMetrics::Initialize(std::vector<std::basic_string<char>, std::allocator<std::basic_string<char> > > args, std::string outDir="") [member function]
cls.add_method('Initialize',
'void',
[param('std::vector< std::string >', 'args'), param('std::string', 'outDir', default_value='""')])
## des-metrics.h (module 'core'): void ns3::DesMetrics::Trace(ns3::Time const & now, ns3::Time const & delay) [member function]
cls.add_method('Trace',
'void',
[param('ns3::Time const &', 'now'), param('ns3::Time const &', 'delay')])
## des-metrics.h (module 'core'): void ns3::DesMetrics::TraceWithContext(uint32_t context, ns3::Time const & now, ns3::Time const & delay) [member function]
cls.add_method('TraceWithContext',
'void',
[param('uint32_t', 'context'), param('ns3::Time const &', 'now'), param('ns3::Time const &', 'delay')])
## des-metrics.h (module 'core'): ns3::DesMetrics::DesMetrics() [constructor]
cls.add_constructor([])
return
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object> ns3::Object::GetObject(ns3::TypeId tid) const [member function]
cls.add_method('GetObject',
'ns3::Ptr< ns3::Object >',
[param('ns3::TypeId', 'tid')],
is_const=True, template_parameters=[u'ns3::Object'], custom_template_method_name=u'GetObject')
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
cls.add_method('IsInitialized',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<const ns3::Object> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
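# Usage sketch for the Object / Object::AggregateIterator bindings above:
# AggregateObject() glues Objects together and GetAggregateIterator() walks
# the aggregate. Illustrative; assumes `ns.core` imports, and `node` and
# `mobility` are hypothetical Ptr<Object> instances created elsewhere:
#
#   node.AggregateObject(mobility)
#   it = node.GetAggregateIterator()
#   while it.HasNext():
#       obj = it.Next()
#       print(obj.GetInstanceTypeId().GetName())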
def register_Ns3RandomVariableStream_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::RandomVariableStream::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::RandomVariableStream::RandomVariableStream() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetStream(int64_t stream) [member function]
cls.add_method('SetStream',
'void',
[param('int64_t', 'stream')])
## random-variable-stream.h (module 'core'): int64_t ns3::RandomVariableStream::GetStream() const [member function]
cls.add_method('GetStream',
'int64_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetAntithetic(bool isAntithetic) [member function]
cls.add_method('SetAntithetic',
'void',
[param('bool', 'isAntithetic')])
## random-variable-stream.h (module 'core'): bool ns3::RandomVariableStream::IsAntithetic() const [member function]
cls.add_method('IsAntithetic',
'bool',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::RandomVariableStream::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_pure_virtual=True, is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::RandomVariableStream::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_pure_virtual=True, is_virtual=True)
## random-variable-stream.h (module 'core'): ns3::RngStream * ns3::RandomVariableStream::Peek() const [member function]
cls.add_method('Peek',
'ns3::RngStream *',
[],
is_const=True, visibility='protected')
return
def register_Ns3Scheduler_methods(root_module, cls):
## scheduler.h (module 'core'): ns3::Scheduler::Scheduler() [constructor]
cls.add_constructor([])
## scheduler.h (module 'core'): ns3::Scheduler::Scheduler(ns3::Scheduler const & arg0) [constructor]
cls.add_constructor([param('ns3::Scheduler const &', 'arg0')])
## scheduler.h (module 'core'): static ns3::TypeId ns3::Scheduler::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## scheduler.h (module 'core'): void ns3::Scheduler::Insert(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_pure_virtual=True, is_virtual=True)
## scheduler.h (module 'core'): bool ns3::Scheduler::IsEmpty() const [member function]
cls.add_method('IsEmpty',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## scheduler.h (module 'core'): ns3::Scheduler::Event ns3::Scheduler::PeekNext() const [member function]
cls.add_method('PeekNext',
'ns3::Scheduler::Event',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## scheduler.h (module 'core'): void ns3::Scheduler::Remove(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_pure_virtual=True, is_virtual=True)
## scheduler.h (module 'core'): ns3::Scheduler::Event ns3::Scheduler::RemoveNext() [member function]
cls.add_method('RemoveNext',
'ns3::Scheduler::Event',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3SchedulerEvent_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
## scheduler.h (module 'core'): ns3::Scheduler::Event::Event() [constructor]
cls.add_constructor([])
## scheduler.h (module 'core'): ns3::Scheduler::Event::Event(ns3::Scheduler::Event const & arg0) [constructor]
cls.add_constructor([param('ns3::Scheduler::Event const &', 'arg0')])
## scheduler.h (module 'core'): ns3::Scheduler::Event::impl [variable]
cls.add_instance_attribute('impl', 'ns3::EventImpl *', is_const=False)
## scheduler.h (module 'core'): ns3::Scheduler::Event::key [variable]
cls.add_instance_attribute('key', 'ns3::Scheduler::EventKey', is_const=False)
return
def register_Ns3SchedulerEventKey_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('>')
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::EventKey() [constructor]
cls.add_constructor([])
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::EventKey(ns3::Scheduler::EventKey const & arg0) [constructor]
cls.add_constructor([param('ns3::Scheduler::EventKey const &', 'arg0')])
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::m_context [variable]
cls.add_instance_attribute('m_context', 'uint32_t', is_const=False)
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::m_ts [variable]
cls.add_instance_attribute('m_ts', 'uint64_t', is_const=False)
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::m_uid [variable]
cls.add_instance_attribute('m_uid', 'uint32_t', is_const=False)
return
def register_Ns3SequentialRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::SequentialRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable::SequentialRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): ns3::Ptr<ns3::RandomVariableStream> ns3::SequentialRandomVariable::GetIncrement() const [member function]
cls.add_method('GetIncrement',
'ns3::Ptr< ns3::RandomVariableStream >',
[],
is_const=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetConsecutive() const [member function]
cls.add_method('GetConsecutive',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
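# Usage sketch for the SequentialRandomVariable bindings above. Since a
# default constructor is registered, the class can be instantiated directly;
# Min/Max/Increment keep their attribute defaults unless set via the
# attribute system (hedged example, assuming `ns.core` imports):
#
#   seq = ns.core.SequentialRandomVariable()
#   seq.SetStream(1)                        # fix the RNG stream for reproducibility
#   print(seq.GetValue(), seq.GetValue())   # walks Min..Max in Increment steps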
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3FdReader_Ns3Empty_Ns3DefaultDeleter__lt__ns3FdReader__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::SimpleRefCount(ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter< ns3::FdReader > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3RefCountBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3RefCountBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter< ns3::RefCountBase > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount(ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter< ns3::SystemThread > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
return
def register_Ns3SimulatorImpl_methods(root_module, cls):
## simulator-impl.h (module 'core'): ns3::SimulatorImpl::SimulatorImpl() [constructor]
cls.add_constructor([])
## simulator-impl.h (module 'core'): ns3::SimulatorImpl::SimulatorImpl(ns3::SimulatorImpl const & arg0) [constructor]
cls.add_constructor([param('ns3::SimulatorImpl const &', 'arg0')])
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Cancel(ns3::EventId const & id) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'id')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): uint32_t ns3::SimulatorImpl::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::Time ns3::SimulatorImpl::GetDelayLeft(ns3::EventId const & id) const [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::Time ns3::SimulatorImpl::GetMaximumSimulationTime() const [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): uint32_t ns3::SimulatorImpl::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): static ns3::TypeId ns3::SimulatorImpl::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## simulator-impl.h (module 'core'): bool ns3::SimulatorImpl::IsExpired(ns3::EventId const & id) const [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'id')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): bool ns3::SimulatorImpl::IsFinished() const [member function]
cls.add_method('IsFinished',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::Time ns3::SimulatorImpl::Now() const [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Remove(ns3::EventId const & id) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'id')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Run() [member function]
cls.add_method('Run',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::EventId ns3::SimulatorImpl::Schedule(ns3::Time const & delay, ns3::EventImpl * event) [member function]
cls.add_method('Schedule',
'ns3::EventId',
[param('ns3::Time const &', 'delay'), param('ns3::EventImpl *', 'event')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::EventId ns3::SimulatorImpl::ScheduleDestroy(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleDestroy',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::EventId ns3::SimulatorImpl::ScheduleNow(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleNow',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::ScheduleWithContext(uint32_t context, ns3::Time const & delay, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleWithContext',
'void',
[param('uint32_t', 'context'), param('ns3::Time const &', 'delay'), param('ns3::EventImpl *', 'event')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Stop(ns3::Time const & delay) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'delay')],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3Synchronizer_methods(root_module, cls):
## synchronizer.h (module 'core'): ns3::Synchronizer::Synchronizer(ns3::Synchronizer const & arg0) [constructor]
cls.add_constructor([param('ns3::Synchronizer const &', 'arg0')])
## synchronizer.h (module 'core'): ns3::Synchronizer::Synchronizer() [constructor]
cls.add_constructor([])
## synchronizer.h (module 'core'): uint64_t ns3::Synchronizer::EventEnd() [member function]
cls.add_method('EventEnd',
'uint64_t',
[])
## synchronizer.h (module 'core'): void ns3::Synchronizer::EventStart() [member function]
cls.add_method('EventStart',
'void',
[])
## synchronizer.h (module 'core'): uint64_t ns3::Synchronizer::GetCurrentRealtime() [member function]
cls.add_method('GetCurrentRealtime',
'uint64_t',
[])
## synchronizer.h (module 'core'): int64_t ns3::Synchronizer::GetDrift(uint64_t ts) [member function]
cls.add_method('GetDrift',
'int64_t',
[param('uint64_t', 'ts')])
## synchronizer.h (module 'core'): uint64_t ns3::Synchronizer::GetOrigin() [member function]
cls.add_method('GetOrigin',
'uint64_t',
[])
## synchronizer.h (module 'core'): static ns3::TypeId ns3::Synchronizer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## synchronizer.h (module 'core'): bool ns3::Synchronizer::Realtime() [member function]
cls.add_method('Realtime',
'bool',
[])
## synchronizer.h (module 'core'): void ns3::Synchronizer::SetCondition(bool arg0) [member function]
cls.add_method('SetCondition',
'void',
[param('bool', 'arg0')])
## synchronizer.h (module 'core'): void ns3::Synchronizer::SetOrigin(uint64_t ts) [member function]
cls.add_method('SetOrigin',
'void',
[param('uint64_t', 'ts')])
## synchronizer.h (module 'core'): void ns3::Synchronizer::Signal() [member function]
cls.add_method('Signal',
'void',
[])
## synchronizer.h (module 'core'): bool ns3::Synchronizer::Synchronize(uint64_t tsCurrent, uint64_t tsDelay) [member function]
cls.add_method('Synchronize',
'bool',
[param('uint64_t', 'tsCurrent'), param('uint64_t', 'tsDelay')])
## synchronizer.h (module 'core'): uint64_t ns3::Synchronizer::DoEventEnd() [member function]
cls.add_method('DoEventEnd',
'uint64_t',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): void ns3::Synchronizer::DoEventStart() [member function]
cls.add_method('DoEventStart',
'void',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): uint64_t ns3::Synchronizer::DoGetCurrentRealtime() [member function]
cls.add_method('DoGetCurrentRealtime',
'uint64_t',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): int64_t ns3::Synchronizer::DoGetDrift(uint64_t ns) [member function]
cls.add_method('DoGetDrift',
'int64_t',
[param('uint64_t', 'ns')],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): bool ns3::Synchronizer::DoRealtime() [member function]
cls.add_method('DoRealtime',
'bool',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): void ns3::Synchronizer::DoSetCondition(bool arg0) [member function]
cls.add_method('DoSetCondition',
'void',
[param('bool', 'arg0')],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): void ns3::Synchronizer::DoSetOrigin(uint64_t ns) [member function]
cls.add_method('DoSetOrigin',
'void',
[param('uint64_t', 'ns')],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): void ns3::Synchronizer::DoSignal() [member function]
cls.add_method('DoSignal',
'void',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): bool ns3::Synchronizer::DoSynchronize(uint64_t nsCurrent, uint64_t nsDelay) [member function]
cls.add_method('DoSynchronize',
'bool',
[param('uint64_t', 'nsCurrent'), param('uint64_t', 'nsDelay')],
is_pure_virtual=True, visibility='protected', is_virtual=True)
return
def register_Ns3SystemThread_methods(root_module, cls):
## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::SystemThread const & arg0) [constructor]
cls.add_constructor([param('ns3::SystemThread const &', 'arg0')])
## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [constructor]
cls.add_constructor([param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
## system-thread.h (module 'core'): static bool ns3::SystemThread::Equals(ns3::SystemThread::ThreadId id) [member function]
cls.add_method('Equals',
'bool',
[param('pthread_t', 'id')],
is_static=True)
## system-thread.h (module 'core'): void ns3::SystemThread::Join() [member function]
cls.add_method('Join',
'void',
[])
## system-thread.h (module 'core'): static ns3::SystemThread::ThreadId ns3::SystemThread::Self() [member function]
cls.add_method('Self',
'ns3::SystemThread::ThreadId',
[],
is_static=True)
## system-thread.h (module 'core'): void ns3::SystemThread::Start() [member function]
cls.add_method('Start',
'void',
[])
return
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('>=')
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
cls.add_output_stream_operator()
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function]
cls.add_method('As',
'ns3::TimeWithUnit',
[param('ns3::Time::Unit const', 'unit')],
is_const=True)
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
cls.add_method('GetDays',
'double',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
cls.add_method('GetHours',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
cls.add_method('GetMinutes',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
cls.add_method('GetYears',
'double',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
cls.add_method('Max',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
cls.add_method('Min',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
cls.add_method('StaticInit',
'bool',
[],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
return
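## Usage note (editorial sketch, not scanner output): the ns3::Time wrappers
## registered above are typically driven from the built `ns.core` Python
## module. Kept in comments so importing this generated file stays
## side-effect free.
##
##     import ns.core
##     t = ns.core.Seconds(1.5)                  # free helper returning a Time
##     assert t.GetMilliSeconds() == 1500        # fixed-unit accessor
##     half = ns.core.Time.FromDouble(0.5, ns.core.Time.S)
##     assert t.Compare(half) > 0                # t is the later instant
##     print(t.As(ns.core.Time.MS))              # TimeWithUnit pretty-printing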
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TriangularRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::TriangularRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable::TriangularRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue(double mean, double min, double max) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'min'), param('double', 'max')])
## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger(uint32_t mean, uint32_t min, uint32_t max) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'min'), param('uint32_t', 'max')])
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
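## Usage note (editorial sketch): the random-variable wrappers expose both the
## explicit-parameter overloads registered above and a parameterless draw that
## uses the stream's configured attributes. Assuming `ns.core` is importable:
##
##     import ns.core
##     tri = ns.core.TriangularRandomVariable()
##     x = tri.GetValue(5.0, 0.0, 10.0)   # one draw with mean=5, min=0, max=10
##     y = tri.GetValue()                 # draw from the attribute-configured stream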
def register_Ns3UniformRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::UniformRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable::UniformRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue(double min, double max) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'min'), param('double', 'max')])
## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger(uint32_t min, uint32_t max) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'min'), param('uint32_t', 'max')])
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
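## Usage note (editorial sketch): the attribute-driven pattern for the uniform
## stream; "Min" and "Max" are the documented ns3::UniformRandomVariable
## attribute names.
##
##     import ns.core
##     rng = ns.core.UniformRandomVariable()
##     rng.SetAttribute("Min", ns.core.DoubleValue(0.0))
##     rng.SetAttribute("Max", ns.core.DoubleValue(10.0))
##     x = rng.GetValue()          # double drawn from [Min, Max)
##     n = rng.GetInteger(1, 6)    # explicit-bounds overload, uint32 in [1, 6]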
def register_Ns3WallClockSynchronizer_methods(root_module, cls):
## wall-clock-synchronizer.h (module 'core'): ns3::WallClockSynchronizer::WallClockSynchronizer(ns3::WallClockSynchronizer const & arg0) [constructor]
cls.add_constructor([param('ns3::WallClockSynchronizer const &', 'arg0')])
## wall-clock-synchronizer.h (module 'core'): ns3::WallClockSynchronizer::WallClockSynchronizer() [constructor]
cls.add_constructor([])
## wall-clock-synchronizer.h (module 'core'): static ns3::TypeId ns3::WallClockSynchronizer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## wall-clock-synchronizer.h (module 'core'): ns3::WallClockSynchronizer::NS_PER_SEC [variable]
cls.add_static_attribute('NS_PER_SEC', 'uint64_t const', is_const=True)
## wall-clock-synchronizer.h (module 'core'): ns3::WallClockSynchronizer::US_PER_NS [variable]
cls.add_static_attribute('US_PER_NS', 'uint64_t const', is_const=True)
## wall-clock-synchronizer.h (module 'core'): ns3::WallClockSynchronizer::US_PER_SEC [variable]
cls.add_static_attribute('US_PER_SEC', 'uint64_t const', is_const=True)
## wall-clock-synchronizer.h (module 'core'): uint64_t ns3::WallClockSynchronizer::DoEventEnd() [member function]
cls.add_method('DoEventEnd',
'uint64_t',
[],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): void ns3::WallClockSynchronizer::DoEventStart() [member function]
cls.add_method('DoEventStart',
'void',
[],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): uint64_t ns3::WallClockSynchronizer::DoGetCurrentRealtime() [member function]
cls.add_method('DoGetCurrentRealtime',
'uint64_t',
[],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): int64_t ns3::WallClockSynchronizer::DoGetDrift(uint64_t ns) [member function]
cls.add_method('DoGetDrift',
'int64_t',
[param('uint64_t', 'ns')],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): bool ns3::WallClockSynchronizer::DoRealtime() [member function]
cls.add_method('DoRealtime',
'bool',
[],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): void ns3::WallClockSynchronizer::DoSetCondition(bool cond) [member function]
cls.add_method('DoSetCondition',
'void',
[param('bool', 'cond')],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): void ns3::WallClockSynchronizer::DoSetOrigin(uint64_t ns) [member function]
cls.add_method('DoSetOrigin',
'void',
[param('uint64_t', 'ns')],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): void ns3::WallClockSynchronizer::DoSignal() [member function]
cls.add_method('DoSignal',
'void',
[],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): bool ns3::WallClockSynchronizer::DoSynchronize(uint64_t nsCurrent, uint64_t nsDelay) [member function]
cls.add_method('DoSynchronize',
'bool',
[param('uint64_t', 'nsCurrent'), param('uint64_t', 'nsDelay')],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): uint64_t ns3::WallClockSynchronizer::DriftCorrect(uint64_t nsNow, uint64_t nsDelay) [member function]
cls.add_method('DriftCorrect',
'uint64_t',
[param('uint64_t', 'nsNow'), param('uint64_t', 'nsDelay')],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): uint64_t ns3::WallClockSynchronizer::GetNormalizedRealtime() [member function]
cls.add_method('GetNormalizedRealtime',
'uint64_t',
[],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): uint64_t ns3::WallClockSynchronizer::GetRealtime() [member function]
cls.add_method('GetRealtime',
'uint64_t',
[],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): void ns3::WallClockSynchronizer::NsToTimeval(int64_t ns, timeval * tv) [member function]
cls.add_method('NsToTimeval',
'void',
[param('int64_t', 'ns'), param('timeval *', 'tv')],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): bool ns3::WallClockSynchronizer::SleepWait(uint64_t ns) [member function]
cls.add_method('SleepWait',
'bool',
[param('uint64_t', 'ns')],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): bool ns3::WallClockSynchronizer::SpinWait(uint64_t ns) [member function]
cls.add_method('SpinWait',
'bool',
[param('uint64_t', 'ns')],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): void ns3::WallClockSynchronizer::TimevalAdd(timeval * tv1, timeval * tv2, timeval * result) [member function]
cls.add_method('TimevalAdd',
'void',
[param('timeval *', 'tv1'), param('timeval *', 'tv2'), param('timeval *', 'result')],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): uint64_t ns3::WallClockSynchronizer::TimevalToNs(timeval * tv) [member function]
cls.add_method('TimevalToNs',
'uint64_t',
[param('timeval *', 'tv')],
visibility='protected')
return
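## Usage note (editorial sketch): WallClockSynchronizer is normally reached
## indirectly, by selecting the realtime simulator implementation before any
## other Simulator call; a hedged sketch of that selection:
##
##     import ns.core
##     ns.core.GlobalValue.Bind("SimulatorImplementationType",
##                              ns.core.StringValue("ns3::RealtimeSimulatorImpl"))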
def register_Ns3WeibullRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::WeibullRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable::WeibullRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetScale() const [member function]
cls.add_method('GetScale',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetShape() const [member function]
cls.add_method('GetShape',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue(double scale, double shape, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'scale'), param('double', 'shape'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger(uint32_t scale, uint32_t shape, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3ZetaRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZetaRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable::ZetaRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue(double alpha) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'alpha')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger(uint32_t alpha) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'alpha')])
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3ZipfRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZipfRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable::ZipfRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue(uint32_t n, double alpha) [member function]
cls.add_method('GetValue',
'double',
[param('uint32_t', 'n'), param('double', 'alpha')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger(uint32_t n, uint32_t alpha) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'n'), param('uint32_t', 'alpha')])
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3BooleanChecker_methods(root_module, cls):
## boolean.h (module 'core'): ns3::BooleanChecker::BooleanChecker() [constructor]
cls.add_constructor([])
## boolean.h (module 'core'): ns3::BooleanChecker::BooleanChecker(ns3::BooleanChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::BooleanChecker const &', 'arg0')])
return
def register_Ns3BooleanValue_methods(root_module, cls):
cls.add_output_stream_operator()
## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue(ns3::BooleanValue const & arg0) [constructor]
cls.add_constructor([param('ns3::BooleanValue const &', 'arg0')])
## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue() [constructor]
cls.add_constructor([])
## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue(bool value) [constructor]
cls.add_constructor([param('bool', 'value')])
## boolean.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::BooleanValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## boolean.h (module 'core'): bool ns3::BooleanValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## boolean.h (module 'core'): bool ns3::BooleanValue::Get() const [member function]
cls.add_method('Get',
'bool',
[],
is_const=True)
## boolean.h (module 'core'): std::string ns3::BooleanValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## boolean.h (module 'core'): void ns3::BooleanValue::Set(bool value) [member function]
cls.add_method('Set',
'void',
[param('bool', 'value')])
return
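## Usage note (editorial sketch): the *Value classes box plain Python values
## for the attribute system; the attribute path below is hypothetical and only
## illustrates the calling convention.
##
##     import ns.core
##     v = ns.core.BooleanValue(True)
##     assert v.Get()
##     ns.core.Config.SetDefault("ns3::SomeObject::SomeFlag",   # hypothetical path
##                               ns.core.BooleanValue(False))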
def register_Ns3CalendarScheduler_methods(root_module, cls):
## calendar-scheduler.h (module 'core'): ns3::CalendarScheduler::CalendarScheduler(ns3::CalendarScheduler const & arg0) [constructor]
cls.add_constructor([param('ns3::CalendarScheduler const &', 'arg0')])
## calendar-scheduler.h (module 'core'): ns3::CalendarScheduler::CalendarScheduler() [constructor]
cls.add_constructor([])
## calendar-scheduler.h (module 'core'): static ns3::TypeId ns3::CalendarScheduler::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## calendar-scheduler.h (module 'core'): void ns3::CalendarScheduler::Insert(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## calendar-scheduler.h (module 'core'): bool ns3::CalendarScheduler::IsEmpty() const [member function]
cls.add_method('IsEmpty',
'bool',
[],
is_const=True, is_virtual=True)
## calendar-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::CalendarScheduler::PeekNext() const [member function]
cls.add_method('PeekNext',
'ns3::Scheduler::Event',
[],
is_const=True, is_virtual=True)
## calendar-scheduler.h (module 'core'): void ns3::CalendarScheduler::Remove(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## calendar-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::CalendarScheduler::RemoveNext() [member function]
cls.add_method('RemoveNext',
'ns3::Scheduler::Event',
[],
is_virtual=True)
return
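## Usage note (editorial sketch): alternative schedulers such as
## ns3::CalendarScheduler are installed through an ObjectFactory before the
## simulation starts.
##
##     import ns.core
##     factory = ns.core.ObjectFactory()
##     factory.SetTypeId("ns3::CalendarScheduler")
##     ns.core.Simulator.SetScheduler(factory)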
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<const ns3::CallbackImplBase> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'ns3::ObjectBase*'])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'bool'])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
                   is_static=True, visibility='protected', template_parameters=[u'std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >'])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'void'])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'unsigned char*'])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'long'])
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3ConstantRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ConstantRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable::ConstantRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetConstant() const [member function]
cls.add_method('GetConstant',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue(double constant) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'constant')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger(uint32_t constant) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'constant')])
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
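## Usage note (editorial sketch): "Constant" is the documented attribute of
## ns3::ConstantRandomVariable, so a fixed stream can be configured and drawn
## like this:
##
##     import ns.core
##     c = ns.core.ConstantRandomVariable()
##     c.SetAttribute("Constant", ns.core.DoubleValue(7.0))
##     assert c.GetValue() == 7.0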
def register_Ns3DefaultSimulatorImpl_methods(root_module, cls):
## default-simulator-impl.h (module 'core'): ns3::DefaultSimulatorImpl::DefaultSimulatorImpl(ns3::DefaultSimulatorImpl const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultSimulatorImpl const &', 'arg0')])
## default-simulator-impl.h (module 'core'): ns3::DefaultSimulatorImpl::DefaultSimulatorImpl() [constructor]
cls.add_constructor([])
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::Cancel(ns3::EventId const & id) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'id')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_virtual=True)
## default-simulator-impl.h (module 'core'): uint32_t ns3::DefaultSimulatorImpl::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): ns3::Time ns3::DefaultSimulatorImpl::GetDelayLeft(ns3::EventId const & id) const [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): ns3::Time ns3::DefaultSimulatorImpl::GetMaximumSimulationTime() const [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): uint32_t ns3::DefaultSimulatorImpl::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): static ns3::TypeId ns3::DefaultSimulatorImpl::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## default-simulator-impl.h (module 'core'): bool ns3::DefaultSimulatorImpl::IsExpired(ns3::EventId const & id) const [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'id')],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): bool ns3::DefaultSimulatorImpl::IsFinished() const [member function]
cls.add_method('IsFinished',
'bool',
[],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): ns3::Time ns3::DefaultSimulatorImpl::Now() const [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::Remove(ns3::EventId const & id) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'id')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::Run() [member function]
cls.add_method('Run',
'void',
[],
is_virtual=True)
## default-simulator-impl.h (module 'core'): ns3::EventId ns3::DefaultSimulatorImpl::Schedule(ns3::Time const & delay, ns3::EventImpl * event) [member function]
cls.add_method('Schedule',
'ns3::EventId',
[param('ns3::Time const &', 'delay'), param('ns3::EventImpl *', 'event')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): ns3::EventId ns3::DefaultSimulatorImpl::ScheduleDestroy(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleDestroy',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): ns3::EventId ns3::DefaultSimulatorImpl::ScheduleNow(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleNow',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::ScheduleWithContext(uint32_t context, ns3::Time const & delay, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleWithContext',
'void',
[param('uint32_t', 'context'), param('ns3::Time const &', 'delay'), param('ns3::EventImpl *', 'event')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::Stop(ns3::Time const & delay) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'delay')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
return
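## Usage note (editorial sketch): DefaultSimulatorImpl is driven through the
## static ns3::Simulator facade; the pybindgen-based bindings have
## historically accepted a plain Python callable as the scheduled event.
##
##     import ns.core
##     def hello():
##         print("hello at t=%.1fs" % ns.core.Simulator.Now().GetSeconds())
##     ns.core.Simulator.Schedule(ns.core.Seconds(1.0), hello)
##     ns.core.Simulator.Run()
##     ns.core.Simulator.Destroy()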
def register_Ns3DeterministicRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::DeterministicRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable::DeterministicRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::DeterministicRandomVariable::SetValueArray(double * values, std::size_t length) [member function]
cls.add_method('SetValueArray',
'void',
[param('double *', 'values'), param('std::size_t', 'length')])
## random-variable-stream.h (module 'core'): double ns3::DeterministicRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::DeterministicRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3DoubleValue_methods(root_module, cls):
## double.h (module 'core'): ns3::DoubleValue::DoubleValue() [constructor]
cls.add_constructor([])
## double.h (module 'core'): ns3::DoubleValue::DoubleValue(double const & value) [constructor]
cls.add_constructor([param('double const &', 'value')])
## double.h (module 'core'): ns3::DoubleValue::DoubleValue(ns3::DoubleValue const & arg0) [constructor]
cls.add_constructor([param('ns3::DoubleValue const &', 'arg0')])
## double.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::DoubleValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## double.h (module 'core'): bool ns3::DoubleValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## double.h (module 'core'): double ns3::DoubleValue::Get() const [member function]
cls.add_method('Get',
'double',
[],
is_const=True)
## double.h (module 'core'): std::string ns3::DoubleValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## double.h (module 'core'): void ns3::DoubleValue::Set(double const & value) [member function]
cls.add_method('Set',
'void',
[param('double const &', 'value')])
return
def register_Ns3EmpiricalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable::EmpiricalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::CDF(double v, double c) [member function]
cls.add_method('CDF',
'void',
[param('double', 'v'), param('double', 'c')])
## random-variable-stream.h (module 'core'): uint32_t ns3::EmpiricalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::EmpiricalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::Interpolate(double c1, double c2, double v1, double v2, double r) [member function]
cls.add_method('Interpolate',
'double',
[param('double', 'c1'), param('double', 'c2'), param('double', 'v1'), param('double', 'v2'), param('double', 'r')],
visibility='private', is_virtual=True)
## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::Validate() [member function]
cls.add_method('Validate',
'void',
[],
visibility='private', is_virtual=True)
return
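## Usage note (editorial sketch): an empirical stream is configured by feeding
## (value, cumulative-probability) points through CDF(); the final point
## should reach probability 1.0, which Validate() checks before sampling.
##
##     import ns.core
##     erv = ns.core.EmpiricalRandomVariable()
##     erv.CDF(0.0, 0.0)
##     erv.CDF(5.0, 0.25)
##     erv.CDF(10.0, 1.0)
##     x = erv.GetValue()    # interpolated draw from the empirical CDF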
def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3EnumChecker_methods(root_module, cls):
## enum.h (module 'core'): ns3::EnumChecker::EnumChecker(ns3::EnumChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::EnumChecker const &', 'arg0')])
## enum.h (module 'core'): ns3::EnumChecker::EnumChecker() [constructor]
cls.add_constructor([])
## enum.h (module 'core'): void ns3::EnumChecker::Add(int value, std::string name) [member function]
cls.add_method('Add',
'void',
[param('int', 'value'), param('std::string', 'name')])
## enum.h (module 'core'): void ns3::EnumChecker::AddDefault(int value, std::string name) [member function]
cls.add_method('AddDefault',
'void',
[param('int', 'value'), param('std::string', 'name')])
## enum.h (module 'core'): bool ns3::EnumChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## enum.h (module 'core'): bool ns3::EnumChecker::Copy(ns3::AttributeValue const & src, ns3::AttributeValue & dst) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'src'), param('ns3::AttributeValue &', 'dst')],
is_const=True, is_virtual=True)
## enum.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EnumChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## enum.h (module 'core'): std::string ns3::EnumChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_const=True, is_virtual=True)
## enum.h (module 'core'): std::string ns3::EnumChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_const=True, is_virtual=True)
## enum.h (module 'core'): bool ns3::EnumChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_const=True, is_virtual=True)
return
def register_Ns3EnumValue_methods(root_module, cls):
## enum.h (module 'core'): ns3::EnumValue::EnumValue(ns3::EnumValue const & arg0) [constructor]
cls.add_constructor([param('ns3::EnumValue const &', 'arg0')])
## enum.h (module 'core'): ns3::EnumValue::EnumValue() [constructor]
cls.add_constructor([])
## enum.h (module 'core'): ns3::EnumValue::EnumValue(int value) [constructor]
cls.add_constructor([param('int', 'value')])
## enum.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EnumValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## enum.h (module 'core'): bool ns3::EnumValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## enum.h (module 'core'): int ns3::EnumValue::Get() const [member function]
cls.add_method('Get',
'int',
[],
is_const=True)
## enum.h (module 'core'): std::string ns3::EnumValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## enum.h (module 'core'): void ns3::EnumValue::Set(int value) [member function]
cls.add_method('Set',
'void',
[param('int', 'value')])
return
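## Usage note (editorial sketch): EnumValue boxes the integer value of a C++
## enum; from Python, enum attributes are more commonly set via StringValue
## with the symbolic name registered on the matching EnumChecker.
##
##     import ns.core
##     ev = ns.core.EnumValue(1)
##     assert ev.Get() == 1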
def register_Ns3ErlangRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ErlangRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable::ErlangRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetK() const [member function]
cls.add_method('GetK',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetLambda() const [member function]
cls.add_method('GetLambda',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue(uint32_t k, double lambda) [member function]
cls.add_method('GetValue',
'double',
[param('uint32_t', 'k'), param('double', 'lambda')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger(uint32_t k, uint32_t lambda) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'k'), param('uint32_t', 'lambda')])
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3EventImpl_methods(root_module, cls):
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [constructor]
cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
cls.add_constructor([])
## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
cls.add_method('Invoke',
'void',
[])
## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
cls.add_method('IsCancelled',
'bool',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
cls.add_method('Notify',
'void',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
return
def register_Ns3ExponentialRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ExponentialRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable::ExponentialRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue(double mean, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger(uint32_t mean, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3FdReader_methods(root_module, cls):
## unix-fd-reader.h (module 'core'): ns3::FdReader::FdReader(ns3::FdReader const & arg0) [constructor]
cls.add_constructor([param('ns3::FdReader const &', 'arg0')])
## unix-fd-reader.h (module 'core'): ns3::FdReader::FdReader() [constructor]
cls.add_constructor([])
## unix-fd-reader.h (module 'core'): void ns3::FdReader::Start(int fd, ns3::Callback<void, unsigned char *, long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> readCallback) [member function]
cls.add_method('Start',
'void',
[param('int', 'fd'), param('ns3::Callback< void, unsigned char *, long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'readCallback')])
## unix-fd-reader.h (module 'core'): void ns3::FdReader::Stop() [member function]
cls.add_method('Stop',
'void',
[])
## unix-fd-reader.h (module 'core'): ns3::FdReader::Data ns3::FdReader::DoRead() [member function]
cls.add_method('DoRead',
'ns3::FdReader::Data',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
return
def register_Ns3GammaRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::GammaRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable::GammaRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetBeta() const [member function]
cls.add_method('GetBeta',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue(double alpha, double beta) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'alpha'), param('double', 'beta')])
## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger(uint32_t alpha, uint32_t beta) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'alpha'), param('uint32_t', 'beta')])
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3HeapScheduler_methods(root_module, cls):
## heap-scheduler.h (module 'core'): ns3::HeapScheduler::HeapScheduler(ns3::HeapScheduler const & arg0) [constructor]
cls.add_constructor([param('ns3::HeapScheduler const &', 'arg0')])
## heap-scheduler.h (module 'core'): ns3::HeapScheduler::HeapScheduler() [constructor]
cls.add_constructor([])
## heap-scheduler.h (module 'core'): static ns3::TypeId ns3::HeapScheduler::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## heap-scheduler.h (module 'core'): void ns3::HeapScheduler::Insert(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## heap-scheduler.h (module 'core'): bool ns3::HeapScheduler::IsEmpty() const [member function]
cls.add_method('IsEmpty',
'bool',
[],
is_const=True, is_virtual=True)
## heap-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::HeapScheduler::PeekNext() const [member function]
cls.add_method('PeekNext',
'ns3::Scheduler::Event',
[],
is_const=True, is_virtual=True)
## heap-scheduler.h (module 'core'): void ns3::HeapScheduler::Remove(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## heap-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::HeapScheduler::RemoveNext() [member function]
cls.add_method('RemoveNext',
'ns3::Scheduler::Event',
[],
is_virtual=True)
return
def register_Ns3IntegerValue_methods(root_module, cls):
## integer.h (module 'core'): ns3::IntegerValue::IntegerValue() [constructor]
cls.add_constructor([])
## integer.h (module 'core'): ns3::IntegerValue::IntegerValue(int64_t const & value) [constructor]
cls.add_constructor([param('int64_t const &', 'value')])
## integer.h (module 'core'): ns3::IntegerValue::IntegerValue(ns3::IntegerValue const & arg0) [constructor]
cls.add_constructor([param('ns3::IntegerValue const &', 'arg0')])
## integer.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::IntegerValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## integer.h (module 'core'): bool ns3::IntegerValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## integer.h (module 'core'): int64_t ns3::IntegerValue::Get() const [member function]
cls.add_method('Get',
'int64_t',
[],
is_const=True)
## integer.h (module 'core'): std::string ns3::IntegerValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## integer.h (module 'core'): void ns3::IntegerValue::Set(int64_t const & value) [member function]
cls.add_method('Set',
'void',
[param('int64_t const &', 'value')])
return
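# Editorial usage sketch (not scanner output): IntegerValue is the
# AttributeValue wrapper for int64_t attributes; Get/Set round-trip the
# integer, while SerializeToString/DeserializeFromString (bound above) are
# what the attribute system uses for text-based configuration.
def _example_integer_value():
    import ns.core
    v = ns.core.IntegerValue(-42)
    v.Set(7)
    return v.Get()  # -> 7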
def register_Ns3ListScheduler_methods(root_module, cls):
## list-scheduler.h (module 'core'): ns3::ListScheduler::ListScheduler(ns3::ListScheduler const & arg0) [constructor]
cls.add_constructor([param('ns3::ListScheduler const &', 'arg0')])
## list-scheduler.h (module 'core'): ns3::ListScheduler::ListScheduler() [constructor]
cls.add_constructor([])
## list-scheduler.h (module 'core'): static ns3::TypeId ns3::ListScheduler::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## list-scheduler.h (module 'core'): void ns3::ListScheduler::Insert(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## list-scheduler.h (module 'core'): bool ns3::ListScheduler::IsEmpty() const [member function]
cls.add_method('IsEmpty',
'bool',
[],
is_const=True, is_virtual=True)
## list-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::ListScheduler::PeekNext() const [member function]
cls.add_method('PeekNext',
'ns3::Scheduler::Event',
[],
is_const=True, is_virtual=True)
## list-scheduler.h (module 'core'): void ns3::ListScheduler::Remove(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## list-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::ListScheduler::RemoveNext() [member function]
cls.add_method('RemoveNext',
'ns3::Scheduler::Event',
[],
is_virtual=True)
return
def register_Ns3LogNormalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::LogNormalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable::LogNormalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetMu() const [member function]
cls.add_method('GetMu',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetSigma() const [member function]
cls.add_method('GetSigma',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue(double mu, double sigma) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mu'), param('double', 'sigma')])
## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger(uint32_t mu, uint32_t sigma) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mu'), param('uint32_t', 'sigma')])
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
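# Editorial usage sketch (not scanner output): mirrors the gamma example
# above; mu and sigma are the log-scale parameters exposed by the
# GetMu/GetSigma accessors bound here.
def _example_log_normal_random_variable():
    import ns.core
    ln = ns.core.LogNormalRandomVariable()
    return ln.GetValue(0.0, 0.25)  # mu=0.0, sigma=0.25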
def register_Ns3MapScheduler_methods(root_module, cls):
## map-scheduler.h (module 'core'): ns3::MapScheduler::MapScheduler(ns3::MapScheduler const & arg0) [constructor]
cls.add_constructor([param('ns3::MapScheduler const &', 'arg0')])
## map-scheduler.h (module 'core'): ns3::MapScheduler::MapScheduler() [constructor]
cls.add_constructor([])
## map-scheduler.h (module 'core'): static ns3::TypeId ns3::MapScheduler::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## map-scheduler.h (module 'core'): void ns3::MapScheduler::Insert(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## map-scheduler.h (module 'core'): bool ns3::MapScheduler::IsEmpty() const [member function]
cls.add_method('IsEmpty',
'bool',
[],
is_const=True, is_virtual=True)
## map-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::MapScheduler::PeekNext() const [member function]
cls.add_method('PeekNext',
'ns3::Scheduler::Event',
[],
is_const=True, is_virtual=True)
## map-scheduler.h (module 'core'): void ns3::MapScheduler::Remove(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## map-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::MapScheduler::RemoveNext() [member function]
cls.add_method('RemoveNext',
'ns3::Scheduler::Event',
[],
is_virtual=True)
return
def register_Ns3NormalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::INFINITE_VALUE [variable]
cls.add_static_attribute('INFINITE_VALUE', 'double const', is_const=True)
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::NormalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::NormalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetVariance() const [member function]
cls.add_method('GetVariance',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue(double mean, double variance, double bound=ns3::NormalRandomVariable::INFINITE_VALUE) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'variance'), param('double', 'bound', default_value='ns3::NormalRandomVariable::INFINITE_VALUE')])
## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger(uint32_t mean, uint32_t variance, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'variance'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
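# Editorial usage sketch (not scanner output): the three-argument GetValue
# bound above defaults its bound parameter to INFINITE_VALUE, so omitting it
# draws from an unbounded normal; passing a finite bound keeps samples
# within mean +/- bound. Note the second parameter is the variance, not the
# standard deviation.
def _example_normal_random_variable():
    import ns.core
    n = ns.core.NormalRandomVariable()
    unbounded = n.GetValue(0.0, 1.0)     # bound defaults to INFINITE_VALUE
    clipped = n.GetValue(0.0, 1.0, 3.0)  # samples kept within 0.0 +/- 3.0
    return unbounded, clipped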
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
cls.add_method('Get',
'ns3::ObjectFactory',
[],
is_const=True)
## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::ObjectFactory const &', 'value')])
return
def register_Ns3ObjectPtrContainerAccessor_methods(root_module, cls):
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerAccessor::ObjectPtrContainerAccessor() [constructor]
cls.add_constructor([])
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerAccessor::ObjectPtrContainerAccessor(ns3::ObjectPtrContainerAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectPtrContainerAccessor const &', 'arg0')])
## object-ptr-container.h (module 'core'): bool ns3::ObjectPtrContainerAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & value) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'value')],
is_const=True, is_virtual=True)
## object-ptr-container.h (module 'core'): bool ns3::ObjectPtrContainerAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_const=True, is_virtual=True)
## object-ptr-container.h (module 'core'): bool ns3::ObjectPtrContainerAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_const=True, is_virtual=True)
## object-ptr-container.h (module 'core'): bool ns3::ObjectPtrContainerAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## object-ptr-container.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectPtrContainerAccessor::DoGet(ns3::ObjectBase const * object, std::size_t i, std::size_t * index) const [member function]
cls.add_method('DoGet',
'ns3::Ptr< ns3::Object >',
[param('ns3::ObjectBase const *', 'object'), param('std::size_t', 'i'), param('std::size_t *', 'index')],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
## object-ptr-container.h (module 'core'): bool ns3::ObjectPtrContainerAccessor::DoGetN(ns3::ObjectBase const * object, std::size_t * n) const [member function]
cls.add_method('DoGetN',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('std::size_t *', 'n')],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3ObjectPtrContainerChecker_methods(root_module, cls):
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerChecker::ObjectPtrContainerChecker() [constructor]
cls.add_constructor([])
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerChecker::ObjectPtrContainerChecker(ns3::ObjectPtrContainerChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectPtrContainerChecker const &', 'arg0')])
## object-ptr-container.h (module 'core'): ns3::TypeId ns3::ObjectPtrContainerChecker::GetItemTypeId() const [member function]
cls.add_method('GetItemTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3ObjectPtrContainerValue_methods(root_module, cls):
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerValue::ObjectPtrContainerValue(ns3::ObjectPtrContainerValue const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectPtrContainerValue const &', 'arg0')])
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerValue::ObjectPtrContainerValue() [constructor]
cls.add_constructor([])
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerValue::Iterator ns3::ObjectPtrContainerValue::Begin() const [member function]
cls.add_method('Begin',
'ns3::ObjectPtrContainerValue::Iterator',
[],
is_const=True)
## object-ptr-container.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectPtrContainerValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-ptr-container.h (module 'core'): bool ns3::ObjectPtrContainerValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerValue::Iterator ns3::ObjectPtrContainerValue::End() const [member function]
cls.add_method('End',
'ns3::ObjectPtrContainerValue::Iterator',
[],
is_const=True)
## object-ptr-container.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectPtrContainerValue::Get(std::size_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::Object >',
[param('std::size_t', 'i')],
is_const=True)
## object-ptr-container.h (module 'core'): std::size_t ns3::ObjectPtrContainerValue::GetN() const [member function]
cls.add_method('GetN',
'std::size_t',
[],
is_const=True)
## object-ptr-container.h (module 'core'): std::string ns3::ObjectPtrContainerValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
return
def register_Ns3ParetoRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ParetoRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable::ParetoRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
deprecated=True, is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetScale() const [member function]
cls.add_method('GetScale',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetShape() const [member function]
cls.add_method('GetShape',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue(double scale, double shape, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'scale'), param('double', 'shape'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger(uint32_t scale, uint32_t shape, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
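# Editorial usage sketch (not scanner output): GetMean() is registered as
# deprecated above, so new code should rely on GetScale()/GetShape() or pass
# the parameters explicitly as below.
def _example_pareto_random_variable():
    import ns.core
    p = ns.core.ParetoRandomVariable()
    return p.GetValue(1.0, 2.0, 10.0)  # scale=1.0, shape=2.0, bound=10.0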
def register_Ns3PointerChecker_methods(root_module, cls):
## pointer.h (module 'core'): ns3::PointerChecker::PointerChecker() [constructor]
cls.add_constructor([])
## pointer.h (module 'core'): ns3::PointerChecker::PointerChecker(ns3::PointerChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::PointerChecker const &', 'arg0')])
## pointer.h (module 'core'): ns3::TypeId ns3::PointerChecker::GetPointeeTypeId() const [member function]
cls.add_method('GetPointeeTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3PointerValue_methods(root_module, cls):
## pointer.h (module 'core'): ns3::PointerValue::PointerValue(ns3::PointerValue const & arg0) [constructor]
cls.add_constructor([param('ns3::PointerValue const &', 'arg0')])
## pointer.h (module 'core'): ns3::PointerValue::PointerValue() [constructor]
cls.add_constructor([])
## pointer.h (module 'core'): ns3::PointerValue::PointerValue(ns3::Ptr<ns3::Object> object) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Object >', 'object')])
## pointer.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::PointerValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## pointer.h (module 'core'): bool ns3::PointerValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## pointer.h (module 'core'): ns3::Ptr<ns3::Object> ns3::PointerValue::GetObject() const [member function]
cls.add_method('GetObject',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## pointer.h (module 'core'): std::string ns3::PointerValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## pointer.h (module 'core'): void ns3::PointerValue::SetObject(ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('SetObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'object')])
return
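# Editorial usage sketch (not scanner output): PointerValue carries a
# Ptr<Object> through the attribute system; SetObject/GetObject round-trip
# the pointer. ns3::Object is assumed to be constructible from Python, as is
# typical in these bindings.
def _example_pointer_value():
    import ns.core
    obj = ns.core.Object()
    pv = ns.core.PointerValue()
    pv.SetObject(obj)
    return pv.GetObject()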
def register_Ns3RealtimeSimulatorImpl_methods(root_module, cls):
## realtime-simulator-impl.h (module 'core'): ns3::RealtimeSimulatorImpl::RealtimeSimulatorImpl(ns3::RealtimeSimulatorImpl const & arg0) [constructor]
cls.add_constructor([param('ns3::RealtimeSimulatorImpl const &', 'arg0')])
## realtime-simulator-impl.h (module 'core'): ns3::RealtimeSimulatorImpl::RealtimeSimulatorImpl() [constructor]
cls.add_constructor([])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::Cancel(ns3::EventId const & ev) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'ev')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): uint32_t ns3::RealtimeSimulatorImpl::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::Time ns3::RealtimeSimulatorImpl::GetDelayLeft(ns3::EventId const & id) const [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::Time ns3::RealtimeSimulatorImpl::GetHardLimit() const [member function]
cls.add_method('GetHardLimit',
'ns3::Time',
[],
is_const=True)
## realtime-simulator-impl.h (module 'core'): ns3::Time ns3::RealtimeSimulatorImpl::GetMaximumSimulationTime() const [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::RealtimeSimulatorImpl::SynchronizationMode ns3::RealtimeSimulatorImpl::GetSynchronizationMode() const [member function]
cls.add_method('GetSynchronizationMode',
'ns3::RealtimeSimulatorImpl::SynchronizationMode',
[],
is_const=True)
## realtime-simulator-impl.h (module 'core'): uint32_t ns3::RealtimeSimulatorImpl::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): static ns3::TypeId ns3::RealtimeSimulatorImpl::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## realtime-simulator-impl.h (module 'core'): bool ns3::RealtimeSimulatorImpl::IsExpired(ns3::EventId const & ev) const [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'ev')],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): bool ns3::RealtimeSimulatorImpl::IsFinished() const [member function]
cls.add_method('IsFinished',
'bool',
[],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::Time ns3::RealtimeSimulatorImpl::Now() const [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::Time ns3::RealtimeSimulatorImpl::RealtimeNow() const [member function]
cls.add_method('RealtimeNow',
'ns3::Time',
[],
is_const=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::Remove(ns3::EventId const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'ev')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::Run() [member function]
cls.add_method('Run',
'void',
[],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::EventId ns3::RealtimeSimulatorImpl::Schedule(ns3::Time const & delay, ns3::EventImpl * event) [member function]
cls.add_method('Schedule',
'ns3::EventId',
[param('ns3::Time const &', 'delay'), param('ns3::EventImpl *', 'event')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::EventId ns3::RealtimeSimulatorImpl::ScheduleDestroy(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleDestroy',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::EventId ns3::RealtimeSimulatorImpl::ScheduleNow(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleNow',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::ScheduleRealtime(ns3::Time const & delay, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleRealtime',
'void',
[param('ns3::Time const &', 'delay'), param('ns3::EventImpl *', 'event')])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::ScheduleRealtimeNow(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleRealtimeNow',
'void',
[param('ns3::EventImpl *', 'event')])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::ScheduleRealtimeNowWithContext(uint32_t context, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleRealtimeNowWithContext',
'void',
[param('uint32_t', 'context'), param('ns3::EventImpl *', 'event')])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::ScheduleRealtimeWithContext(uint32_t context, ns3::Time const & delay, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleRealtimeWithContext',
'void',
[param('uint32_t', 'context'), param('ns3::Time const &', 'delay'), param('ns3::EventImpl *', 'event')])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::ScheduleWithContext(uint32_t context, ns3::Time const & delay, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleWithContext',
'void',
[param('uint32_t', 'context'), param('ns3::Time const &', 'delay'), param('ns3::EventImpl *', 'event')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::SetHardLimit(ns3::Time limit) [member function]
cls.add_method('SetHardLimit',
'void',
[param('ns3::Time', 'limit')])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::SetSynchronizationMode(ns3::RealtimeSimulatorImpl::SynchronizationMode mode) [member function]
cls.add_method('SetSynchronizationMode',
'void',
[param('ns3::RealtimeSimulatorImpl::SynchronizationMode', 'mode')])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::Stop(ns3::Time const & delay) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'delay')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
return
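# Editorial usage sketch (not scanner output): the realtime implementation
# is normally selected before any events are scheduled by binding the
# SimulatorImplementationType global, rather than by constructing the class
# directly. GlobalValue.Bind is assumed to be bound in this module; the
# StringValue used here is registered just below.
def _example_enable_realtime_simulation():
    import ns.core
    ns.core.GlobalValue.Bind("SimulatorImplementationType",
                             ns.core.StringValue("ns3::RealtimeSimulatorImpl"))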
def register_Ns3RefCountBase_methods(root_module, cls):
## ref-count-base.h (module 'core'): ns3::RefCountBase::RefCountBase() [constructor]
cls.add_constructor([])
## ref-count-base.h (module 'core'): ns3::RefCountBase::RefCountBase(ns3::RefCountBase const & arg0) [constructor]
cls.add_constructor([param('ns3::RefCountBase const &', 'arg0')])
return
def register_Ns3StringChecker_methods(root_module, cls):
## string.h (module 'core'): ns3::StringChecker::StringChecker() [constructor]
cls.add_constructor([])
## string.h (module 'core'): ns3::StringChecker::StringChecker(ns3::StringChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::StringChecker const &', 'arg0')])
return
def register_Ns3StringValue_methods(root_module, cls):
## string.h (module 'core'): ns3::StringValue::StringValue() [constructor]
cls.add_constructor([])
## string.h (module 'core'): ns3::StringValue::StringValue(std::string const & value) [constructor]
cls.add_constructor([param('std::string const &', 'value')])
## string.h (module 'core'): ns3::StringValue::StringValue(ns3::StringValue const & arg0) [constructor]
cls.add_constructor([param('ns3::StringValue const &', 'arg0')])
## string.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::StringValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## string.h (module 'core'): bool ns3::StringValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## string.h (module 'core'): std::string ns3::StringValue::Get() const [member function]
cls.add_method('Get',
'std::string',
[],
is_const=True)
## string.h (module 'core'): std::string ns3::StringValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## string.h (module 'core'): void ns3::StringValue::Set(std::string const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string const &', 'value')])
return
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
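# Editorial usage sketch (not scanner output): TimeValue wraps ns3::Time for
# the attribute system; the Hours helper used here is registered as a free
# function in register_functions below.
def _example_time_value():
    import ns.core
    t = ns.core.TimeValue(ns.core.Hours(1.5))
    return t.Get()  # -> ns3::Time of 1.5 hours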
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3UintegerValue_methods(root_module, cls):
## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue() [constructor]
cls.add_constructor([])
## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue(uint64_t const & value) [constructor]
cls.add_constructor([param('uint64_t const &', 'value')])
## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue(ns3::UintegerValue const & arg0) [constructor]
cls.add_constructor([param('ns3::UintegerValue const &', 'arg0')])
## uinteger.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::UintegerValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## uinteger.h (module 'core'): bool ns3::UintegerValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## uinteger.h (module 'core'): uint64_t ns3::UintegerValue::Get() const [member function]
cls.add_method('Get',
'uint64_t',
[],
is_const=True)
## uinteger.h (module 'core'): std::string ns3::UintegerValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## uinteger.h (module 'core'): void ns3::UintegerValue::Set(uint64_t const & value) [member function]
cls.add_method('Set',
'void',
[param('uint64_t const &', 'value')])
return
def register_Ns3Vector2DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')])
return
def register_Ns3Vector2DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'value')])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector2D ns3::Vector2DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector2D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector2D const &', 'value')])
return
def register_Ns3Vector3DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')])
return
def register_Ns3Vector3DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'value')])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector3D ns3::Vector3DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector3D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector3D const &', 'value')])
return
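# Editorial usage sketch (not scanner output): Vector3DValue wraps an
# ns3::Vector3D for the attribute system. The Vector3D(x, y, z) constructor
# and its x/y/z instance attributes are assumed to be bound elsewhere in
# this module.
def _example_vector3d_value():
    import ns.core
    v = ns.core.Vector3DValue(ns.core.Vector3D(1.0, 2.0, 3.0))
    vec = v.Get()
    return vec.x, vec.y, vec.z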
def register_Ns3CallbackImpl__Bool_StdBasic_string__lt__char__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<bool, std::basic_string<char>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<bool, std::basic_string<char>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<bool, std::basic_string<char>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< bool, std::basic_string< char >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<bool, std::basic_string<char>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<bool, std::basic_string<char>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackImpl<bool, std::basic_string<char>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(std::basic_string<char, std::char_traits<char>, std::allocator<char> > arg0) [member operator]
cls.add_method('operator()',
'bool',
[param('std::string', 'arg0')],
                   is_pure_virtual=True, is_virtual=True, custom_name='__call__')
return
def register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): ns3::ObjectBase * ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()() [member operator]
cls.add_method('operator()',
'ns3::ObjectBase *',
[],
                   is_pure_virtual=True, is_virtual=True, custom_name='__call__')
return
def register_Ns3CallbackImpl__Void_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()() [member operator]
cls.add_method('operator()',
'void',
[],
                   is_pure_virtual=True, is_virtual=True, custom_name='__call__')
return
def register_Ns3CallbackImpl__Void_Unsigned_char___star___Long_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, unsigned char *, long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, unsigned char *, long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, unsigned char *, long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< void, unsigned char *, long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, unsigned char *, long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, unsigned char *, long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, unsigned char *, long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(unsigned char * arg0, long int arg1) [member operator]
cls.add_method('operator()',
'void',
[param('unsigned char *', 'arg0'), param('long int', 'arg1')],
                   is_pure_virtual=True, is_virtual=True, custom_name='__call__')
return
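# Editorial note (not scanner output): the custom_name='__call__' used in
# the operator() registrations above maps the C++ call operator onto
# Python's call protocol, so any wrapped CallbackImpl specialization is
# invoked like a plain function.
def _example_invoke_callback(cb):
    # cb is any wrapped CallbackImpl instance from the registrations above.
    return cb()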
def register_Ns3ConfigMatchContainer_methods(root_module, cls):
## config.h (module 'core'): ns3::Config::MatchContainer::MatchContainer(ns3::Config::MatchContainer const & arg0) [constructor]
cls.add_constructor([param('ns3::Config::MatchContainer const &', 'arg0')])
## config.h (module 'core'): ns3::Config::MatchContainer::MatchContainer() [constructor]
cls.add_constructor([])
## config.h (module 'core'): ns3::Config::MatchContainer::MatchContainer(std::vector<ns3::Ptr<ns3::Object>, std::allocator<ns3::Ptr<ns3::Object> > > const & objects, std::vector<std::basic_string<char>, std::allocator<std::basic_string<char> > > const & contexts, std::string path) [constructor]
cls.add_constructor([param('std::vector< ns3::Ptr< ns3::Object > > const &', 'objects'), param('std::vector< std::string > const &', 'contexts'), param('std::string', 'path')])
## config.h (module 'core'): ns3::Config::MatchContainer::Iterator ns3::Config::MatchContainer::Begin() const [member function]
cls.add_method('Begin',
'ns3::Config::MatchContainer::Iterator',
[],
is_const=True)
## config.h (module 'core'): void ns3::Config::MatchContainer::Connect(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('Connect',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): void ns3::Config::MatchContainer::ConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('ConnectWithoutContext',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): void ns3::Config::MatchContainer::Disconnect(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('Disconnect',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): void ns3::Config::MatchContainer::DisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('DisconnectWithoutContext',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): ns3::Config::MatchContainer::Iterator ns3::Config::MatchContainer::End() const [member function]
cls.add_method('End',
'ns3::Config::MatchContainer::Iterator',
[],
is_const=True)
## config.h (module 'core'): ns3::Ptr<ns3::Object> ns3::Config::MatchContainer::Get(std::size_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::Object >',
[param('std::size_t', 'i')],
is_const=True)
## config.h (module 'core'): std::string ns3::Config::MatchContainer::GetMatchedPath(uint32_t i) const [member function]
cls.add_method('GetMatchedPath',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## config.h (module 'core'): std::size_t ns3::Config::MatchContainer::GetN() const [member function]
cls.add_method('GetN',
'std::size_t',
[],
is_const=True)
## config.h (module 'core'): std::string ns3::Config::MatchContainer::GetPath() const [member function]
cls.add_method('GetPath',
'std::string',
[],
is_const=True)
## config.h (module 'core'): void ns3::Config::MatchContainer::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
return
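# Editorial usage sketch (not scanner output): a MatchContainer is usually
# obtained from a Config path lookup rather than built by hand;
# Config.LookupMatches is assumed to be bound alongside the other Config
# namespace functions, and the path below is purely illustrative.
def _example_config_match_container():
    import ns.core
    matches = ns.core.Config.LookupMatches("/NodeList/*/DeviceList/*")
    return [matches.GetMatchedPath(i) for i in range(matches.GetN())]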
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
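# Editorial usage sketch (not scanner output): the Hash32/Hash64 free
# functions used here are registered in register_functions below; Murmur3
# (bound above) is ns-3's default underlying hash implementation.
def _example_hash_functions():
    import ns.core
    return ns.core.Hash32("hello"), ns.core.Hash64("hello")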
def register_functions(root_module):
module = root_module
## nstime.h (module 'core'): ns3::Time ns3::Abs(ns3::Time const & time) [free function]
module.add_function('Abs',
'ns3::Time',
[param('ns3::Time const &', 'time')])
## int64x64.h (module 'core'): ns3::int64x64_t ns3::Abs(ns3::int64x64_t const & value) [free function]
module.add_function('Abs',
'ns3::int64x64_t',
[param('ns3::int64x64_t const &', 'value')])
## breakpoint.h (module 'core'): void ns3::BreakpointFallback() [free function]
module.add_function('BreakpointFallback',
'void',
[])
## vector.h (module 'core'): double ns3::CalculateDistance(ns3::Vector2D const & a, ns3::Vector2D const & b) [free function]
module.add_function('CalculateDistance',
'double',
[param('ns3::Vector2D const &', 'a'), param('ns3::Vector2D const &', 'b')])
## vector.h (module 'core'): double ns3::CalculateDistance(ns3::Vector3D const & a, ns3::Vector3D const & b) [free function]
module.add_function('CalculateDistance',
'double',
[param('ns3::Vector3D const &', 'a'), param('ns3::Vector3D const &', 'b')])
## ptr.h (module 'core'): ns3::Ptr<ns3::ObjectPtrContainerValue> ns3::Create() [free function]
module.add_function('Create',
'ns3::Ptr< ns3::ObjectPtrContainerValue >',
[],
template_parameters=[u'ns3::ObjectPtrContainerValue'])
## ptr.h (module 'core'): ns3::Ptr<ns3::PointerValue> ns3::Create() [free function]
module.add_function('Create',
'ns3::Ptr< ns3::PointerValue >',
[],
                        template_parameters=['ns3::PointerValue'])
## nstime.h (module 'core'): ns3::Time ns3::Days(ns3::int64x64_t value) [free function]
module.add_function('Days',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::Days(double value) [free function]
module.add_function('Days',
'ns3::Time',
[param('double', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::FemtoSeconds(ns3::int64x64_t value) [free function]
module.add_function('FemtoSeconds',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::FemtoSeconds(uint64_t value) [free function]
module.add_function('FemtoSeconds',
'ns3::Time',
[param('uint64_t', 'value')])
## log.h (module 'core'): ns3::LogComponent & ns3::GetLogComponent(std::string const name) [free function]
module.add_function('GetLogComponent',
'ns3::LogComponent &',
[param('std::string const', 'name')])
## hash.h (module 'core'): uint32_t ns3::Hash32(std::string const s) [free function]
module.add_function('Hash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint32_t ns3::Hash32(char const * buffer, std::size_t const size) [free function]
module.add_function('Hash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hash64(std::string const s) [free function]
module.add_function('Hash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hash64(char const * buffer, std::size_t const size) [free function]
module.add_function('Hash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')])
## nstime.h (module 'core'): ns3::Time ns3::Hours(ns3::int64x64_t value) [free function]
module.add_function('Hours',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::Hours(double value) [free function]
module.add_function('Hours',
'ns3::Time',
[param('double', 'value')])
## log.h (module 'core'): void ns3::LogComponentDisable(char const * name, ns3::LogLevel level) [free function]
module.add_function('LogComponentDisable',
'void',
[param('char const *', 'name'), param('ns3::LogLevel', 'level')])
## log.h (module 'core'): void ns3::LogComponentDisableAll(ns3::LogLevel level) [free function]
module.add_function('LogComponentDisableAll',
'void',
[param('ns3::LogLevel', 'level')])
## log.h (module 'core'): void ns3::LogComponentEnable(char const * name, ns3::LogLevel level) [free function]
module.add_function('LogComponentEnable',
'void',
[param('char const *', 'name'), param('ns3::LogLevel', 'level')])
## log.h (module 'core'): void ns3::LogComponentEnableAll(ns3::LogLevel level) [free function]
module.add_function('LogComponentEnableAll',
'void',
[param('ns3::LogLevel', 'level')])
## log.h (module 'core'): void ns3::LogComponentPrintList() [free function]
module.add_function('LogComponentPrintList',
'void',
[])
## log.h (module 'core'): ns3::LogNodePrinter ns3::LogGetNodePrinter() [free function]
module.add_function('LogGetNodePrinter',
'ns3::LogNodePrinter',
[])
## log.h (module 'core'): ns3::LogTimePrinter ns3::LogGetTimePrinter() [free function]
module.add_function('LogGetTimePrinter',
'ns3::LogTimePrinter',
[])
## log.h (module 'core'): void ns3::LogSetNodePrinter(ns3::LogNodePrinter np) [free function]
module.add_function('LogSetNodePrinter',
'void',
[param('ns3::LogNodePrinter', 'np')])
## log.h (module 'core'): void ns3::LogSetTimePrinter(ns3::LogTimePrinter lp) [free function]
module.add_function('LogSetTimePrinter',
'void',
[param('ns3::LogTimePrinter', 'lp')])
## boolean.h (module 'core'): ns3::Ptr<const ns3::AttributeChecker> ns3::MakeBooleanChecker() [free function]
module.add_function('MakeBooleanChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## callback.h (module 'core'): ns3::Ptr<const ns3::AttributeChecker> ns3::MakeCallbackChecker() [free function]
module.add_function('MakeCallbackChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## attribute.h (module 'core'): ns3::Ptr<const ns3::AttributeAccessor> ns3::MakeEmptyAttributeAccessor() [free function]
module.add_function('MakeEmptyAttributeAccessor',
'ns3::Ptr< ns3::AttributeAccessor const >',
[])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeChecker> ns3::MakeEmptyAttributeChecker() [free function]
module.add_function('MakeEmptyAttributeChecker',
'ns3::Ptr< ns3::AttributeChecker >',
[])
## trace-source-accessor.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::MakeEmptyTraceSourceAccessor() [free function]
module.add_function('MakeEmptyTraceSourceAccessor',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[])
## enum.h (module 'core'): ns3::Ptr<const ns3::AttributeChecker> ns3::MakeEnumChecker(int v1, std::string n1, int v2=0, std::string n2="", int v3=0, std::string n3="", int v4=0, std::string n4="", int v5=0, std::string n5="", int v6=0, std::string n6="", int v7=0, std::string n7="", int v8=0, std::string n8="", int v9=0, std::string n9="", int v10=0, std::string n10="", int v11=0, std::string n11="", int v12=0, std::string n12="", int v13=0, std::string n13="", int v14=0, std::string n14="", int v15=0, std::string n15="", int v16=0, std::string n16="", int v17=0, std::string n17="", int v18=0, std::string n18="", int v19=0, std::string n19="", int v20=0, std::string n20="", int v21=0, std::string n21="", int v22=0, std::string n22="") [free function]
module.add_function('MakeEnumChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('int', 'v1'), param('std::string', 'n1'), param('int', 'v2', default_value='0'), param('std::string', 'n2', default_value='""'), param('int', 'v3', default_value='0'), param('std::string', 'n3', default_value='""'), param('int', 'v4', default_value='0'), param('std::string', 'n4', default_value='""'), param('int', 'v5', default_value='0'), param('std::string', 'n5', default_value='""'), param('int', 'v6', default_value='0'), param('std::string', 'n6', default_value='""'), param('int', 'v7', default_value='0'), param('std::string', 'n7', default_value='""'), param('int', 'v8', default_value='0'), param('std::string', 'n8', default_value='""'), param('int', 'v9', default_value='0'), param('std::string', 'n9', default_value='""'), param('int', 'v10', default_value='0'), param('std::string', 'n10', default_value='""'), param('int', 'v11', default_value='0'), param('std::string', 'n11', default_value='""'), param('int', 'v12', default_value='0'), param('std::string', 'n12', default_value='""'), param('int', 'v13', default_value='0'), param('std::string', 'n13', default_value='""'), param('int', 'v14', default_value='0'), param('std::string', 'n14', default_value='""'), param('int', 'v15', default_value='0'), param('std::string', 'n15', default_value='""'), param('int', 'v16', default_value='0'), param('std::string', 'n16', default_value='""'), param('int', 'v17', default_value='0'), param('std::string', 'n17', default_value='""'), param('int', 'v18', default_value='0'), param('std::string', 'n18', default_value='""'), param('int', 'v19', default_value='0'), param('std::string', 'n19', default_value='""'), param('int', 'v20', default_value='0'), param('std::string', 'n20', default_value='""'), param('int', 'v21', default_value='0'), param('std::string', 'n21', default_value='""'), param('int', 'v22', default_value='0'), param('std::string', 'n22', default_value='""')])
## make-event.h (module 'core'): ns3::EventImpl * ns3::MakeEvent(void (*)( ) f) [free function]
module.add_function('MakeEvent',
'ns3::EventImpl *',
[param('void ( * ) ( )', 'f')])
## object-factory.h (module 'core'): ns3::Ptr<const ns3::AttributeChecker> ns3::MakeObjectFactoryChecker() [free function]
module.add_function('MakeObjectFactoryChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## string.h (module 'core'): ns3::Ptr<const ns3::AttributeChecker> ns3::MakeStringChecker() [free function]
module.add_function('MakeStringChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## nstime.h (module 'core'): ns3::Ptr<const ns3::AttributeChecker> ns3::MakeTimeChecker() [free function]
module.add_function('MakeTimeChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## nstime.h (module 'core'): ns3::Ptr<const ns3::AttributeChecker> ns3::MakeTimeChecker(ns3::Time const min) [free function]
module.add_function('MakeTimeChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('ns3::Time const', 'min')])
## nstime.h (module 'core'): ns3::Ptr<const ns3::AttributeChecker> ns3::MakeTimeChecker(ns3::Time const min, ns3::Time const max) [free function]
module.add_function('MakeTimeChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('ns3::Time const', 'min'), param('ns3::Time const', 'max')])
## type-id.h (module 'core'): ns3::Ptr<const ns3::AttributeChecker> ns3::MakeTypeIdChecker() [free function]
module.add_function('MakeTypeIdChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## vector.h (module 'core'): ns3::Ptr<const ns3::AttributeChecker> ns3::MakeVector2DChecker() [free function]
module.add_function('MakeVector2DChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## vector.h (module 'core'): ns3::Ptr<const ns3::AttributeChecker> ns3::MakeVector3DChecker() [free function]
module.add_function('MakeVector3DChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## vector.h (module 'core'): ns3::Ptr<const ns3::AttributeChecker> ns3::MakeVectorChecker() [free function]
module.add_function('MakeVectorChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## nstime.h (module 'core'): ns3::Time ns3::Max(ns3::Time const & ta, ns3::Time const & tb) [free function]
module.add_function('Max',
'ns3::Time',
[param('ns3::Time const &', 'ta'), param('ns3::Time const &', 'tb')])
## int64x64.h (module 'core'): ns3::int64x64_t ns3::Max(ns3::int64x64_t const & a, ns3::int64x64_t const & b) [free function]
module.add_function('Max',
'ns3::int64x64_t',
[param('ns3::int64x64_t const &', 'a'), param('ns3::int64x64_t const &', 'b')])
## nstime.h (module 'core'): ns3::Time ns3::MicroSeconds(ns3::int64x64_t value) [free function]
module.add_function('MicroSeconds',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::MicroSeconds(uint64_t value) [free function]
module.add_function('MicroSeconds',
'ns3::Time',
[param('uint64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::MilliSeconds(ns3::int64x64_t value) [free function]
module.add_function('MilliSeconds',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::MilliSeconds(uint64_t value) [free function]
module.add_function('MilliSeconds',
'ns3::Time',
[param('uint64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::Min(ns3::Time const & ta, ns3::Time const & tb) [free function]
module.add_function('Min',
'ns3::Time',
[param('ns3::Time const &', 'ta'), param('ns3::Time const &', 'tb')])
## int64x64.h (module 'core'): ns3::int64x64_t ns3::Min(ns3::int64x64_t const & a, ns3::int64x64_t const & b) [free function]
module.add_function('Min',
'ns3::int64x64_t',
[param('ns3::int64x64_t const &', 'a'), param('ns3::int64x64_t const &', 'b')])
## nstime.h (module 'core'): ns3::Time ns3::Minutes(ns3::int64x64_t value) [free function]
module.add_function('Minutes',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::Minutes(double value) [free function]
module.add_function('Minutes',
'ns3::Time',
[param('double', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::NanoSeconds(ns3::int64x64_t value) [free function]
module.add_function('NanoSeconds',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::NanoSeconds(uint64_t value) [free function]
module.add_function('NanoSeconds',
'ns3::Time',
[param('uint64_t', 'value')])
## simulator.h (module 'core'): ns3::Time ns3::Now() [free function]
module.add_function('Now',
'ns3::Time',
[])
## nstime.h (module 'core'): ns3::Time ns3::PicoSeconds(ns3::int64x64_t value) [free function]
module.add_function('PicoSeconds',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::PicoSeconds(uint64_t value) [free function]
module.add_function('PicoSeconds',
'ns3::Time',
[param('uint64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::Seconds(ns3::int64x64_t value) [free function]
module.add_function('Seconds',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::Seconds(double value) [free function]
module.add_function('Seconds',
'ns3::Time',
[param('double', 'value')])
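    # Editor's note: a hedged usage sketch for the time-construction helpers
    # registered above, assuming the generated 'ns.core' module is importable:
    #
    #   import ns.core
    #   t1 = ns.core.Seconds(1.5)        # per Seconds(double) above
    #   t2 = ns.core.MilliSeconds(10)    # per MilliSeconds(uint64_t) above
    #   shorter = ns.core.Min(t1, t2)    # per Min(Time const &, Time const &) above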
## test.h (module 'core'): bool ns3::TestDoubleIsEqual(double const a, double const b, double const epsilon=std::numeric_limits<double>::epsilon()) [free function]
module.add_function('TestDoubleIsEqual',
'bool',
[param('double const', 'a'), param('double const', 'b'), param('double const', 'epsilon', default_value='std::numeric_limits<double>::epsilon()')])
## nstime.h (module 'core'): ns3::Time ns3::TimeStep(uint64_t ts) [free function]
module.add_function('TimeStep',
'ns3::Time',
[param('uint64_t', 'ts')])
## type-name.h (module 'core'): std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=[u'signed char'])
## type-name.h (module 'core'): std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=[u'short'])
## type-name.h (module 'core'): std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=[u'int'])
## type-name.h (module 'core'): std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=[u'long'])
## type-name.h (module 'core'): std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=[u'unsigned char'])
## type-name.h (module 'core'): std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=[u'unsigned short'])
## type-name.h (module 'core'): std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=[u'unsigned int'])
## type-name.h (module 'core'): std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=[u'unsigned long long'])
## type-name.h (module 'core'): std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=[u'float'])
## type-name.h (module 'core'): std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=[u'double'])
## nstime.h (module 'core'): ns3::Time ns3::Years(ns3::int64x64_t value) [free function]
module.add_function('Years',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::Years(double value) [free function]
module.add_function('Years',
'ns3::Time',
[param('double', 'value')])
register_functions_ns3_CommandLineHelper(module.add_cpp_namespace('CommandLineHelper'), root_module)
register_functions_ns3_Config(module.add_cpp_namespace('Config'), root_module)
register_functions_ns3_FatalImpl(module.add_cpp_namespace('FatalImpl'), root_module)
register_functions_ns3_Hash(module.add_cpp_namespace('Hash'), root_module)
register_functions_ns3_SystemPath(module.add_cpp_namespace('SystemPath'), root_module)
register_functions_ns3_TracedValueCallback(module.add_cpp_namespace('TracedValueCallback'), root_module)
register_functions_ns3_internal(module.add_cpp_namespace('internal'), root_module)
register_functions_ns3_tests(module.add_cpp_namespace('tests'), root_module)
return
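# Editor's note: each add_cpp_namespace() call above exposes the corresponding
# C++ namespace as an attribute of the generated module, so namespaces map onto
# Python attribute paths (a sketch under pybindgen's default mapping):
#
#   import ns.core
#   ns.core.Config.Reset()                  # ns3::Config::Reset(), wrapped below
#   ns.core.SystemPath.FindSelfDirectory()  # ns3::SystemPath::FindSelfDirectory()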
def register_functions_ns3_CommandLineHelper(module, root_module):
## command-line.h (module 'core'): std::string ns3::CommandLineHelper::GetDefault(bool const & val) [free function]
module.add_function('GetDefault',
'std::string',
[param('bool const &', 'val')],
template_parameters=[u'bool'])
## command-line.h (module 'core'): bool ns3::CommandLineHelper::UserItemParse(std::string const value, bool & val) [free function]
module.add_function('UserItemParse',
'bool',
[param('std::string const', 'value'), param('bool &', 'val')],
template_parameters=[u'bool'])
return
def register_functions_ns3_Config(module, root_module):
## config.h (module 'core'): void ns3::Config::Connect(std::string path, ns3::CallbackBase const & cb) [free function]
module.add_function('Connect',
'void',
[param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): void ns3::Config::ConnectWithoutContext(std::string path, ns3::CallbackBase const & cb) [free function]
module.add_function('ConnectWithoutContext',
'void',
[param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): void ns3::Config::Disconnect(std::string path, ns3::CallbackBase const & cb) [free function]
module.add_function('Disconnect',
'void',
[param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): void ns3::Config::DisconnectWithoutContext(std::string path, ns3::CallbackBase const & cb) [free function]
module.add_function('DisconnectWithoutContext',
'void',
[param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): ns3::Ptr<ns3::Object> ns3::Config::GetRootNamespaceObject(uint32_t i) [free function]
module.add_function('GetRootNamespaceObject',
'ns3::Ptr< ns3::Object >',
[param('uint32_t', 'i')])
## config.h (module 'core'): std::size_t ns3::Config::GetRootNamespaceObjectN() [free function]
module.add_function('GetRootNamespaceObjectN',
'std::size_t',
[])
## config.h (module 'core'): ns3::Config::MatchContainer ns3::Config::LookupMatches(std::string path) [free function]
module.add_function('LookupMatches',
'ns3::Config::MatchContainer',
[param('std::string', 'path')])
## config.h (module 'core'): void ns3::Config::RegisterRootNamespaceObject(ns3::Ptr<ns3::Object> obj) [free function]
module.add_function('RegisterRootNamespaceObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'obj')])
## config.h (module 'core'): void ns3::Config::Reset() [free function]
module.add_function('Reset',
'void',
[])
## config.h (module 'core'): void ns3::Config::Set(std::string path, ns3::AttributeValue const & value) [free function]
module.add_function('Set',
'void',
[param('std::string', 'path'), param('ns3::AttributeValue const &', 'value')])
## config.h (module 'core'): void ns3::Config::SetDefault(std::string name, ns3::AttributeValue const & value) [free function]
module.add_function('SetDefault',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## config.h (module 'core'): bool ns3::Config::SetDefaultFailSafe(std::string name, ns3::AttributeValue const & value) [free function]
module.add_function('SetDefaultFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## config.h (module 'core'): void ns3::Config::SetGlobal(std::string name, ns3::AttributeValue const & value) [free function]
module.add_function('SetGlobal',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## config.h (module 'core'): bool ns3::Config::SetGlobalFailSafe(std::string name, ns3::AttributeValue const & value) [free function]
module.add_function('SetGlobalFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## config.h (module 'core'): void ns3::Config::UnregisterRootNamespaceObject(ns3::Ptr<ns3::Object> obj) [free function]
module.add_function('UnregisterRootNamespaceObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'obj')])
return
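# Editor's note: a hedged sketch of calling the ns3::Config wrappers registered
# above; the attribute and config paths are hypothetical and only illustrate
# the (path, AttributeValue) calling convention:
#
#   import ns.core
#   ns.core.Config.SetDefault("ns3::SomeType::SomeAttribute",  # hypothetical name
#                             ns.core.StringValue("value"))
#   matches = ns.core.Config.LookupMatches("/Names/example")   # hypothetical path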
def register_functions_ns3_FatalImpl(module, root_module):
## fatal-impl.h (module 'core'): void ns3::FatalImpl::FlushStreams() [free function]
module.add_function('FlushStreams',
'void',
[])
## fatal-impl.h (module 'core'): void ns3::FatalImpl::RegisterStream(std::ostream * stream) [free function]
module.add_function('RegisterStream',
'void',
[param('std::ostream *', 'stream')])
## fatal-impl.h (module 'core'): void ns3::FatalImpl::UnregisterStream(std::ostream * stream) [free function]
module.add_function('UnregisterStream',
'void',
[param('std::ostream *', 'stream')])
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.add_cpp_namespace('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def register_functions_ns3_SystemPath(module, root_module):
## system-path.h (module 'core'): std::string ns3::SystemPath::Append(std::string left, std::string right) [free function]
module.add_function('Append',
'std::string',
[param('std::string', 'left'), param('std::string', 'right')])
## system-path.h (module 'core'): std::string ns3::SystemPath::FindSelfDirectory() [free function]
module.add_function('FindSelfDirectory',
'std::string',
[])
## system-path.h (module 'core'): std::string ns3::SystemPath::Join(std::list<std::basic_string<char>, std::allocator<std::basic_string<char> > >::const_iterator begin, std::list<std::basic_string<char>, std::allocator<std::basic_string<char> > >::const_iterator end) [free function]
module.add_function('Join',
'std::string',
[param('std::list< std::string > const_iterator', 'begin'), param('std::list< std::string > const_iterator', 'end')])
## system-path.h (module 'core'): void ns3::SystemPath::MakeDirectories(std::string path) [free function]
module.add_function('MakeDirectories',
'void',
[param('std::string', 'path')])
## system-path.h (module 'core'): std::string ns3::SystemPath::MakeTemporaryDirectoryName() [free function]
module.add_function('MakeTemporaryDirectoryName',
'std::string',
[])
## system-path.h (module 'core'): std::list<std::basic_string<char>, std::allocator<std::basic_string<char> > > ns3::SystemPath::ReadFiles(std::string path) [free function]
module.add_function('ReadFiles',
'std::list< std::string >',
[param('std::string', 'path')])
## system-path.h (module 'core'): std::list<std::basic_string<char>, std::allocator<std::basic_string<char> > > ns3::SystemPath::Split(std::string path) [free function]
module.add_function('Split',
'std::list< std::string >',
[param('std::string', 'path')])
return
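# Editor's note: a minimal sketch for the ns3::SystemPath helpers registered
# above; the paths are illustrative, and mapping std::list<std::string> to a
# Python list is an assumption about the configured container handler:
#
#   import ns.core
#   joined = ns.core.SystemPath.Append("/tmp", "ns3-run")  # -> "/tmp/ns3-run"
#   parts = ns.core.SystemPath.Split(joined)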
def register_functions_ns3_TracedValueCallback(module, root_module):
return
def register_functions_ns3_internal(module, root_module):
## double.h (module 'core'): ns3::Ptr<const ns3::AttributeChecker> ns3::internal::MakeDoubleChecker(double min, double max, std::string name) [free function]
module.add_function('MakeDoubleChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('double', 'min'), param('double', 'max'), param('std::string', 'name')])
## integer.h (module 'core'): ns3::Ptr<const ns3::AttributeChecker> ns3::internal::MakeIntegerChecker(int64_t min, int64_t max, std::string name) [free function]
module.add_function('MakeIntegerChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('int64_t', 'min'), param('int64_t', 'max'), param('std::string', 'name')])
## uinteger.h (module 'core'): ns3::Ptr<const ns3::AttributeChecker> ns3::internal::MakeUintegerChecker(uint64_t min, uint64_t max, std::string name) [free function]
module.add_function('MakeUintegerChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('uint64_t', 'min'), param('uint64_t', 'max'), param('std::string', 'name')])
return
def register_functions_ns3_tests(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
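# Editor's note: main() above writes the generated C++ binding source to stdout
# through FileCodeSink, so a typical (assumed) invocation redirects the output
# into a file for the build system to compile; the file names are illustrative:
#
#   python core_module_bindings.py > ns3module_core.cc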
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.core', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
RROR', 'LOG_LEVEL_ERROR', 'LOG_WARN', 'LOG_LEVEL_WARN', 'LOG_DEBUG', 'LOG_LEVEL_DEBUG', 'LOG_INFO', 'LOG_LEVEL_INFO', 'LOG_FUNCTION', 'LOG_LEVEL_FUNCTION', 'LOG_LOGIC', 'LOG_LEVEL_LOGIC', 'LOG_ALL', 'LOG_LEVEL_ALL', 'LOG_PREFIX_FUNC', 'LOG_PREFIX_TIME', 'LOG_PREFIX_NODE', 'LOG_PREFIX_LEVEL', 'LOG_PREFIX_ALL'])
::list< ns3::AttributeConstructionList::Item > const_iterator', u'ns3::AttributeConstructionList::CIterator')
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator*', u'ns3::AttributeConstructionList::CIterator*')
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator&', u'ns3::AttributeConstructionList::CIterator&')
e * > const_iterator', u'ns3::GlobalValue::Iterator')
typehandlers.add_type_alias(u'std::vector< ns3::GlobalValue * > const_iterator*', u'ns3::GlobalValue::Iterator*')
typehandlers.add_type_alias(u'std::vector< ns3::GlobalValue * > const_iterator&', u'ns3::GlobalValue::Iterator&')
tToType< 0 >'])
])
IntToType< 1 >'])
])
IntToType< 2 >'])
])
IntToType< 3 >'])
])
IntToType< 4 >'])
])
IntToType< 5 >'])
])
IntToType< 6 >'])
lers.add_type_alias(u'std::map< std::string, ns3::LogComponent * >', u'ns3::LogComponent::ComponentList')
typehandlers.add_type_alias(u'std::map< std::string, ns3::LogComponent * >*', u'ns3::LogComponent::ComponentList*')
typehandlers.add_type_alias(u'std::map< std::string, ns3::LogComponent * >&', u'ns3::LogComponent::ComponentList&')
d')
ue)
tingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
ity='private')
odule['ns3::Simulator'])
cs'], parent=root_module['ns3::NonCopyable'])
'], outer_class=root_module['ns3::Timer'])
rue)
'ATTR_SGC'], outer_class=root_module['ns3::TypeId'])
SOLETE'], outer_class=root_module['ns3::TypeId'])
ns3::TypeId'])
ns3::TypeId'])
typehandlers.add_type_alias(u'uint32_t', u'ns3::TypeId::hash_t')
typehandlers.add_type_alias(u'uint32_t*', u'ns3::TypeId::hash_t*')
typehandlers.add_type_alias(u'uint32_t&', u'ns3::TypeId::hash_t&')
on< ns3::DesMetrics >'])
ule['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
['ns3::Object'])
'])
['ns3::Object'])
Scheduler'])
Scheduler'])
VariableStream'])
leter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
eleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
tDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
eleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
efaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
DefaultDeleter<ns3::FdReader>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
er<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
ultDeleter<ns3::RefCountBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
ultDeleter<ns3::SystemThread>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
ter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
::Object'])
s3::Object'])
3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
typehandlers.add_type_alias(u'pthread_t', u'ns3::SystemThread::ThreadId')
typehandlers.add_type_alias(u'pthread_t*', u'ns3::SystemThread::ThreadId*')
typehandlers.add_type_alias(u'pthread_t&', u'ns3::SystemThread::ThreadId&')
'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'])
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )', u'ns3::Time::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )*', u'ns3::Time::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )&', u'ns3::Time::TracedCallback&')
erts_to(root_module['ns3::int64x64_t'])
eRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
VariableStream'])
VariableStream'])
nizer'])
VariableStream'])
VariableStream'])
VariableStream'])
['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
False, automatic_type_narrowing=True, parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
False, automatic_type_narrowing=True, parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
le['ns3::AttributeChecker'])
le['ns3::AttributeValue'])
heduler'])
e['ns3::AttributeChecker'])
e['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
e['ns3::AttributeValue'])
VariableStream'])
torImpl'])
VariableStream'])
ule['ns3::AttributeValue'])
VariableStream'])
['ns3::AttributeAccessor'])
['ns3::AttributeChecker'])
['ns3::AttributeValue'])
odule['ns3::AttributeChecker'])
odule['ns3::AttributeValue'])
VariableStream'])
'ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
VariableStream'])
::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >'])
VariableStream'])
::Scheduler'])
le['ns3::AttributeValue'])
::Scheduler'])
VariableStream'])
3::Scheduler'])
VariableStream'])
::AttributeChecker'])
::AttributeValue'])
ibuteAccessor'])
ibuteChecker'])
ibuteValue'])
typehandlers.add_type_alias(u'std::map< unsigned long long, ns3::Ptr< ns3::Object > > const_iterator', u'ns3::ObjectPtrContainerValue::Iterator')
typehandlers.add_type_alias(u'std::map< unsigned long long, ns3::Ptr< ns3::Object > > const_iterator*', u'ns3::ObjectPtrContainerValue::Iterator*')
typehandlers.add_type_alias(u'std::map< unsigned long long, ns3::Ptr< ns3::Object > > const_iterator&', u'ns3::ObjectPtrContainerValue::Iterator&')
VariableStream'])
le['ns3::AttributeChecker'])
le['ns3::AttributeValue'])
orImpl'])
le['ns3::RealtimeSimulatorImpl'])
::SimpleRefCount< ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >'])
ule['ns3::AttributeChecker'])
ule['ns3::AttributeValue'])
ule['ns3::AttributeValue'])
le['ns3::AttributeChecker'])
le['ns3::AttributeValue'])
e['ns3::AttributeValue'])
ule['ns3::AttributeChecker'])
ule['ns3::AttributeValue'])
ule['ns3::AttributeChecker'])
ule['ns3::AttributeValue'])
::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_container('std::vector< std::string >', 'std::string', container_type=u'vector')
module.add_container('std::map< std::string, ns3::LogComponent * >', ('std::string', 'ns3::LogComponent *'), container_type=u'map')
typehandlers.add_type_alias(u'ns3::Vector3D', u'ns3::Vector')
typehandlers.add_type_alias(u'ns3::Vector3D*', u'ns3::Vector*')
typehandlers.add_type_alias(u'ns3::Vector3D&', u'ns3::Vector&')
module.add_typedef(root_module['ns3::Vector3D'], 'Vector')
typehandlers.add_type_alias(u'ns3::Vector3DValue', u'ns3::VectorValue')
typehandlers.add_type_alias(u'ns3::Vector3DValue*', u'ns3::VectorValue*')
typehandlers.add_type_alias(u'ns3::Vector3DValue&', u'ns3::VectorValue&')
module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue')
typehandlers.add_type_alias(u'ns3::Vector3DChecker', u'ns3::VectorChecker')
typehandlers.add_type_alias(u'ns3::Vector3DChecker*', u'ns3::VectorChecker*')
typehandlers.add_type_alias(u'ns3::Vector3DChecker&', u'ns3::VectorChecker&')
module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')
typehandlers.add_type_alias(u'ns3::RngSeedManager', u'ns3::SeedManager')
typehandlers.add_type_alias(u'ns3::RngSeedManager*', u'ns3::SeedManager*')
typehandlers.add_type_alias(u'ns3::RngSeedManager&', u'ns3::SeedManager&')
module.add_typedef(root_module['ns3::RngSeedManager'], 'SeedManager')
typehandlers.add_type_alias(u'ns3::ObjectPtrContainerValue', u'ns3::ObjectVectorValue')
typehandlers.add_type_alias(u'ns3::ObjectPtrContainerValue*', u'ns3::ObjectVectorValue*')
typehandlers.add_type_alias(u'ns3::ObjectPtrContainerValue&', u'ns3::ObjectVectorValue&')
module.add_typedef(root_module['ns3::ObjectPtrContainerValue'], 'ObjectVectorValue')
typehandlers.add_type_alias(u'ns3::ObjectPtrContainerValue', u'ns3::ObjectMapValue')
typehandlers.add_type_alias(u'ns3::ObjectPtrContainerValue*', u'ns3::ObjectMapValue*')
typehandlers.add_type_alias(u'ns3::ObjectPtrContainerValue&', u'ns3::ObjectMapValue&')
module.add_typedef(root_module['ns3::ObjectPtrContainerValue'], 'ObjectMapValue')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )', u'ns3::LogTimePrinter')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )*', u'ns3::LogTimePrinter*')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )&', u'ns3::LogTimePrinter&')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )', u'ns3::LogNodePrinter')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )*', u'ns3::LogNodePrinter*')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )&', u'ns3::LogNodePrinter&')
eHelper')
register_types_ns3_CommandLineHelper(nested_module)
'Config')
register_types_ns3_Config(nested_module)
talImpl')
register_types_ns3_FatalImpl(nested_module)
e('Hash')
register_types_ns3_Hash(nested_module)
temPath')
register_types_ns3_SystemPath(nested_module)
allback')
register_types_ns3_TracedValueCallback(nested_module)
nternal')
register_types_ns3_internal(nested_module)
('tests')
register_types_ns3_tests(nested_module)
def register_types_ns3_CommandLineHelper(module):
root_module = module.get_root()
def register_types_ns3_Config(module):
root_module = module.get_root()
pe_alias(u'std::vector< ns3::Ptr< ns3::Object > > const_iterator', u'ns3::Config::MatchContainer::Iterator')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Object > > const_iterator*', u'ns3::Config::MatchContainer::Iterator*')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Object > > const_iterator&', u'ns3::Config::MatchContainer::Iterator&')
module.add_container('std::vector< ns3::Ptr< ns3::Object > >', 'ns3::Ptr< ns3::Object >', container_type=u'vector')
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
pleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, std::size_t const )', u'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, std::size_t const )*', u'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, std::size_t const )&', u'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, std::size_t const )', u'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, std::size_t const )*', u'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, std::size_t const )&', u'ns3::Hash::Hash64Function_ptr&')
unction')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
plementation'])
ntation'])
ntation'])
entation'])
def register_types_ns3_SystemPath(module):
root_module = module.get_root()
module.add_container('std::list< std::string >', 'std::string', container_type=u'list')
def register_types_ns3_TracedValueCallback(module):
root_module = module.get_root()
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time )', u'ns3::TracedValueCallback::Time')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time )*', u'ns3::TracedValueCallback::Time*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time )&', u'ns3::TracedValueCallback::Time&')
typehandlers.add_type_alias(u'void ( * ) ( bool, bool )', u'ns3::TracedValueCallback::Bool')
typehandlers.add_type_alias(u'void ( * ) ( bool, bool )*', u'ns3::TracedValueCallback::Bool*')
typehandlers.add_type_alias(u'void ( * ) ( bool, bool )&', u'ns3::TracedValueCallback::Bool&')
typehandlers.add_type_alias(u'void ( * ) ( int8_t, int8_t )', u'ns3::TracedValueCallback::Int8')
typehandlers.add_type_alias(u'void ( * ) ( int8_t, int8_t )*', u'ns3::TracedValueCallback::Int8*')
typehandlers.add_type_alias(u'void ( * ) ( int8_t, int8_t )&', u'ns3::TracedValueCallback::Int8&')
typehandlers.add_type_alias(u'void ( * ) ( uint8_t, uint8_t )', u'ns3::TracedValueCallback::Uint8')
typehandlers.add_type_alias(u'void ( * ) ( uint8_t, uint8_t )*', u'ns3::TracedValueCallback::Uint8*')
typehandlers.add_type_alias(u'void ( * ) ( uint8_t, uint8_t )&', u'ns3::TracedValueCallback::Uint8&')
typehandlers.add_type_alias(u'void ( * ) ( int16_t, int16_t )', u'ns3::TracedValueCallback::Int16')
typehandlers.add_type_alias(u'void ( * ) ( int16_t, int16_t )*', u'ns3::TracedValueCallback::Int16*')
typehandlers.add_type_alias(u'void ( * ) ( int16_t, int16_t )&', u'ns3::TracedValueCallback::Int16&')
typehandlers.add_type_alias(u'void ( * ) ( uint16_t, uint16_t )', u'ns3::TracedValueCallback::Uint16')
typehandlers.add_type_alias(u'void ( * ) ( uint16_t, uint16_t )*', u'ns3::TracedValueCallback::Uint16*')
typehandlers.add_type_alias(u'void ( * ) ( uint16_t, uint16_t )&', u'ns3::TracedValueCallback::Uint16&')
typehandlers.add_type_alias(u'void ( * ) ( int32_t, int32_t )', u'ns3::TracedValueCallback::Int32')
typehandlers.add_type_alias(u'void ( * ) ( int32_t, int32_t )*', u'ns3::TracedValueCallback::Int32*')
typehandlers.add_type_alias(u'void ( * ) ( int32_t, int32_t )&', u'ns3::TracedValueCallback::Int32&')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )', u'ns3::TracedValueCallback::Uint32')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )*', u'ns3::TracedValueCallback::Uint32*')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )&', u'ns3::TracedValueCallback::Uint32&')
typehandlers.add_type_alias(u'void ( * ) ( double, double )', u'ns3::TracedValueCallback::Double')
typehandlers.add_type_alias(u'void ( * ) ( double, double )*', u'ns3::TracedValueCallback::Double*')
typehandlers.add_type_alias(u'void ( * ) ( double, double )&', u'ns3::TracedValueCallback::Double&')
typehandlers.add_type_alias(u'void ( * ) ( )', u'ns3::TracedValueCallback::Void')
typehandlers.add_type_alias(u'void ( * ) ( )*', u'ns3::TracedValueCallback::Void*')
typehandlers.add_type_alias(u'void ( * ) ( )&', u'ns3::TracedValueCallback::Void&')
def register_types_ns3_internal(module):
root_module = module.get_root()
def register_types_ns3_tests(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3CommandLine_methods(root_module, root_module['ns3::CommandLine'])
register_Ns3CriticalSection_methods(root_module, root_module['ns3::CriticalSection'])
register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, root_module['ns3::DefaultDeleter< ns3::EventImpl >'])
register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
register_Ns3DefaultDeleter__Ns3SystemThread_methods(root_module, root_module['ns3::DefaultDeleter< ns3::SystemThread >'])
register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
register_Ns3EventGarbageCollector_methods(root_module, root_module['ns3::EventGarbageCollector'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3GlobalValue_methods(root_module, root_module['ns3::GlobalValue'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3IntToType__0_methods(root_module, root_module['ns3::IntToType< 0 >'])
register_Ns3IntToType__1_methods(root_module, root_module['ns3::IntToType< 1 >'])
register_Ns3IntToType__2_methods(root_module, root_module['ns3::IntToType< 2 >'])
register_Ns3IntToType__3_methods(root_module, root_module['ns3::IntToType< 3 >'])
register_Ns3IntToType__4_methods(root_module, root_module['ns3::IntToType< 4 >'])
register_Ns3IntToType__5_methods(root_module, root_module['ns3::IntToType< 5 >'])
register_Ns3IntToType__6_methods(root_module, root_module['ns3::IntToType< 6 >'])
register_Ns3LogComponent_methods(root_module, root_module['ns3::LogComponent'])
register_Ns3Names_methods(root_module, root_module['ns3::Names'])
register_Ns3NonCopyable_methods(root_module, root_module['ns3::NonCopyable'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3ParameterLogger_methods(root_module, root_module['ns3::ParameterLogger'])
register_Ns3RandomVariableStreamHelper_methods(root_module, root_module['ns3::RandomVariableStreamHelper'])
register_Ns3RngSeedManager_methods(root_module, root_module['ns3::RngSeedManager'])
register_Ns3RngStream_methods(root_module, root_module['ns3::RngStream'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
register_Ns3Singleton__Ns3DesMetrics_methods(root_module, root_module['ns3::Singleton< ns3::DesMetrics >'])
register_Ns3SystemCondition_methods(root_module, root_module['ns3::SystemCondition'])
register_Ns3SystemMutex_methods(root_module, root_module['ns3::SystemMutex'])
register_Ns3SystemWallClockMs_methods(root_module, root_module['ns3::SystemWallClockMs'])
register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
register_Ns3Timer_methods(root_module, root_module['ns3::Timer'])
register_Ns3TimerImpl_methods(root_module, root_module['ns3::TimerImpl'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D'])
register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D'])
register_Ns3Watchdog_methods(root_module, root_module['ns3::Watchdog'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3DesMetrics_methods(root_module, root_module['ns3::DesMetrics'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
register_Ns3Scheduler_methods(root_module, root_module['ns3::Scheduler'])
register_Ns3SchedulerEvent_methods(root_module, root_module['ns3::Scheduler::Event'])
register_Ns3SchedulerEventKey_methods(root_module, root_module['ns3::Scheduler::EventKey'])
register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3FdReader_Ns3Empty_Ns3DefaultDeleter__lt__ns3FdReader__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3RefCountBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3RefCountBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >'])
register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3SimulatorImpl_methods(root_module, root_module['ns3::SimulatorImpl'])
register_Ns3Synchronizer_methods(root_module, root_module['ns3::Synchronizer'])
register_Ns3SystemThread_methods(root_module, root_module['ns3::SystemThread'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
register_Ns3WallClockSynchronizer_methods(root_module, root_module['ns3::WallClockSynchronizer'])
register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3BooleanChecker_methods(root_module, root_module['ns3::BooleanChecker'])
register_Ns3BooleanValue_methods(root_module, root_module['ns3::BooleanValue'])
register_Ns3CalendarScheduler_methods(root_module, root_module['ns3::CalendarScheduler'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
register_Ns3DefaultSimulatorImpl_methods(root_module, root_module['ns3::DefaultSimulatorImpl'])
register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
register_Ns3DoubleValue_methods(root_module, root_module['ns3::DoubleValue'])
register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
register_Ns3FdReader_methods(root_module, root_module['ns3::FdReader'])
register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
register_Ns3HeapScheduler_methods(root_module, root_module['ns3::HeapScheduler'])
register_Ns3IntegerValue_methods(root_module, root_module['ns3::IntegerValue'])
register_Ns3ListScheduler_methods(root_module, root_module['ns3::ListScheduler'])
register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
register_Ns3MapScheduler_methods(root_module, root_module['ns3::MapScheduler'])
register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3ObjectPtrContainerAccessor_methods(root_module, root_module['ns3::ObjectPtrContainerAccessor'])
register_Ns3ObjectPtrContainerChecker_methods(root_module, root_module['ns3::ObjectPtrContainerChecker'])
register_Ns3ObjectPtrContainerValue_methods(root_module, root_module['ns3::ObjectPtrContainerValue'])
register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
register_Ns3PointerChecker_methods(root_module, root_module['ns3::PointerChecker'])
register_Ns3PointerValue_methods(root_module, root_module['ns3::PointerValue'])
register_Ns3RealtimeSimulatorImpl_methods(root_module, root_module['ns3::RealtimeSimulatorImpl'])
register_Ns3RefCountBase_methods(root_module, root_module['ns3::RefCountBase'])
register_Ns3StringChecker_methods(root_module, root_module['ns3::StringChecker'])
register_Ns3StringValue_methods(root_module, root_module['ns3::StringValue'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker'])
register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue'])
register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker'])
register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue'])
register_Ns3CallbackImpl__Bool_StdBasic_string__lt__char__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< bool, std::basic_string<char>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Unsigned_char___star___Long_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, unsigned char *, long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3ConfigMatchContainer_methods(root_module, root_module['ns3::Config::MatchContainer'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
# NOTE: the individual pybindgen registration calls inside the following
# functions were truncated in the source; the function skeletons and the
# statements that survived intact are retained below.
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    return
def register_Ns3CriticalSection_methods(root_module, cls):
    return
def register_Ns3Hasher_methods(root_module, cls):
    return
def register_Ns3IntToType__0_methods(root_module, cls):
    return
def register_Ns3Names_methods(root_module, cls):
    return
def register_Ns3NonCopyable_methods(root_module, cls):
    return
def register_Ns3ObjectBase_methods(root_module, cls):
    return
def register_Ns3ObjectDeleter_methods(root_module, cls):
    return
def register_Ns3ObjectFactory_methods(root_module, cls):
    cls.add_output_stream_operator()
    return
def register_Ns3ParameterLogger_methods(root_module, cls):
    return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    return
def register_Ns3Singleton__Ns3DesMetrics_methods(root_module, cls):
    return
def register_Ns3TimerImpl_methods(root_module, cls):
    return
def register_Ns3TypeId_methods(root_module, cls):
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<')
    return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    return
def register_Ns3Vector2D_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_numeric_operator('-', root_module['ns3::Vector2D'], root_module['ns3::Vector2D'], param('ns3::Vector2D const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::Vector2D'], root_module['ns3::Vector2D'], param('ns3::Vector2D const &', u'right'))
    return
def register_Ns3Vector3D_methods(root_module, cls):
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<')
    cls.add_binary_numeric_operator('-', root_module['ns3::Vector3D'], root_module['ns3::Vector3D'], param('ns3::Vector3D const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::Vector3D'], root_module['ns3::Vector3D'], param('ns3::Vector3D const &', u'right'))
    return
def register_Ns3Watchdog_methods(root_module, cls):
    return
def register_Ns3Int64x64_t_methods(root_module, cls):
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('>=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right'))
    cls.add_unary_numeric_operator('-')
    return
def register_Ns3Object_methods(root_module, cls):
    return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    return
def register_Ns3Scheduler_methods(root_module, cls):
    return
def register_Ns3SchedulerEvent_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    return
def register_Ns3SchedulerEventKey_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('>')
    return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    return
def register_Ns3Synchronizer_methods(root_module, cls):
    return
def register_Ns3SystemThread_methods(root_module, cls):
    return
def register_Ns3Time_methods(root_module, cls):
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('>=')
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
    cls.add_output_stream_operator()
    return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    return
def register_Ns3TriangularRandomVariable_methods(root_module, cls):
    return
def register_Ns3UniformRandomVariable_methods(root_module, cls):
    return
def register_Ns3WallClockSynchronizer_methods(root_module, cls):
    return
def register_Ns3WeibullRandomVariable_methods(root_module, cls):
    return
def register_Ns3ZetaRandomVariable_methods(root_module, cls):
    return
def register_Ns3ZipfRandomVariable_methods(root_module, cls):
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    return
def register_Ns3BooleanChecker_methods(root_module, cls):
    return
def register_Ns3CalendarScheduler_methods(root_module, cls):
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    return
def register_Ns3ConstantRandomVariable_methods(root_module, cls):
    return
def register_Ns3DefaultSimulatorImpl_methods(root_module, cls):
    return
def register_Ns3DeterministicRandomVariable_methods(root_module, cls):
    return
def register_Ns3EmpiricalRandomVariable_methods(root_module, cls):
    return
def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
    return
def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    return
def register_Ns3EnumChecker_methods(root_module, cls):
    return
def register_Ns3EnumValue_methods(root_module, cls):
    return
def register_Ns3ErlangRandomVariable_methods(root_module, cls):
    return
def register_Ns3EventImpl_methods(root_module, cls):
    return
def register_Ns3ExponentialRandomVariable_methods(root_module, cls):
    return
def register_Ns3GammaRandomVariable_methods(root_module, cls):
    return
def register_Ns3HeapScheduler_methods(root_module, cls):
    return
def register_Ns3IntegerValue_methods(root_module, cls):
    return
def register_Ns3ListScheduler_methods(root_module, cls):
    return
def register_Ns3LogNormalRandomVariable_methods(root_module, cls):
    return
def register_Ns3MapScheduler_methods(root_module, cls):
    return
def register_Ns3NormalRandomVariable_methods(root_module, cls):
    return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    return
def register_Ns3ObjectPtrContainerAccessor_methods(root_module, cls):
    return
def register_Ns3ObjectPtrContainerChecker_methods(root_module, cls):
    return
def register_Ns3ParetoRandomVariable_methods(root_module, cls):
    return
def register_Ns3PointerChecker_methods(root_module, cls):
    return
def register_Ns3PointerValue_methods(root_module, cls):
    return
def register_Ns3RealtimeSimulatorImpl_methods(root_module, cls):
    return
def register_Ns3RefCountBase_methods(root_module, cls):
    return
def register_Ns3TimeValue_methods(root_module, cls):
    return
def register_Ns3TypeIdChecker_methods(root_module, cls):
    return
def register_Ns3UintegerValue_methods(root_module, cls):
    return
def register_Ns3Vector2DChecker_methods(root_module, cls):
    return
def register_Ns3Vector3DChecker_methods(root_module, cls):
    return
def register_Ns3CallbackImpl__Bool_StdBasic_string__lt__char__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    return
def register_functions(root_module):
    module = root_module
    # (module-level function registrations truncated in the source)
register_functions_ns3_CommandLineHelper(module.add_cpp_namespace('CommandLineHelper'), root_module)
register_functions_ns3_Config(module.add_cpp_namespace('Config'), root_module)
register_functions_ns3_FatalImpl(module.add_cpp_namespace('FatalImpl'), root_module)
register_functions_ns3_Hash(module.add_cpp_namespace('Hash'), root_module)
register_functions_ns3_SystemPath(module.add_cpp_namespace('SystemPath'), root_module)
register_functions_ns3_TracedValueCallback(module.add_cpp_namespace('TracedValueCallback'), root_module)
register_functions_ns3_internal(module.add_cpp_namespace('internal'), root_module)
register_functions_ns3_tests(module.add_cpp_namespace('tests'), root_module)
return
def register_functions_ns3_CommandLineHelper(module, root_module):
    # (AddValue helper registrations truncated in the source)
return
def register_functions_ns3_Config(module, root_module):
    # (Config namespace function registrations truncated in the source)
return
def register_functions_ns3_FatalImpl(module, root_module):
    # (FatalImpl function registrations truncated in the source)
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.add_cpp_namespace('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def register_functions_ns3_SystemPath(module, root_module):
    # (SystemPath function registrations truncated in the source)
def register_functions_ns3_TracedValueCallback(module, root_module):
return
def register_functions_ns3_internal(module, root_module):
    # (internal namespace function registrations truncated in the source)
return
def register_functions_ns3_tests(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
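# Typical use (hypothetical invocation): pybindgen generators like this one
# are run with stdout redirected to a file, since FileCodeSink(sys.stdout)
# streams the generated binding source, e.g.:
#     python ns3_core_module_gen.py > ns3module.cc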
| true
| true
|
1c430afc55313db5124cc8f641ee344a3db59895
| 12,317
|
py
|
Python
|
src/comp_varDA.py
|
m214089/lorenz-da
|
da02fddcac6eb85e285843da35bf1a3e7c07fe62
|
[
"Apache-2.0"
] | null | null | null |
src/comp_varDA.py
|
m214089/lorenz-da
|
da02fddcac6eb85e285843da35bf1a3e7c07fe62
|
[
"Apache-2.0"
] | null | null | null |
src/comp_varDA.py
|
m214089/lorenz-da
|
da02fddcac6eb85e285843da35bf1a3e7c07fe62
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
###############################################################
# < next few lines under version control, D O N O T E D I T >
# $Date$
# $Revision$
# $Author$
# $Id$
###############################################################
###############################################################
# comp_varDA.py - compare the effects of inflating static cov
# on the performance of a variational DA
###############################################################
###############################################################
__author__ = "Rahul Mahajan"
__email__ = "rahul.mahajan@nasa.gov"
__copyright__ = "Copyright 2012, NASA / GSFC / GMAO"
__license__ = "GPL"
__status__ = "Prototype"
###############################################################
###############################################################
import sys
import numpy as np
from matplotlib import pyplot
from argparse import ArgumentParser,ArgumentDefaultsHelpFormatter
from netCDF4 import Dataset
from module_IO import *
###############################################################
###############################################################
def main():
# name of starting ensDA output diagnostic file, starting index and measure
parser = ArgumentParser(description='compare the diag files written by varDA.py',formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-f','--filename',help='name of the diag file to read',required=True)
parser.add_argument('-m','--measure',help='measure to evaluate performance',required=False,choices=['obs','truth'],default='truth')
parser.add_argument('-b','--begin_index',help='starting index to read',type=int,required=False,default=101)
parser.add_argument('-e','--end_index',help='ending index to read',type=int,required=False,default=-1)
parser.add_argument('-s','--save_figure',help='save figures',action='store_true',required=False)
args = parser.parse_args()
fname = args.filename
measure = args.measure
sOI = args.begin_index
eOI = args.end_index
save_fig = args.save_figure
# Inflation factors to compare
#alpha = [1.0, 2.0, 3.0, 3.1, 3.2, 3.4]
    #alpha = [0.25, 0.3, 0.35, 0.4, 0.5, 0.6, 0.7, 1.0]
alpha = [1.0, 2.0, 2.5]
# some more arguments, currently hard-coded
    save_figures = save_fig # save plots as pdf (set via --save_figure)
yscale = 'linear' # y-axis of RMSE plots (linear/semilog)
yFix = 0.18 # fix the y-axis of RMSE plots ( None = automatic )
fOrient = 'portrait' # figure orientation (landscape/portrait)
if ( not measure ): measure = 'truth'
if ( sOI == -1 ): sOI = 0
nf = len(alpha)
fnames = []
for i in range(nf): fnames.append( fname + '%3.2f.nc4' % ((alpha[i])) )
if ( len(fnames) <= 15):
fcolor = ["#000000", "#C0C0C0", "#808080", "#800000", "#FF0000",\
"#800080", "#FF00FF", "#008000", "#00FF00", "#808000",\
"#FFFF00", "#000080", "#0000FF", "#008080", "#00FFFF"]
# black, silver, gray, maroon, red
# purple, fuchsia, green, lime, olive
# yellow, navy, blue, teal, aqua
else:
fcolor = get_Ndistinct_colors(len(fnames))
# read general dimensions and necessary attributes from the diagnostic file
[model, DA, _, gvarDA] = read_diag_info(fnames[0])
Bc = read_clim_cov(model=model,norm=True)
if ( gvarDA.update == 1 ): vstr = '3DVar'
elif ( gvarDA.update == 2 ): vstr = '4DVar'
# allocate room for variables
print('computing RMSE against %s' % measure)
xbrmse = np.zeros((len(fnames),DA.nassim))
xarmse = np.zeros((len(fnames),DA.nassim))
xyrmse = np.zeros((len(fnames),DA.nassim))
flabel = []
blabel = []
mean_prior = np.zeros(len(fnames))
mean_posterior = np.zeros(len(fnames))
std_prior = np.zeros(len(fnames))
std_posterior = np.zeros(len(fnames))
mean_niters = np.zeros(len(fnames))
std_niters = np.zeros(len(fnames))
innov = np.zeros(len(fnames))
mean_evratio = np.zeros(len(fnames))
std_evratio = np.zeros(len(fnames))
for fname in fnames:
print('reading ... %s' % fname)
f = fnames.index(fname)
try:
nc = Dataset(fname, mode='r', format='NETCDF4')
flabel.append(r'$\alpha = %3.2f$' % alpha[f])
blabel.append('%3.2f' % alpha[f])
nc.close()
except Exception as Instance:
print('Exception occurred during read of ' + fname)
print(type(Instance))
print(Instance.args)
print(Instance)
sys.exit(1)
# read the varDA for the specific diagnostic file
[_, _, _, varDA] = read_diag_info(fname)
# read the diagnostic file
xt, xb, xa, y, H, R, niters = read_diag(fname, 0, end_time=DA.nassim)
if ( varDA.update == 2 ): y = y[:,:model.Ndof]
# compute RMSE in prior, posterior and observations
if ( measure == 'truth' ):
xbrmse[f,] = np.sqrt( np.sum( (xt - xb)**2, axis = 1) / model.Ndof )
xarmse[f,] = np.sqrt( np.sum( (xt - xa)**2, axis = 1) / model.Ndof )
else:
xbrmse[f,] = np.sqrt( np.sum( (y - xb)**2, axis = 1) / model.Ndof )
xarmse[f,] = np.sqrt( np.sum( (y - xa)**2, axis = 1) / model.Ndof )
xyrmse[f,] = np.sqrt( np.sum( (xt - y)**2 ) / model.Ndof )
        evratio = np.zeros(len(niters))
for i in range(DA.nassim):
innov = np.sum((y[i,:] - np.dot(np.diag(H[i,:]),xb[ i,:]))**2)
totvar = np.sum(varDA.inflation.infl_fac*np.diag(Bc) + R[i,:])
evratio[i] = innov / totvar
mean_evratio[f] = np.mean(evratio[sOI:])
std_evratio[f] = np.std( evratio[sOI:],ddof=1)
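        # Note on evratio: it compares the squared innovation (obs minus
        # background) against the total assumed variance (inflated static
        # background variance plus observation-error variance). A ratio near
        # 1 indicates consistent error statistics; e.g. an innovation sum of
        # 2.0 over a total variance of 2.5 gives evratio = 0.8, hinting that
        # the assumed variances are slightly too large.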
# compute mean and std. dev. in the iteration count
mean_niters[f] = np.mean(niters[sOI+1:])
std_niters[f] = np.std( niters[sOI+1:], ddof=1)
# start plotting
#-----------------------------------------------------------
fig = pyplot.figure()
pyplot.clf()
pyplot.hold(True)
for fname in fnames:
f = fnames.index(fname)
q = np.squeeze(xbrmse[f,sOI:])
if ( yscale == 'linear' ): pyplot.plot( q,'-',color=fcolor[f],label=flabel[f],linewidth=1)
elif ( yscale == 'semilog' ): pyplot.semilogy(q,'-',color=fcolor[f],label=flabel[f],linewidth=1)
yl = pyplot.get(pyplot.gca(),'ylim')
xl = pyplot.get(pyplot.gca(),'xlim')
if ( yFix is None ): ymax = yl[1]
else: ymax = yFix
pyplot.ylim(0.0, ymax)
pyplot.xlim(0.0, len(q))
for fname in fnames:
f = fnames.index(fname)
q = np.squeeze(xbrmse[f,sOI:])
mean_prior[f] = np.mean(q)
std_prior[f] = np.std(q,ddof=1)
        label = 'mean rmse : %5.4f +/- %5.4f' % (np.mean(q), np.std(q,ddof=1))
        pyplot.text(25,(1-0.05*(f+1))*ymax,label,color=fcolor[f],fontsize=10)
pyplot.xlabel('Assimilation Cycle',fontweight='bold',fontsize=12)
pyplot.ylabel('RMSE',fontweight='bold',fontsize=12)
pyplot.title('RMSE - Prior',fontweight='bold',fontsize=14)
pyplot.legend(loc=1)
pyplot.hold(False)
if save_figures:
fig.savefig('%s_varDA_RMSE_Prior.pdf' % (model.Name),orientation=fOrient,format='pdf')
#-----------------------------------------------------------
#-----------------------------------------------------------
fig = pyplot.figure()
pyplot.clf()
pyplot.hold(True)
for fname in fnames:
f = fnames.index(fname)
q = np.squeeze(xarmse[f,sOI:])
if ( yscale == 'linear' ): pyplot.plot( q,'-',color=fcolor[f],label=flabel[f],linewidth=1)
elif ( yscale == 'semilog' ): pyplot.semilogy(q,'-',color=fcolor[f],label=flabel[f],linewidth=1)
yl = pyplot.get(pyplot.gca(),'ylim')
xl = pyplot.get(pyplot.gca(),'xlim')
if ( yFix is None ): ymax = yl[1]
else: ymax = yFix
pyplot.ylim(0.0, ymax)
pyplot.xlim(0.0, len(q))
for fname in fnames:
f = fnames.index(fname)
q = np.squeeze(xarmse[f,sOI:])
mean_posterior[f] = np.mean(q)
std_posterior[f] = np.std(q,ddof=1)
        label = 'mean rmse : %5.4f +/- %5.4f' % (np.mean(q), np.std(q,ddof=1))
        pyplot.text(25,(1-0.05*(f+1))*ymax,label,color=fcolor[f],fontsize=10)
pyplot.xlabel('Assimilation Cycle',fontweight='bold',fontsize=12)
pyplot.ylabel('RMSE',fontweight='bold',fontsize=12)
pyplot.title('RMSE - Posterior',fontweight='bold',fontsize=14)
pyplot.legend(loc=1)
pyplot.hold(False)
if save_figures:
fig.savefig('%s_varDA_RMSE_Posterior.pdf' % (model.Name),orientation=fOrient,format='pdf')
#-----------------------------------------------------------
#-----------------------------------------------------------
fig = pyplot.figure()
pyplot.clf()
pyplot.hold(True)
index = np.arange(nf) + 0.15
width = 0.35
bottom = 0.0
pyplot.bar(index,mean_prior-bottom,width,bottom=bottom,linewidth=0.0,color='0.75',edgecolor='0.75',yerr=std_prior, error_kw=dict(ecolor='black',elinewidth=3,capsize=5))
pyplot.bar(index+width,mean_posterior-bottom,width,bottom=bottom,linewidth=0.0,color='gray',edgecolor='gray',yerr=std_posterior,error_kw=dict(ecolor='black',elinewidth=3,capsize=5))
pyplot.xticks(index+width, blabel)
pyplot.xlabel('Inflation Factor', fontweight='bold',fontsize=12)
pyplot.ylabel('RMSE', fontweight='bold',fontsize=12)
pyplot.title( 'RMSE', fontweight='bold',fontsize=14)
pyplot.hold(False)
if save_figures:
fig.savefig('%s_varDA_RMSE.pdf' % (model.Name),orientation=fOrient,format='pdf')
#-----------------------------------------------------------
#-----------------------------------------------------------
fig = pyplot.figure()
pyplot.clf()
pyplot.hold(True)
index = np.arange(nf) + 0.2
width = 0.6
pyplot.bar(index,mean_niters,width,linewidth=0.0,color='gray',edgecolor='gray',yerr=std_niters,error_kw=dict(ecolor='black',elinewidth=3,capsize=5))
pyplot.xticks(index+width/2, blabel)
pyplot.xlabel('Inflation Factor', fontweight='bold',fontsize=12)
pyplot.ylabel('No. of Iterations', fontweight='bold',fontsize=12)
pyplot.title( 'No. of Iterations', fontweight='bold',fontsize=14)
pyplot.hold(False)
if save_figures:
fig.savefig('%s_varDA_niters.pdf' % (model.Name),orientation=fOrient,format='pdf')
#-----------------------------------------------------------
#-----------------------------------------------------------
fig = pyplot.figure()
pyplot.clf()
pyplot.hold(True)
index = np.arange(nf) + 0.2
width = 0.6
pyplot.bar(index,mean_evratio,width,linewidth=0.0,color='gray',edgecolor='gray',yerr=std_evratio,error_kw=dict(ecolor='black',elinewidth=3,capsize=5))
pyplot.xticks(index+width/2, blabel)
pyplot.xlabel('Inflation Factor', fontweight='bold',fontsize=12)
pyplot.ylabel('Error - Variance Ratio', fontweight='bold',fontsize=12)
pyplot.title( 'Error - Variance Ratio', fontweight='bold',fontsize=14)
pyplot.hold(False)
if save_figures:
fig.savefig('%s_varDA_evratio.pdf' % (model.Name),orientation=fOrient,format='pdf')
#-----------------------------------------------------------
if not save_figures: pyplot.show()
print('... all done ...')
sys.exit(0)
###############################################################
###############################################################
def get_Ndistinct_colors(num_colors):
from colorsys import hls_to_rgb
colors=[]
for i in np.arange(0.0, 360.0, 360.0 / num_colors):
hue = i/360.0
lightness = (50 + np.random.rand() * 10)/100.0
saturation = (90 + np.random.rand() * 10)/100.0
colors.append(hls_to_rgb(hue, lightness, saturation))
return colors
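# Sanity check: get_Ndistinct_colors(3) returns three (r, g, b) tuples with
# components in [0, 1]; hues are evenly spaced while lightness and saturation
# are randomly jittered, so the exact colors differ between runs.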
###############################################################
###############################################################
if __name__ == "__main__":
main()
###############################################################
| 40.516447
| 185
| 0.535033
| true
| true
|
|
1c430c5cc94b65be4f73ba40070bf64a8b9520f7
| 518
|
py
|
Python
|
backend/app/app/crud/crud_permission.py
|
l2m2/fastapi-vue-admin
|
165060ee510b6438ff8aa42ab839fcf77f5dd387
|
[
"MIT"
] | 5
|
2021-11-25T20:07:31.000Z
|
2022-03-22T02:28:51.000Z
|
backend/app/app/crud/crud_permission.py
|
l2m2/fastapi-vue-admin
|
165060ee510b6438ff8aa42ab839fcf77f5dd387
|
[
"MIT"
] | null | null | null |
backend/app/app/crud/crud_permission.py
|
l2m2/fastapi-vue-admin
|
165060ee510b6438ff8aa42ab839fcf77f5dd387
|
[
"MIT"
] | 3
|
2021-05-15T18:19:10.000Z
|
2021-08-24T08:23:41.000Z
|
from typing import Optional
from sqlalchemy.orm import Session
from app.crud.base import CRUDBase
from app.models.permission import Permission
from app.schemas.permission import PermissionCreate, PermissionUpdate, PermissionList
class CRUDPermission(CRUDBase[Permission, PermissionCreate, PermissionUpdate, PermissionList]):
def get_by_code(self, db: Session, *, code: str) -> Optional[Permission]:
return db.query(Permission).filter(Permission.code == code).first()
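    # Example usage (hypothetical, assuming a configured SQLAlchemy Session
    # `db` and a seeded Permission row):
    #     perm = permission.get_by_code(db, code="users:read")
    #     if perm is None:
    #         ...  # no Permission with that code exists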
permission = CRUDPermission(Permission)
| 34.533333
| 95
| 0.80888
|
from typing import Optional
from sqlalchemy.orm import Session
from app.crud.base import CRUDBase
from app.models.permission import Permission
from app.schemas.permission import PermissionCreate, PermissionUpdate, PermissionList
class CRUDPermission(CRUDBase[Permission, PermissionCreate, PermissionUpdate, PermissionList]):
def get_by_code(self, db: Session, *, code: str) -> Optional[Permission]:
return db.query(Permission).filter(Permission.code == code).first()
permission = CRUDPermission(Permission)
| true
| true
|
1c430d68cee1c1046048674e472c16c99b28fcec
| 3,172
|
py
|
Python
|
isi_sdk_8_2_0/isi_sdk_8_2_0/models/network_groupnets.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_2_0/isi_sdk_8_2_0/models/network_groupnets.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_2_0/isi_sdk_8_2_0/models/network_groupnets.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 7
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_2_0.models.network_groupnet_extended import NetworkGroupnetExtended # noqa: F401,E501
class NetworkGroupnets(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'groupnets': 'list[NetworkGroupnetExtended]'
}
attribute_map = {
'groupnets': 'groupnets'
}
def __init__(self, groupnets=None): # noqa: E501
"""NetworkGroupnets - a model defined in Swagger""" # noqa: E501
self._groupnets = None
self.discriminator = None
if groupnets is not None:
self.groupnets = groupnets
@property
def groupnets(self):
"""Gets the groupnets of this NetworkGroupnets. # noqa: E501
:return: The groupnets of this NetworkGroupnets. # noqa: E501
:rtype: list[NetworkGroupnetExtended]
"""
return self._groupnets
@groupnets.setter
def groupnets(self, groupnets):
"""Sets the groupnets of this NetworkGroupnets.
:param groupnets: The groupnets of this NetworkGroupnets. # noqa: E501
:type: list[NetworkGroupnetExtended]
"""
self._groupnets = groupnets
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NetworkGroupnets):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
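# Example (hypothetical, assuming g1 and g2 are NetworkGroupnetExtended
# instances):
#     nets = NetworkGroupnets(groupnets=[g1, g2])
#     payload = nets.to_dict()  # {'groupnets': [g1.to_dict(), g2.to_dict()]}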
| 27.582609
| 101
| 0.580706
|
import pprint
import re
import six
from isi_sdk_8_2_0.models.network_groupnet_extended import NetworkGroupnetExtended
class NetworkGroupnets(object):
swagger_types = {
'groupnets': 'list[NetworkGroupnetExtended]'
}
attribute_map = {
'groupnets': 'groupnets'
}
def __init__(self, groupnets=None):
self._groupnets = None
self.discriminator = None
if groupnets is not None:
self.groupnets = groupnets
@property
def groupnets(self):
return self._groupnets
@groupnets.setter
def groupnets(self, groupnets):
self._groupnets = groupnets
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, NetworkGroupnets):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
1c430e38b9ca8f688f72cacfca7a1cdccab2d5c4
| 294
|
py
|
Python
|
odooku/patch/__init__.py
|
davejrv/import
|
0dbca8f432d1a051a2bdb30c952cc26f1ffd74ae
|
[
"Apache-2.0"
] | 55
|
2017-09-11T06:48:39.000Z
|
2022-03-31T18:14:46.000Z
|
odooku/patch/__init__.py
|
davejrv/import
|
0dbca8f432d1a051a2bdb30c952cc26f1ffd74ae
|
[
"Apache-2.0"
] | 4
|
2018-01-13T09:13:48.000Z
|
2019-09-28T10:24:43.000Z
|
odooku/patch/__init__.py
|
davejrv/import
|
0dbca8f432d1a051a2bdb30c952cc26f1ffd74ae
|
[
"Apache-2.0"
] | 46
|
2017-12-30T22:31:45.000Z
|
2022-02-17T05:35:55.000Z
|
import importlib
import pkgutil
from . patch import SoftPatch, HardPatch, patcher
def apply_patches():
import odooku_patches
for importer, name, ispkg in pkgutil.iter_modules(odooku_patches.__path__):
module = importlib.import_module('%s.%s' % (odooku_patches.__name__, name))
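# A patch is applied simply by importing its module from the `odooku_patches`
# namespace package. A minimal sketch of such a module (hypothetical name and
# body):
#     # odooku_patches/patch_example.py
#     from odooku.patch import SoftPatch
#     # constructing/registering a SoftPatch here runs at import time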
| 32.666667
| 83
| 0.758503
|
import importlib
import pkgutil
from . patch import SoftPatch, HardPatch, patcher
def apply_patches():
import odooku_patches
for importer, name, ispkg in pkgutil.iter_modules(odooku_patches.__path__):
module = importlib.import_module('%s.%s' % (odooku_patches.__name__, name))
| true
| true
|
1c430e92ba5f56e04484b5f8e6dc0abe1ade4089
| 2,344
|
py
|
Python
|
no_agent2/policy_network.py
|
songaal/rltrader
|
4aac8085dda1a58fbf30a313f2a4608398c971a3
|
[
"MIT"
] | 2
|
2020-06-13T07:18:10.000Z
|
2020-11-03T03:46:40.000Z
|
no_agent2/policy_network.py
|
songaal/rltrader
|
4aac8085dda1a58fbf30a313f2a4608398c971a3
|
[
"MIT"
] | null | null | null |
no_agent2/policy_network.py
|
songaal/rltrader
|
4aac8085dda1a58fbf30a313f2a4608398c971a3
|
[
"MIT"
] | 1
|
2020-05-16T08:41:29.000Z
|
2020-05-16T08:41:29.000Z
|
import numpy as np
from keras.models import Sequential
from keras.layers import Activation, LSTM, Dense, BatchNormalization, Embedding, Input
from keras.optimizers import sgd
from keras import callbacks
from keras.preprocessing import sequence
class PolicyNetwork:
def __init__(self, input_dim, output_dim=0, lr=0.01):
self.input_dim = input_dim
self.lr = lr
        # LSTM neural network
self.model = Sequential()
self.model.add(LSTM(256, input_shape=(5, 15),
return_sequences=True, stateful=False, dropout=0.5))
        # original LSTM model (kept for reference):
# self.model.add(LSTM(256, input_shape=input_dim,
# return_sequences=True, stateful=False, dropout=0.5))
self.model.add(BatchNormalization())
self.model.add(LSTM(256, return_sequences=True, stateful=False, dropout=0.5))
self.model.add(BatchNormalization())
self.model.add(LSTM(256, return_sequences=False, stateful=False, dropout=0.5))
self.model.add(BatchNormalization())
self.model.add(Dense(3))
# self.model.add(Dense(units=3, activation='softmax'))
self.model.add(Activation('linear'))
self.model.compile(optimizer=sgd(lr=lr), loss='mse', metrics=['accuracy'])
self.prob = None
def predict(self, x):
return self.model.predict(x)[0]
def fit(self, x_train, y_train, x_test, y_test, epochs=1000, batch_size=10, model_path=None):
tensorboard = callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)
model_checkpoint = callbacks.ModelCheckpoint(filepath=model_path, save_best_only=True)
early_stopping = callbacks.EarlyStopping(monitor='val_loss', patience=10, verbose=1)
self.model.fit(x_train, y_train,
batch_size=batch_size, epochs=epochs,
validation_data=(x_test, y_test),
callbacks=[tensorboard, model_checkpoint, early_stopping])
def save_model(self, model_path):
if model_path is not None and self.model is not None:
self.model.save_weights(model_path, overwrite=True)
def load_model(self, model_path):
if model_path is not None:
self.model.load_weights(model_path)
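# Example training sketch (hypothetical data shaped for input_shape=(5, 15)
# and the Dense(3) output above):
#     net = PolicyNetwork(input_dim=(5, 15))
#     x = np.random.rand(100, 5, 15)   # 100 samples, 5 timesteps, 15 features
#     y = np.random.rand(100, 3)       # 3 output units
#     net.fit(x[:80], y[:80], x[80:], y[80:], epochs=5, model_path='model.h5')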
| 43.407407
| 117
| 0.648464
|
import numpy as np
from keras.models import Sequential
from keras.layers import Activation, LSTM, Dense, BatchNormalization, Embedding, Input
from keras.optimizers import sgd
from keras import callbacks
from keras.preprocessing import sequence
class PolicyNetwork:
def __init__(self, input_dim, output_dim=0, lr=0.01):
self.input_dim = input_dim
self.lr = lr
self.model = Sequential()
self.model.add(LSTM(256, input_shape=(5, 15),
return_sequences=True, stateful=False, dropout=0.5))
self.model.add(BatchNormalization())
self.model.add(LSTM(256, return_sequences=True, stateful=False, dropout=0.5))
self.model.add(BatchNormalization())
self.model.add(LSTM(256, return_sequences=False, stateful=False, dropout=0.5))
self.model.add(BatchNormalization())
self.model.add(Dense(3))
self.model.add(Activation('linear'))
self.model.compile(optimizer=sgd(lr=lr), loss='mse', metrics=['accuracy'])
self.prob = None
def predict(self, x):
return self.model.predict(x)[0]
def fit(self, x_train, y_train, x_test, y_test, epochs=1000, batch_size=10, model_path=None):
tensorboard = callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)
model_checkpoint = callbacks.ModelCheckpoint(filepath=model_path, save_best_only=True)
early_stopping = callbacks.EarlyStopping(monitor='val_loss', patience=10, verbose=1)
self.model.fit(x_train, y_train,
batch_size=batch_size, epochs=epochs,
validation_data=(x_test, y_test),
callbacks=[tensorboard, model_checkpoint, early_stopping])
def save_model(self, model_path):
if model_path is not None and self.model is not None:
self.model.save_weights(model_path, overwrite=True)
def load_model(self, model_path):
if model_path is not None:
self.model.load_weights(model_path)
| true
| true
|
1c430f38da4bfc0e8a45b675aa1cea6c3c1fecfe
| 9,065
|
py
|
Python
|
afterglow_core/resources/data_provider_plugins/dss_image_provider.py
|
SkynetRTN/afterglow-core
|
cd9d84e68cc7126887d0aa7f96f608b91b0b0ae3
|
[
"Apache-2.0"
] | 2
|
2021-05-24T15:12:07.000Z
|
2022-02-17T19:58:16.000Z
|
afterglow_core/resources/data_provider_plugins/dss_image_provider.py
|
SkynetRTN/afterglow-core
|
cd9d84e68cc7126887d0aa7f96f608b91b0b0ae3
|
[
"Apache-2.0"
] | 1
|
2022-02-27T03:01:06.000Z
|
2022-02-27T03:01:06.000Z
|
afterglow_core/resources/data_provider_plugins/dss_image_provider.py
|
SkynetRTN/afterglow-core
|
cd9d84e68cc7126887d0aa7f96f608b91b0b0ae3
|
[
"Apache-2.0"
] | 2
|
2021-06-08T18:16:40.000Z
|
2021-07-09T14:19:49.000Z
|
"""
Afterglow Core: imaging survey data provider plugin
"""
from typing import List as TList, Optional, Tuple, Union
from io import BytesIO
from marshmallow.fields import Float, String
from marshmallow.validate import OneOf, Range
import requests
import astropy.io.fits as pyfits
from ...models import DataProvider, DataProviderAsset
from ...errors import MissingFieldError, ValidationError
from ...errors.data_provider import AssetNotFoundError
__all__ = ['DSSImageDataProvider']
class DSSImageDataProvider(DataProvider):
r"""
DSS image data provider plugin class
Asset path is <ra>,<dec>\<width>,<height> or <ra>,<dec>\<width>
where <ra> and <dec> are field center coordinates in degrees, <width> and
<height> is FOV size in arcminutes (<height> = <width> if omitted), e.g.
"1.234,+5.678\15.0".
"""
name = 'dss'
display_name = 'DSS Images'
description = 'Access to Digitized Sky Survey images'
searchable = True
browseable = False
readonly = True
quota = usage = None
allow_multiple_instances = False
search_fields = dict(
ra_hours=dict(
label='Center RA', type='float', min_val=0,
max_val=24),
dec_degs=dict(
label='Center Dec', type='float',
min_val=-90, max_val=90),
object=dict(label='Object', type='text'),
width=dict(
label='Field Width [arcmin]', type='float', min_val=0),
height=dict(
label='Field Height [arcmin]', type='int', min_val=0),
)
server: str = String(validate=OneOf(['STScI', 'ESO']), default='STScI')
timeout: float = Float(
validate=Range(min=0, min_inclusive=False), default=30)
@staticmethod
def _get_asset_params(path: str) -> Tuple[float, float, float, float]:
r"""
Decompose asset path into RA/Dec in degrees and field width/height
in arcminutes
:param path: asset path in the form <ra>,<dec>\<width>,<height>
:return: tuple (RA, Dec, width, height)
"""
try:
position, size = path.split('\\')
ra_degs, dec_degs = position.split(',')
ra_degs, dec_degs = float(ra_degs), float(dec_degs)
if not 0 <= ra_degs < 360:
raise ValueError('Expected 0 <= ra < 360')
if not -90 <= dec_degs <= 90:
raise ValueError('Expected -90 <= dec <= 90')
if ',' in size:
width, height = size.split(',')
width, height = float(width), float(height)
else:
width = height = float(size)
if width <= 0:
raise ValueError('Positive FOV width expected')
if height <= 0:
raise ValueError('Positive FOV height expected')
except (TypeError, ValueError) as e:
raise ValidationError('path', str(e))
return ra_degs, dec_degs, width, height
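    # Example (per the class docstring): the path '1.234,+5.678\15.0' parses
    # to (1.234, 5.678, 15.0, 15.0); height defaults to width when the size
    # component has no comma.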
@staticmethod
def _get_asset(ra_degs: float, dec_degs: float, width: float,
height: float) -> DataProviderAsset:
"""
Return image survey data provider asset for the given parameters
:param ra_degs: right ascension of field center in degrees
:param dec_degs: declination of field center in degrees
:param width: field width in arcminutes
:param height: field height in arcminutes
:return: asset object
"""
if width == height:
size = str(width)
else:
size = '{},{}'.format(width, height)
return DataProviderAsset(
name='DSS_{},{}'.format(ra_degs, dec_degs),
collection=False,
path='{},{}\\{}'.format(ra_degs, dec_degs, size),
metadata={
'type': 'FITS', 'survey': 'DSS',
'ra': ra_degs, 'dec': dec_degs,
'fov_ra': width, 'fov_dec': height,
'layers': 1,
},
)
def find_assets(self, path: Optional[str] = None,
sort_by: Optional[str] = None,
page_size: Optional[int] = None,
page: Optional[Union[int, str]] = None,
ra_hours: Optional[float] = None,
dec_degs: Optional[float] = None,
width: Optional[float] = None,
height: Optional[float] = None) \
-> Tuple[TList[DataProviderAsset], None]:
"""
Return a list of assets matching the given parameters
Returns an empty list if survey is unknown or no imaging data at the
given FOV; otherwise, returns a single asset
:param path: path to the collection asset to search in; ignored
:param sort_by: unused
:param page_size: unused
:param page: unused
:param ra_hours: RA of image center in hours
:param dec_degs: Dec of image center in degrees
:param width: image width in arcminutes
:param height: image height in arcminutes; default: same as `width`
:return: list of 0 or 1 :class:`DataProviderAsset` objects for assets
matching the query parameters, and None for the pagination info
"""
if ra_hours is None:
raise MissingFieldError('ra_hours')
try:
ra_hours = float(ra_hours)
if not 0 <= ra_hours < 24:
raise ValueError()
except ValueError:
raise ValidationError(
'ra_hours', 'Expected 0 <= ra_hours < 24')
if dec_degs is None:
raise MissingFieldError('dec_degs')
try:
dec_degs = float(dec_degs)
if not -90 <= dec_degs <= 90:
raise ValueError()
except ValueError:
raise ValidationError(
'dec_degs', 'Expected -90 <= dec_degs <= 90')
if width is None and height is None:
raise MissingFieldError('width,height')
if width is not None:
try:
width = float(width)
if width <= 0:
raise ValueError()
except ValueError:
raise ValidationError('width', 'Positive FOV width expected')
if height is not None:
try:
height = float(height)
if height <= 0:
raise ValueError()
except ValueError:
raise ValidationError('height', 'Positive FOV height expected')
if width is None:
width = height
elif height is None:
height = width
return [self._get_asset(ra_hours*15, dec_degs, width, height)], None
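    # Note: ra_hours is converted to degrees above (1 hour of RA = 15 deg),
    # so a query at ra_hours=12.0 yields an asset centered at ra=180.0.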
def get_asset(self, path: str) -> DataProviderAsset:
r"""
Return an asset at the given path
:param path: asset path in the form
<survey>\<position>\<width>,<height>
:return: asset object
"""
return self._get_asset(*self._get_asset_params(path))
def get_asset_data(self, path: str) -> bytes:
"""
Return data for a non-collection asset at the given path
:param path: asset path; must identify a non-collection asset
:return: asset data
"""
ra_degs, dec_degs, width, height = self._get_asset_params(path)
try:
if self.server == 'STScI':
url = 'https://stdatu.stsci.edu/cgi-bin/dss_search'
params = {
'v': 'poss2ukstu_red',
'r': str(ra_degs),
'd': str(dec_degs),
'e': 'J2000',
'h': str(height),
'w': str(width),
'f': 'fits',
'c': 'none',
'fov': 'NONE',
'v3': '',
}
else:
url = 'https://archive.eso.org/dss/dss/image'
params = {
'ra': str(ra_degs),
'dec': str(dec_degs),
'equinox': 'J2000',
'name': '',
'x': str(width),
'y': str(height),
'Sky-Survey': 'DSS2-red',
'mime-type': 'download-fits',
'statsmode': 'WEBFORM',
}
res = requests.request(
'GET', url, params=params, timeout=self.timeout)
except Exception as e:
raise AssetNotFoundError(path=path, reason=str(e))
if res.status_code != 200:
raise AssetNotFoundError(
path=path,
reason='Request failed (HTTP status {})'
.format(res.status_code))
buf = BytesIO(res.content)
with pyfits.open(buf, 'readonly') as f:
if len(f) > 1:
# Remove extension HDU
out = BytesIO()
f[0].writeto(out, output_verify='silentfix+ignore')
return out.getvalue()
return res.content
| 35.272374
| 79
| 0.534915
|
from typing import List as TList, Optional, Tuple, Union
from io import BytesIO
from marshmallow.fields import Float, String
from marshmallow.validate import OneOf, Range
import requests
import astropy.io.fits as pyfits
from ...models import DataProvider, DataProviderAsset
from ...errors import MissingFieldError, ValidationError
from ...errors.data_provider import AssetNotFoundError
__all__ = ['DSSImageDataProvider']
class DSSImageDataProvider(DataProvider):
name = 'dss'
display_name = 'DSS Images'
description = 'Access to Digitized Sky Survey images'
searchable = True
browseable = False
readonly = True
quota = usage = None
allow_multiple_instances = False
search_fields = dict(
ra_hours=dict(
label='Center RA', type='float', min_val=0,
max_val=24),
dec_degs=dict(
label='Center Dec', type='float',
min_val=-90, max_val=90),
object=dict(label='Object', type='text'),
width=dict(
label='Field Width [arcmin]', type='float', min_val=0),
height=dict(
label='Field Height [arcmin]', type='int', min_val=0),
)
server: str = String(validate=OneOf(['STScI', 'ESO']), default='STScI')
timeout: float = Float(
validate=Range(min=0, min_inclusive=False), default=30)
@staticmethod
def _get_asset_params(path: str) -> Tuple[float, float, float, float]:
try:
position, size = path.split('\\')
ra_degs, dec_degs = position.split(',')
ra_degs, dec_degs = float(ra_degs), float(dec_degs)
if not 0 <= ra_degs < 360:
raise ValueError('Expected 0 <= ra < 360')
if not -90 <= dec_degs <= 90:
raise ValueError('Expected -90 <= dec <= 90')
if ',' in size:
width, height = size.split(',')
width, height = float(width), float(height)
else:
width = height = float(size)
if width <= 0:
raise ValueError('Positive FOV width expected')
if height <= 0:
raise ValueError('Positive FOV height expected')
except (TypeError, ValueError) as e:
raise ValidationError('path', str(e))
return ra_degs, dec_degs, width, height
@staticmethod
def _get_asset(ra_degs: float, dec_degs: float, width: float,
height: float) -> DataProviderAsset:
if width == height:
size = str(width)
else:
size = '{},{}'.format(width, height)
return DataProviderAsset(
name='DSS_{},{}'.format(ra_degs, dec_degs),
collection=False,
path='{},{}\\{}'.format(ra_degs, dec_degs, size),
metadata={
'type': 'FITS', 'survey': 'DSS',
'ra': ra_degs, 'dec': dec_degs,
'fov_ra': width, 'fov_dec': height,
'layers': 1,
},
)
def find_assets(self, path: Optional[str] = None,
sort_by: Optional[str] = None,
page_size: Optional[int] = None,
page: Optional[Union[int, str]] = None,
ra_hours: Optional[float] = None,
dec_degs: Optional[float] = None,
width: Optional[float] = None,
height: Optional[float] = None) \
-> Tuple[TList[DataProviderAsset], None]:
if ra_hours is None:
raise MissingFieldError('ra_hours')
try:
ra_hours = float(ra_hours)
if not 0 <= ra_hours < 24:
raise ValueError()
except ValueError:
raise ValidationError(
'ra_hours', 'Expected 0 <= ra_hours < 24')
if dec_degs is None:
raise MissingFieldError('dec_degs')
try:
dec_degs = float(dec_degs)
if not -90 <= dec_degs <= 90:
raise ValueError()
except ValueError:
raise ValidationError(
'dec_degs', 'Expected -90 <= dec_degs <= 90')
if width is None and height is None:
raise MissingFieldError('width,height')
if width is not None:
try:
width = float(width)
if width <= 0:
raise ValueError()
except ValueError:
raise ValidationError('width', 'Positive FOV width expected')
if height is not None:
try:
height = float(height)
if height <= 0:
raise ValueError()
except ValueError:
raise ValidationError('height', 'Positive FOV height expected')
if width is None:
width = height
elif height is None:
height = width
return [self._get_asset(ra_hours*15, dec_degs, width, height)], None
def get_asset(self, path: str) -> DataProviderAsset:
return self._get_asset(*self._get_asset_params(path))
def get_asset_data(self, path: str) -> bytes:
ra_degs, dec_degs, width, height = self._get_asset_params(path)
try:
if self.server == 'STScI':
url = 'https://stdatu.stsci.edu/cgi-bin/dss_search'
params = {
'v': 'poss2ukstu_red',
'r': str(ra_degs),
'd': str(dec_degs),
'e': 'J2000',
'h': str(height),
'w': str(width),
'f': 'fits',
'c': 'none',
'fov': 'NONE',
'v3': '',
}
else:
url = 'https://archive.eso.org/dss/dss/image'
params = {
'ra': str(ra_degs),
'dec': str(dec_degs),
'equinox': 'J2000',
'name': '',
'x': str(width),
'y': str(height),
'Sky-Survey': 'DSS2-red',
'mime-type': 'download-fits',
'statsmode': 'WEBFORM',
}
res = requests.request(
'GET', url, params=params, timeout=self.timeout)
except Exception as e:
raise AssetNotFoundError(path=path, reason=str(e))
if res.status_code != 200:
raise AssetNotFoundError(
path=path,
reason='Request failed (HTTP status {})'
.format(res.status_code))
buf = BytesIO(res.content)
with pyfits.open(buf, 'readonly') as f:
if len(f) > 1:
out = BytesIO()
f[0].writeto(out, output_verify='silentfix+ignore')
return out.getvalue()
return res.content
| true
| true
|
1c43107c4f2485e25dc74af0dfc00acefcc043c7
| 10,203
|
py
|
Python
|
kuryr_kubernetes/cmd/status.py
|
digitalsimboja/kuryr-kubernetes
|
e2e8e514d3c93b0546716dfe0c458e91d14ffa10
|
[
"Apache-2.0"
] | 155
|
2016-05-23T01:18:04.000Z
|
2022-02-07T04:27:53.000Z
|
kuryr_kubernetes/cmd/status.py
|
digitalsimboja/kuryr-kubernetes
|
e2e8e514d3c93b0546716dfe0c458e91d14ffa10
|
[
"Apache-2.0"
] | 635
|
2019-04-08T18:24:14.000Z
|
2022-03-30T13:48:10.000Z
|
kuryr_kubernetes/cmd/status.py
|
digitalsimboja/kuryr-kubernetes
|
e2e8e514d3c93b0546716dfe0c458e91d14ffa10
|
[
"Apache-2.0"
] | 71
|
2016-05-24T15:46:39.000Z
|
2022-03-11T06:24:44.000Z
|
# Copyright 2018 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
CLI interface for kuryr status commands.
"""
import copy
import sys
import textwrap
import traceback
import prettytable
import os_vif
from os_vif.objects import base
from oslo_config import cfg
from oslo_serialization import jsonutils
from kuryr_kubernetes import clients
from kuryr_kubernetes import config
from kuryr_kubernetes import constants
from kuryr_kubernetes import exceptions
from kuryr_kubernetes import objects
from kuryr_kubernetes.objects import vif
from kuryr_kubernetes import utils
from kuryr_kubernetes import version
CONF = config.CONF
UPGRADE_CHECK_SUCCESS = 0
UPGRADE_CHECK_WARNING = 1
UPGRADE_CHECK_FAILURE = 2
UPGRADE_CHECK_MSG_MAP = {
UPGRADE_CHECK_SUCCESS: 'Success',
UPGRADE_CHECK_WARNING: 'Warning',
UPGRADE_CHECK_FAILURE: 'Failure',
}
class UpgradeCheckResult(object):
"""Class used for 'kuryr-k8s-status upgrade check' results.
    The 'code' attribute holds one of the UPGRADE_CHECK_* integer constants.
The 'details' attribute is a message generally only used for
checks that result in a warning or failure code. The details should provide
information on what issue was discovered along with any remediation.
"""
def __init__(self, code, details=None):
super(UpgradeCheckResult, self).__init__()
self.code = code
self.details = details
def get_details(self):
if self.details is not None:
# wrap the text on the details to 60 characters
return '\n'.join(textwrap.wrap(self.details, 60,
subsequent_indent=' ' * 9))
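    # Illustration of the wrapping convention above (hypothetical text):
    # a 150-character details message becomes three lines of at most 60
    # characters, with continuation lines indented nine spaces so they
    # line up under the 'Details: ' label printed by upgrade_check().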
class UpgradeCommands(object):
def __init__(self):
self.check_methods = {
'Pod annotations': self._check_annotations, # Stein
}
clients.setup_kubernetes_client()
self.k8s = clients.get_kubernetes_client()
def _get_annotation(self, pod):
annotations = pod['metadata']['annotations']
if constants.K8S_ANNOTATION_VIF not in annotations:
# NOTE(dulek): We ignore pods without annotation, those
# probably are hostNetworking.
return None
k_ann = annotations[constants.K8S_ANNOTATION_VIF]
k_ann = jsonutils.loads(k_ann)
obj = base.VersionedObject.obj_from_primitive(k_ann)
return obj
def _check_annotations(self):
old_count = 0
malformed_count = 0
pods = self.k8s.get('/api/v1/pods')['items']
for pod in pods:
try:
obj = self._get_annotation(pod)
if not obj:
# NOTE(dulek): We ignore pods without annotation, those
# probably are hostNetworking.
continue
except Exception:
# TODO(dulek): We might want to print this exception.
malformed_count += 1
continue
if obj.obj_name() != objects.vif.PodState.obj_name():
old_count += 1
elif not self._has_valid_sriov_annot(obj):
old_count += 1
if malformed_count == 0 and old_count == 0:
return UpgradeCheckResult(0, 'All annotations are updated.')
elif malformed_count > 0 and old_count == 0:
msg = ('You have %d malformed Kuryr pod annotations in your '
'deployment. This is not blocking the upgrade, but '
'consider investigating it.' % malformed_count)
return UpgradeCheckResult(1, msg)
elif old_count > 0:
msg = ('You have %d Kuryr pod annotations in old format. You need '
'to run `kuryr-k8s-status upgrade update-annotations` '
'before proceeding with the upgrade.' % old_count)
return UpgradeCheckResult(2, msg)
def upgrade_check(self):
check_results = []
t = prettytable.PrettyTable(['Upgrade Check Results'],
hrules=prettytable.ALL)
t.align = 'l'
for name, method in self.check_methods.items():
result = method()
check_results.append(result)
cell = (
'Check: %(name)s\n'
'Result: %(result)s\n'
'Details: %(details)s' %
{
'name': name,
'result': UPGRADE_CHECK_MSG_MAP[result.code],
'details': result.get_details(),
}
)
t.add_row([cell])
print(t)
return max(res.code for res in check_results)
def _convert_annotations(self, test_fn, update_fn):
updated_count = 0
not_updated_count = 0
malformed_count = 0
pods = self.k8s.get('/api/v1/pods')['items']
for pod in pods:
try:
obj = self._get_annotation(pod)
if not obj:
# NOTE(dulek): We ignore pods without annotation, those
# probably are hostNetworking.
continue
except Exception:
malformed_count += 1
continue
if test_fn(obj):
obj = update_fn(obj)
serialized = obj.obj_to_primitive()
try:
ann = {
constants.K8S_ANNOTATION_VIF:
jsonutils.dumps(serialized)
}
self.k8s.annotate(
utils.get_res_link(pod), ann,
pod['metadata']['resourceVersion'])
except exceptions.K8sClientException:
print('Error when updating annotation for pod %s/%s' %
(pod['metadata']['namespace'],
pod['metadata']['name']))
                    not_updated_count += 1
                    continue
updated_count += 1
t = prettytable.PrettyTable(['Stat', 'Number'],
hrules=prettytable.ALL)
t.align = 'l'
cells = [['Updated annotations', updated_count],
['Malformed annotations', malformed_count],
['Annotations left', not_updated_count]]
for cell in cells:
t.add_row(cell)
print(t)
def _has_valid_sriov_annot(self, state):
for obj in state.vifs.values():
if obj.obj_name() != objects.vif.VIFSriov.obj_name():
continue
if hasattr(obj, 'pod_name') and hasattr(obj, 'pod_link'):
continue
return False
return True
def _convert_sriov(self, state):
new_state = copy.deepcopy(state)
for iface, obj in new_state.additional_vifs.items():
if obj.obj_name() != objects.vif.VIFSriov.obj_name():
continue
if hasattr(obj, 'pod_name') and hasattr(obj, 'pod_link'):
continue
new_obj = objects.vif.VIFSriov()
new_obj.__dict__ = obj.__dict__.copy()
new_state.additional_vifs[iface] = new_obj
return new_state
def update_annotations(self):
def test_fn(obj):
return (obj.obj_name() != objects.vif.PodState.obj_name() or
not self._has_valid_sriov_annot(obj))
def update_fn(obj):
if obj.obj_name() != objects.vif.PodState.obj_name():
return vif.PodState(default_vif=obj)
return self._convert_sriov(obj)
self._convert_annotations(test_fn, update_fn)
def downgrade_annotations(self):
# NOTE(danil): There is no need to downgrade sriov vifs
# when annotations has old format. After downgrade annotations
# will have only one default vif and it could not be sriov vif
def test_fn(obj):
return obj.obj_name() == objects.vif.PodState.obj_name()
def update_fn(obj):
return obj.default_vif
self._convert_annotations(test_fn, update_fn)
def print_version():
print(version.version_info.version_string())
def add_parsers(subparsers):
upgrade_cmds = UpgradeCommands()
upgrade = subparsers.add_parser(
'upgrade', help='Actions related to upgrades between releases.')
sub = upgrade.add_subparsers()
check = sub.add_parser('check', help='Check if upgrading is possible.')
check.set_defaults(action_fn=upgrade_cmds.upgrade_check)
ann_update = sub.add_parser(
'update-annotations',
help='Update annotations in K8s API to newest version.')
ann_update.set_defaults(action_fn=upgrade_cmds.update_annotations)
ann_downgrade = sub.add_parser(
'downgrade-annotations',
help='Downgrade annotations in K8s API to previous version (useful '
'when reverting a failed upgrade).')
ann_downgrade.set_defaults(action_fn=upgrade_cmds.downgrade_annotations)
version_action = subparsers.add_parser('version')
version_action.set_defaults(action_fn=print_version)
def main():
opt = cfg.SubCommandOpt(
'category', title='command',
description='kuryr-k8s-status command or category to execute',
handler=add_parsers)
conf = cfg.ConfigOpts()
conf.register_cli_opt(opt)
conf(sys.argv[1:])
os_vif.initialize()
objects.register_locally_defined_vifs()
try:
return conf.category.action_fn()
except Exception:
print('Error:\n%s' % traceback.format_exc())
# This is 255 so it's not confused with the upgrade check exit codes.
return 255
if __name__ == '__main__':
main()
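A stripped-down sketch of the cfg.SubCommandOpt dispatch pattern that main() relies on; the 'hello' subcommand and its handler are hypothetical:
from oslo_config import cfg

def _add_parsers(subparsers):
    hello = subparsers.add_parser('hello')  # hypothetical subcommand
    hello.set_defaults(action_fn=lambda: print('hi'))

opt = cfg.SubCommandOpt('category', title='command', handler=_add_parsers)
conf = cfg.ConfigOpts()
conf.register_cli_opt(opt)
conf(['hello'])              # parse argv-style input
conf.category.action_fn()    # dispatch to the registered handler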
| 34.469595
| 79
| 0.604822
|
import copy
import sys
import textwrap
import traceback
import prettytable
import os_vif
from os_vif.objects import base
from oslo_config import cfg
from oslo_serialization import jsonutils
from kuryr_kubernetes import clients
from kuryr_kubernetes import config
from kuryr_kubernetes import constants
from kuryr_kubernetes import exceptions
from kuryr_kubernetes import objects
from kuryr_kubernetes.objects import vif
from kuryr_kubernetes import utils
from kuryr_kubernetes import version
CONF = config.CONF
UPGRADE_CHECK_SUCCESS = 0
UPGRADE_CHECK_WARNING = 1
UPGRADE_CHECK_FAILURE = 2
UPGRADE_CHECK_MSG_MAP = {
UPGRADE_CHECK_SUCCESS: 'Success',
UPGRADE_CHECK_WARNING: 'Warning',
UPGRADE_CHECK_FAILURE: 'Failure',
}
class UpgradeCheckResult(object):
def __init__(self, code, details=None):
super(UpgradeCheckResult, self).__init__()
self.code = code
self.details = details
def get_details(self):
if self.details is not None:
return '\n'.join(textwrap.wrap(self.details, 60,
subsequent_indent=' ' * 9))
class UpgradeCommands(object):
def __init__(self):
self.check_methods = {
'Pod annotations': self._check_annotations,
}
clients.setup_kubernetes_client()
self.k8s = clients.get_kubernetes_client()
def _get_annotation(self, pod):
annotations = pod['metadata']['annotations']
if constants.K8S_ANNOTATION_VIF not in annotations:
return None
k_ann = annotations[constants.K8S_ANNOTATION_VIF]
k_ann = jsonutils.loads(k_ann)
obj = base.VersionedObject.obj_from_primitive(k_ann)
return obj
def _check_annotations(self):
old_count = 0
malformed_count = 0
pods = self.k8s.get('/api/v1/pods')['items']
for pod in pods:
try:
obj = self._get_annotation(pod)
if not obj:
continue
except Exception:
malformed_count += 1
continue
if obj.obj_name() != objects.vif.PodState.obj_name():
old_count += 1
elif not self._has_valid_sriov_annot(obj):
old_count += 1
if malformed_count == 0 and old_count == 0:
return UpgradeCheckResult(0, 'All annotations are updated.')
elif malformed_count > 0 and old_count == 0:
msg = ('You have %d malformed Kuryr pod annotations in your '
'deployment. This is not blocking the upgrade, but '
'consider investigating it.' % malformed_count)
return UpgradeCheckResult(1, msg)
elif old_count > 0:
msg = ('You have %d Kuryr pod annotations in old format. You need '
'to run `kuryr-k8s-status upgrade update-annotations` '
'before proceeding with the upgrade.' % old_count)
return UpgradeCheckResult(2, msg)
def upgrade_check(self):
check_results = []
t = prettytable.PrettyTable(['Upgrade Check Results'],
hrules=prettytable.ALL)
t.align = 'l'
for name, method in self.check_methods.items():
result = method()
check_results.append(result)
cell = (
'Check: %(name)s\n'
'Result: %(result)s\n'
'Details: %(details)s' %
{
'name': name,
'result': UPGRADE_CHECK_MSG_MAP[result.code],
'details': result.get_details(),
}
)
t.add_row([cell])
print(t)
return max(res.code for res in check_results)
def _convert_annotations(self, test_fn, update_fn):
updated_count = 0
not_updated_count = 0
malformed_count = 0
pods = self.k8s.get('/api/v1/pods')['items']
for pod in pods:
try:
obj = self._get_annotation(pod)
if not obj:
continue
except Exception:
malformed_count += 1
continue
if test_fn(obj):
obj = update_fn(obj)
serialized = obj.obj_to_primitive()
try:
ann = {
constants.K8S_ANNOTATION_VIF:
jsonutils.dumps(serialized)
}
self.k8s.annotate(
utils.get_res_link(pod), ann,
pod['metadata']['resourceVersion'])
except exceptions.K8sClientException:
print('Error when updating annotation for pod %s/%s' %
(pod['metadata']['namespace'],
pod['metadata']['name']))
                    not_updated_count += 1
                    continue
updated_count += 1
t = prettytable.PrettyTable(['Stat', 'Number'],
hrules=prettytable.ALL)
t.align = 'l'
cells = [['Updated annotations', updated_count],
['Malformed annotations', malformed_count],
['Annotations left', not_updated_count]]
for cell in cells:
t.add_row(cell)
print(t)
def _has_valid_sriov_annot(self, state):
for obj in state.vifs.values():
if obj.obj_name() != objects.vif.VIFSriov.obj_name():
continue
if hasattr(obj, 'pod_name') and hasattr(obj, 'pod_link'):
continue
return False
return True
def _convert_sriov(self, state):
new_state = copy.deepcopy(state)
for iface, obj in new_state.additional_vifs.items():
if obj.obj_name() != objects.vif.VIFSriov.obj_name():
continue
if hasattr(obj, 'pod_name') and hasattr(obj, 'pod_link'):
continue
new_obj = objects.vif.VIFSriov()
new_obj.__dict__ = obj.__dict__.copy()
new_state.additional_vifs[iface] = new_obj
return new_state
def update_annotations(self):
def test_fn(obj):
return (obj.obj_name() != objects.vif.PodState.obj_name() or
not self._has_valid_sriov_annot(obj))
def update_fn(obj):
if obj.obj_name() != objects.vif.PodState.obj_name():
return vif.PodState(default_vif=obj)
return self._convert_sriov(obj)
self._convert_annotations(test_fn, update_fn)
def downgrade_annotations(self):
def test_fn(obj):
return obj.obj_name() == objects.vif.PodState.obj_name()
def update_fn(obj):
return obj.default_vif
self._convert_annotations(test_fn, update_fn)
def print_version():
print(version.version_info.version_string())
def add_parsers(subparsers):
upgrade_cmds = UpgradeCommands()
upgrade = subparsers.add_parser(
'upgrade', help='Actions related to upgrades between releases.')
sub = upgrade.add_subparsers()
check = sub.add_parser('check', help='Check if upgrading is possible.')
check.set_defaults(action_fn=upgrade_cmds.upgrade_check)
ann_update = sub.add_parser(
'update-annotations',
help='Update annotations in K8s API to newest version.')
ann_update.set_defaults(action_fn=upgrade_cmds.update_annotations)
ann_downgrade = sub.add_parser(
'downgrade-annotations',
help='Downgrade annotations in K8s API to previous version (useful '
'when reverting a failed upgrade).')
ann_downgrade.set_defaults(action_fn=upgrade_cmds.downgrade_annotations)
version_action = subparsers.add_parser('version')
version_action.set_defaults(action_fn=print_version)
def main():
opt = cfg.SubCommandOpt(
'category', title='command',
description='kuryr-k8s-status command or category to execute',
handler=add_parsers)
conf = cfg.ConfigOpts()
conf.register_cli_opt(opt)
conf(sys.argv[1:])
os_vif.initialize()
objects.register_locally_defined_vifs()
try:
return conf.category.action_fn()
except Exception:
print('Error:\n%s' % traceback.format_exc())
return 255
if __name__ == '__main__':
main()
| true
| true
|
1c4310f9d44999eb5196e3da1d1c3a90d53328cc
| 992
|
py
|
Python
|
sct_custom/unit_testing/test_sct_compute_mtsat.py
|
nidebroux/lumbosacral_segmentation
|
3217960c6f0f5c3886dfdf46e1286ad2f737f4aa
|
[
"Unlicense",
"MIT"
] | 1
|
2021-09-07T08:52:21.000Z
|
2021-09-07T08:52:21.000Z
|
sct_custom/unit_testing/test_sct_compute_mtsat.py
|
nidebroux/lumbosacral_segmentation
|
3217960c6f0f5c3886dfdf46e1286ad2f737f4aa
|
[
"Unlicense",
"MIT"
] | null | null | null |
sct_custom/unit_testing/test_sct_compute_mtsat.py
|
nidebroux/lumbosacral_segmentation
|
3217960c6f0f5c3886dfdf46e1286ad2f737f4aa
|
[
"Unlicense",
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import pytest
from spinalcordtoolbox.utils import sct_test_path
from spinalcordtoolbox.scripts import sct_compute_mtsat
out_mstat = "out_mtsat.nii.gz"
out_t1map = "out_t1map.nii.gz"
INPUT_PARAMS = [
['-mt', sct_test_path('mt', 'mt1.nii.gz'),
'-pd', sct_test_path('mt', 'mt0.nii.gz'),
'-t1', sct_test_path('mt', 't1w.nii.gz'),
'-omtsat', out_mstat,
'-ot1map', out_t1map],
['-mt', sct_test_path('mt', 'mt1.nii.gz'),
'-pd', sct_test_path('mt', 'mt0.nii.gz'),
'-t1', sct_test_path('mt', 't1w.nii.gz'),
'-omtsat', out_mstat,
'-ot1map', out_t1map,
'-trmt', '51', '-trpd', '52', '-trt1', '10', '-famt', '4', '-fapd', '5', '-fat1', '14'],
]
@pytest.mark.parametrize('input_params', INPUT_PARAMS)
def test_with_json_sidecar(input_params):
sct_compute_mtsat.main(input_params)
# Check if output files exist
for f in [out_mstat, out_t1map]:
assert os.path.isfile(f)
os.remove(f)
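# Hypothetical invocation for running just this module with pytest:
#   python -m pytest sct_custom/unit_testing/test_sct_compute_mtsat.py -v
# pytest executes test_with_json_sidecar once per entry in INPUT_PARAMS.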
| 28.342857
| 93
| 0.628024
|
import os
import pytest
from spinalcordtoolbox.utils import sct_test_path
from spinalcordtoolbox.scripts import sct_compute_mtsat
out_mstat = "out_mtsat.nii.gz"
out_t1map = "out_t1map.nii.gz"
INPUT_PARAMS = [
['-mt', sct_test_path('mt', 'mt1.nii.gz'),
'-pd', sct_test_path('mt', 'mt0.nii.gz'),
'-t1', sct_test_path('mt', 't1w.nii.gz'),
'-omtsat', out_mstat,
'-ot1map', out_t1map],
['-mt', sct_test_path('mt', 'mt1.nii.gz'),
'-pd', sct_test_path('mt', 'mt0.nii.gz'),
'-t1', sct_test_path('mt', 't1w.nii.gz'),
'-omtsat', out_mstat,
'-ot1map', out_t1map,
'-trmt', '51', '-trpd', '52', '-trt1', '10', '-famt', '4', '-fapd', '5', '-fat1', '14'],
]
@pytest.mark.parametrize('input_params', INPUT_PARAMS)
def test_with_json_sidecar(input_params):
sct_compute_mtsat.main(input_params)
for f in [out_mstat, out_t1map]:
assert os.path.isfile(f)
os.remove(f)
| true
| true
|
1c43114d6bc49f1a731867467ab5fc9d2bcf9463
| 15,401
|
py
|
Python
|
example_cases/1D_exp_bubscreen/input_bak.py
|
ComputationalFlowPhysics/MFC-develop
|
41506c2c788f9b8d081cd4b9b2c8ff95ef3b19f2
|
[
"MIT"
] | 3
|
2021-05-20T23:42:47.000Z
|
2021-11-17T21:34:14.000Z
|
example_cases/1D_exp_bubscreen/input_bak.py
|
ComputationalFlowPhysics/MFC-develop
|
41506c2c788f9b8d081cd4b9b2c8ff95ef3b19f2
|
[
"MIT"
] | 28
|
2021-11-02T00:40:40.000Z
|
2021-12-06T02:38:57.000Z
|
example_cases/1D_exp_bubscreen/input_bak.py
|
ComputationalFlowPhysics/MFC-develop
|
901bff8d9e9d7519613cfcacc7a5463ab6295181
|
[
"MIT"
] | 9
|
2021-10-02T04:37:25.000Z
|
2021-11-23T00:58:11.000Z
|
#!/usr/bin/env python3
import math
x0 = 10.E-06
p0 = 101325.
rho0 = 1.E+03
c0 = math.sqrt( p0/rho0 )
patm = 1.
#water props
## AKA little \gamma (see coralic 2014 eq'n (13))
n_tait = 7.1
## AKA little \pi (see coralic 2014 eq'n (13))
B_tait = 306.E+06 / p0
mul0 = 1.002E-03 #viscosity
ss = 0.07275 #surface tension
# ss = 1.E-12 ## this would turn off surface tension
pv = 2.3388E+03 #vapor pressure
# water
# These _v and _n parameters ONLY correspond to the bubble model of Preston (2010, maybe 2008)
# (this model would replace the usual Rayleigh-Plesset or Keller-Miksis model; it's more complicated)
#gamma_v = 1.33
#M_v = 18.02
#mu_v = 0.8816E-05
#k_v = 0.019426
##air props
#gamma_n = 1.4
#M_n = 28.97
#mu_n = 1.8E-05
#k_n = 0.02556
#air props
gamma_gas = 1.4
#reference bubble size
R0ref = 10.E-06
pa = 0.1 * 1.E+06 / 101325.
print(('pa',pa))
#Characteristic velocity
uu = math.sqrt( p0/rho0 )
#Cavitation number
Ca = (p0 - pv)/(rho0*(uu**2.))
#Weber number
We = rho0*(uu**2.)*R0ref/ss
#Inv. bubble Reynolds number
Re_inv = mul0/(rho0*uu*R0ref)
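# For orientation (approximate values implied by the water properties
# above): uu ~ 10.07 m/s, so Ca ~ 0.98, We ~ 13.9 and Re_inv ~ 1.0e-2,
# i.e. a bubble Reynolds number of roughly 100.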
#IC setup
vf0 = 0.00004
n0 = vf0/(math.pi*4.E+00/3.E+00)
cphysical = 1475.
t0 = x0/c0
nbubbles = 1
myr0 = R0ref
# CFL number should be < 1 for numerical stability
# CFL = speed of sound * dt/dx
cfl = 0.1
Nx = 100
Ldomain = 20.E-03
L = Ldomain/x0
dx = L/float(Nx)
dt = cfl*dx/(cphysical/c0)
Lpulse = 0.3*Ldomain
Tpulse = Lpulse/cphysical
Tfinal = 0.25*10.*Tpulse*c0/x0
Nt = int(Tfinal/dt)
Nfiles = 20.
Nout = int(math.ceil(Nt/Nfiles))
Nt = int(Nout*Nfiles)
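# Worked example of the rounding above (hypothetical numbers): Nt = 1234
# with Nfiles = 20. gives Nout = ceil(61.7) = 62, and Nt is bumped to
# 62 * 20 = 1240 so that the saved outputs divide the run evenly.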
# Command to navigate between directories
from os import chdir
# Command to acquire directory path
from os.path import dirname
# Command to acquire script name and module search path
from sys import argv, path
# Navigating to script directory
if len(dirname(argv[0])) != 0: chdir(dirname(argv[0]))
# Adding master_scripts directory to module search path
mfc_dir = '../../src'; path[:0] = [mfc_dir + '/master_scripts']
# Command to execute the MFC components
from m_python_proxy import f_execute_mfc_component
# ==============================================================================
# Case Analysis Configuration ==================================================
# Selecting MFC component
comp_name = argv[1].strip()
# Serial or parallel computational engine
engine = 'serial'
if (comp_name=='pre_process'): engine = 'serial'
# Configuring case dictionary
case_dict = \
{ \
# Logistics ================================================
'case_dir' : '\'.\'', \
'run_time_info' : 'T', \
'nodes' : 1, \
# processes per node... > 1 indicates parallel (avoid this for now)
'ppn' : 1, \
'queue' : 'normal', \
'walltime' : '24:00:00', \
'mail_list' : '', \
# ==========================================================
\
# Computational Domain Parameters ==========================
'x_domain%beg' : -10.E-03/x0, \
'x_domain%end' : 10.E-03/x0, \
'stretch_x' : 'F', \
'cyl_coord' : 'F', \
'm' : Nx, \
'n' : 0, \
'p' : 0, \
'dt' : dt, \
't_step_start' : 0, \
't_step_stop' : Nt, \
't_step_save' : Nout, \
# ==========================================================
\
# Simulation Algorithm Parameters ==========================
'num_patches' : 2, \
'model_eqns' : 2, \
'alt_soundspeed' : 'F', \
'num_fluids' : 1, \
'adv_alphan' : 'T', \
'mpp_lim' : 'F', \
'mixture_err' : 'F', \
'time_stepper' : 3, \
'weno_vars' : 2, \
'weno_order' : 5, \
'weno_eps' : 1.E-16, \
'char_decomp' : 'F', \
'mapped_weno' : 'T', \
'null_weights' : 'F', \
'mp_weno' : 'T', \
'riemann_solver' : 2, \
'wave_speeds' : 1, \
'avg_state' : 2, \
'commute_err' : 'F', \
'split_err' : 'F', \
'bc_x%beg' : -8, \
'bc_x%end' : -8, \
# ==========================================================
\
# Formatted Database Files Structure Parameters ============
'format' : 1, \
'precision' : 2, \
'prim_vars_wrt' :'T', \
'parallel_io' :'F', \
'fd_order' : 1, \
#'schlieren_wrt' :'T', \
'probe_wrt' :'T', \
'num_probes' : 1, \
'probe(1)%x' : 0., \
# ==========================================================
# Patch 1 _ Background =====================================
# this problem is 1D... so based on the dimension of the problem
# you have different 'geometries' available to you
# e.g. in 3D you might have spherical geometries
# and rectangular ones
# in 1D (like here)... there is only one option {#1}... which is a
# line
'patch_icpp(1)%geometry' : 1, \
'patch_icpp(1)%x_centroid' : 0., \
'patch_icpp(1)%length_x' : 20.E-03/x0, \
'patch_icpp(1)%vel(1)' : 0.0, \
'patch_icpp(1)%pres' : patm, \
# \alpha stands for volume fraction of this phase
# so if there are no bubbles, then it is all water (liquid)
# and \alpha_1 = \alpha_liquid \approx 1
'patch_icpp(1)%alpha_rho(1)' : (1.-1.E-12)*(1.E+03/rho0), \
# \alpha_1 here is always (for num_fluids = 1 and bubbles=True)
# \alpha is always the void fraction of bubbles (usually << 1)
'patch_icpp(1)%alpha(1)' : 1.E-12, \
# dimensionless initial bubble radius
'patch_icpp(1)%r0' : 1., \
# dimensionless initial velocity
'patch_icpp(1)%v0' : 0.0E+00, \
# ==========================================================
# Patch 2 Screen ===========================================
'patch_icpp(2)%geometry' : 1, \
#overwrite the part in the middle that was the
#background (no bubble) area
'patch_icpp(2)%alter_patch(1)' : 'T', \
'patch_icpp(2)%x_centroid' : 0., \
'patch_icpp(2)%length_x' : 5.E-03/x0, \
'patch_icpp(2)%vel(1)' : 0.0, \
'patch_icpp(2)%pres' : patm, \
# \alpha stands for volume fraction of this phase
# so if there are no bubbles, then it is all water (liquid)
# and \alpha_1 = \alpha_liquid \approx 1
# in the screen case, you have \alpha_1 = 1 - \alpha_bubbles = 1 - vf0
'patch_icpp(2)%alpha_rho(1)' : (1.-vf0)*1.E+03/rho0, \
# void fraction of bubbles
'patch_icpp(2)%alpha(1)' : vf0, \
'patch_icpp(2)%r0' : 1., \
'patch_icpp(2)%v0' : 0.0E+00, \
# ==========================================================
# Fluids Physical Parameters ===============================
# Surrounding liquid
'fluid_pp(1)%gamma' : 1.E+00/(n_tait-1.E+00), \
'fluid_pp(1)%pi_inf' : n_tait*B_tait/(n_tait-1.), \
# 'fluid_pp(1)%mul0' : mul0, \
# 'fluid_pp(1)%ss' : ss, \
# 'fluid_pp(1)%pv' : pv, \
# 'fluid_pp(1)%gamma_v' : gamma_v, \
# 'fluid_pp(1)%M_v' : M_v, \
# 'fluid_pp(1)%mu_v' : mu_v, \
# 'fluid_pp(1)%k_v' : k_v, \
# Last fluid_pp is always reserved for bubble gas state ===
# if applicable ==========================================
'fluid_pp(2)%gamma' : 1./(gamma_gas-1.), \
'fluid_pp(2)%pi_inf' : 0.0E+00, \
# 'fluid_pp(2)%gamma_v' : gamma_n, \
# 'fluid_pp(2)%M_v' : M_n, \
# 'fluid_pp(2)%mu_v' : mu_n, \
# 'fluid_pp(2)%k_v' : k_n, \
# ==========================================================
# Non-polytropic gas compression model AND/OR Tait EOS =====
'pref' : p0, \
'rhoref' : rho0, \
# ==========================================================
# Bubbles ==================================================
'bubbles' : 'T', \
# in user guide... 1 = gilbert 2 = keller-miksis
# but gilbert won't work for the equations that you are using... (i think)
'bubble_model' : 2, \
    # polytropic: this is where the difference between Rayleigh--Plesset and
# Preston's model shows up. polytropic = False means complicated Preston model
# = True means simpler Rayleigh--Plesset model
# if polytropic == False then you will end up calling s_initialize_nonpoly in
# m_global_parameters.f90 in both the pre_process and simulation_code
'polytropic' : 'T', \
'polydisperse' : 'F', \
#'poly_sigma' : 0.3, \
# only matters if polytropic = False (complicated model)
# 'thermal' : 3, \
# only matters if polytropic = False (complicated model)
'R0ref' : myr0, \
'nb' : 1, \
    # cavitation number, Ca = (p0 - pv)/(rho0*uu^2) as defined above
# this is usually near 1
# can set = 1 for testing purposes
'Ca' : Ca, \
# weber number (corresponds to surface tension)
'Web' : We, \
    # inverse reynolds number (corresponds to viscosity)
'Re_inv' : Re_inv, \
# ==========================================================
# Acoustic source ==========================================
'Monopole' : 'T', \
'num_mono' : 1, \
'Mono(1)%loc(1)' : -5.E-03/x0, \
'Mono(1)%npulse' : 1, \
'Mono(1)%dir' : 1., \
'Mono(1)%pulse' : 1, \
'Mono(1)%mag' : pa, \
'Mono(1)%length' : (1./(300000.))*cphysical/x0, \
# ==========================================================
}
# Executing MFC component
f_execute_mfc_component(comp_name, case_dict, mfc_dir, engine)
# ==============================================================================
| 52.384354
| 107
| 0.310305
|
import math
x0 = 10.E-06
p0 = 101325.
rho0 = 1.E+03
c0 = math.sqrt( p0/rho0 )
patm = 1.
n_tait = 7.1
B_tait = 306.E+06 / p0
mul0 = 1.002E-03
ss = 0.07275
pv = 2.3388E+03
#k_v = 0.019426
##air props
#gamma_n = 1.4
#M_n = 28.97
#mu_n = 1.8E-05
#k_n = 0.02556
#air props
gamma_gas = 1.4
#reference bubble size
R0ref = 10.E-06
pa = 0.1 * 1.E+06 / 101325.
print(('pa',pa))
#Characteristic velocity
uu = math.sqrt( p0/rho0 )
#Cavitation number
Ca = (p0 - pv)/(rho0*(uu**2.))
#Weber number
We = rho0*(uu**2.)*R0ref/ss
#Inv. bubble Reynolds number
Re_inv = mul0/(rho0*uu*R0ref)
#IC setup
vf0 = 0.00004
n0 = vf0/(math.pi*4.E+00/3.E+00)
cphysical = 1475.
t0 = x0/c0
nbubbles = 1
myr0 = R0ref
# CFL number should be < 1 for numerical stability
# CFL = speed of sound * dt/dx
cfl = 0.1
Nx = 100
Ldomain = 20.E-03
L = Ldomain/x0
dx = L/float(Nx)
dt = cfl*dx/(cphysical/c0)
Lpulse = 0.3*Ldomain
Tpulse = Lpulse/cphysical
Tfinal = 0.25*10.*Tpulse*c0/x0
Nt = int(Tfinal/dt)
Nfiles = 20.
Nout = int(math.ceil(Nt/Nfiles))
Nt = int(Nout*Nfiles)
# Command to navigate between directories
from os import chdir
# Command to acquire directory path
from os.path import dirname
# Command to acquire script name and module search path
from sys import argv, path
# Navigating to script directory
if len(dirname(argv[0])) != 0: chdir(dirname(argv[0]))
# Adding master_scripts directory to module search path
mfc_dir = '../../src'; path[:0] = [mfc_dir + '/master_scripts']
# Command to execute the MFC components
from m_python_proxy import f_execute_mfc_component
# ==============================================================================
# Case Analysis Configuration ==================================================
# Selecting MFC component
comp_name = argv[1].strip()
# Serial or parallel computational engine
engine = 'serial'
if (comp_name=='pre_process'): engine = 'serial'
# Configuring case dictionary
case_dict = \
{ \
# Logistics ================================================
'case_dir' : '\'.\'', \
'run_time_info' : 'T', \
'nodes' : 1, \
# processes per node... > 1 indicates parallel (avoid this for now)
'ppn' : 1, \
'queue' : 'normal', \
'walltime' : '24:00:00', \
'mail_list' : '', \
# ==========================================================
\
# Computational Domain Parameters ==========================
'x_domain%beg' : -10.E-03/x0, \
'x_domain%end' : 10.E-03/x0, \
'stretch_x' : 'F', \
'cyl_coord' : 'F', \
'm' : Nx, \
'n' : 0, \
'p' : 0, \
'dt' : dt, \
't_step_start' : 0, \
't_step_stop' : Nt, \
't_step_save' : Nout, \
# ==========================================================
\
# Simulation Algorithm Parameters ==========================
'num_patches' : 2, \
'model_eqns' : 2, \
'alt_soundspeed' : 'F', \
'num_fluids' : 1, \
'adv_alphan' : 'T', \
'mpp_lim' : 'F', \
'mixture_err' : 'F', \
'time_stepper' : 3, \
'weno_vars' : 2, \
'weno_order' : 5, \
'weno_eps' : 1.E-16, \
'char_decomp' : 'F', \
'mapped_weno' : 'T', \
'null_weights' : 'F', \
'mp_weno' : 'T', \
'riemann_solver' : 2, \
'wave_speeds' : 1, \
'avg_state' : 2, \
'commute_err' : 'F', \
'split_err' : 'F', \
'bc_x%beg' : -8, \
'bc_x%end' : -8, \
# ==========================================================
\
# Formatted Database Files Structure Parameters ============
'format' : 1, \
'precision' : 2, \
'prim_vars_wrt' :'T', \
'parallel_io' :'F', \
'fd_order' : 1, \
#'schlieren_wrt' :'T', \
'probe_wrt' :'T', \
'num_probes' : 1, \
'probe(1)%x' : 0., \
# ==========================================================
# Patch 1 _ Background =====================================
# this problem is 1D... so based on the dimension of the problem
# you have different 'geometries' available to you
# e.g. in 3D you might have spherical geometries
# and rectangular ones
# in 1D (like here)... there is only one option {#1}... which is a
# line
'patch_icpp(1)%geometry' : 1, \
'patch_icpp(1)%x_centroid' : 0., \
'patch_icpp(1)%length_x' : 20.E-03/x0, \
'patch_icpp(1)%vel(1)' : 0.0, \
'patch_icpp(1)%pres' : patm, \
# \alpha stands for volume fraction of this phase
# so if there are no bubbles, then it is all water (liquid)
# and \alpha_1 = \alpha_liquid \approx 1
'patch_icpp(1)%alpha_rho(1)' : (1.-1.E-12)*(1.E+03/rho0), \
# \alpha_1 here is always (for num_fluids = 1 and bubbles=True)
# \alpha is always the void fraction of bubbles (usually << 1)
'patch_icpp(1)%alpha(1)' : 1.E-12, \
# dimensionless initial bubble radius
'patch_icpp(1)%r0' : 1., \
# dimensionless initial velocity
'patch_icpp(1)%v0' : 0.0E+00, \
# ==========================================================
# Patch 2 Screen ===========================================
'patch_icpp(2)%geometry' : 1, \
#overwrite the part in the middle that was the
#background (no bubble) area
'patch_icpp(2)%alter_patch(1)' : 'T', \
'patch_icpp(2)%x_centroid' : 0., \
'patch_icpp(2)%length_x' : 5.E-03/x0, \
'patch_icpp(2)%vel(1)' : 0.0, \
'patch_icpp(2)%pres' : patm, \
# \alpha stands for volume fraction of this phase
# so if there are no bubbles, then it is all water (liquid)
# and \alpha_1 = \alpha_liquid \approx 1
# in the screen case, you have \alpha_1 = 1 - \alpha_bubbles = 1 - vf0
'patch_icpp(2)%alpha_rho(1)' : (1.-vf0)*1.E+03/rho0, \
# void fraction of bubbles
'patch_icpp(2)%alpha(1)' : vf0, \
'patch_icpp(2)%r0' : 1., \
'patch_icpp(2)%v0' : 0.0E+00, \
# ==========================================================
# Fluids Physical Parameters ===============================
# Surrounding liquid
'fluid_pp(1)%gamma' : 1.E+00/(n_tait-1.E+00), \
'fluid_pp(1)%pi_inf' : n_tait*B_tait/(n_tait-1.), \
# 'fluid_pp(1)%mul0' : mul0, \
# 'fluid_pp(1)%ss' : ss, \
# 'fluid_pp(1)%pv' : pv, \
# 'fluid_pp(1)%gamma_v' : gamma_v, \
# 'fluid_pp(1)%M_v' : M_v, \
# 'fluid_pp(1)%mu_v' : mu_v, \
# 'fluid_pp(1)%k_v' : k_v, \
# Last fluid_pp is always reserved for bubble gas state ===
# if applicable ==========================================
'fluid_pp(2)%gamma' : 1./(gamma_gas-1.), \
'fluid_pp(2)%pi_inf' : 0.0E+00, \
# 'fluid_pp(2)%gamma_v' : gamma_n, \
# 'fluid_pp(2)%M_v' : M_n, \
# 'fluid_pp(2)%mu_v' : mu_n, \
# 'fluid_pp(2)%k_v' : k_n, \
# ==========================================================
# Non-polytropic gas compression model AND/OR Tait EOS =====
'pref' : p0, \
'rhoref' : rho0, \
# ==========================================================
# Bubbles ==================================================
'bubbles' : 'T', \
# in user guide... 1 = gilbert 2 = keller-miksis
# but gilbert won't work for the equations that you are using... (i think)
'bubble_model' : 2, \
    # polytropic: this is where the difference between Rayleigh--Plesset and
    # Preston's model shows up. polytropic = False means complicated Preston model
    # = True means simpler Rayleigh--Plesset model
# if polytropic == False then you will end up calling s_initialize_nonpoly in
# m_global_parameters.f90 in both the pre_process and simulation_code
'polytropic' : 'T', \
'polydisperse' : 'F', \
#'poly_sigma' : 0.3, \
# only matters if polytropic = False (complicated model)
# 'thermal' : 3, \
# only matters if polytropic = False (complicated model)
'R0ref' : myr0, \
'nb' : 1, \
    # cavitation number, Ca = (p0 - pv)/(rho0*uu^2) as defined above
# this is usually near 1
# can set = 1 for testing purposes
'Ca' : Ca, \
# weber number (corresponds to surface tension)
'Web' : We, \
    # inverse reynolds number (corresponds to viscosity)
'Re_inv' : Re_inv, \
# ==========================================================
# Acoustic source ==========================================
'Monopole' : 'T', \
'num_mono' : 1, \
'Mono(1)%loc(1)' : -5.E-03/x0, \
'Mono(1)%npulse' : 1, \
'Mono(1)%dir' : 1., \
'Mono(1)%pulse' : 1, \
'Mono(1)%mag' : pa, \
'Mono(1)%length' : (1./(300000.))*cphysical/x0, \
# ==========================================================
}
# Executing MFC component
f_execute_mfc_component(comp_name, case_dict, mfc_dir, engine)
# ==============================================================================
| true
| true
|
1c4311c385772f0780d0f5fd3ed496e89c9bb98c
| 4,129
|
py
|
Python
|
mmdet/models/losses/eqlv2.py
|
zhaohongyin/mmdetection-2.15
|
9fd29bfd373a6ad00674471c04ecc916f8ad413e
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/losses/eqlv2.py
|
zhaohongyin/mmdetection-2.15
|
9fd29bfd373a6ad00674471c04ecc916f8ad413e
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/losses/eqlv2.py
|
zhaohongyin/mmdetection-2.15
|
9fd29bfd373a6ad00674471c04ecc916f8ad413e
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from mmdet.utils import get_root_logger
from functools import partial
from ..builder import LOSSES
@LOSSES.register_module()
class EQLv2(nn.Module):
def __init__(self,
use_sigmoid=True,
reduction='mean',
class_weight=None,
loss_weight=1.0,
num_classes=1203, # 1203 for lvis v1.0, 1230 for lvis v0.5
gamma=12,
mu=0.8,
alpha=4.0,
vis_grad=False):
super().__init__()
self.use_sigmoid = True
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = class_weight
self.num_classes = num_classes
self.group = True
# cfg for eqlv2
self.vis_grad = vis_grad
self.gamma = gamma
self.mu = mu
self.alpha = alpha
# initial variables
self._pos_grad = None
self._neg_grad = None
self.pos_neg = None
def _func(x, gamma, mu):
return 1 / (1 + torch.exp(-gamma * (x - mu)))
self.map_func = partial(_func, gamma=self.gamma, mu=self.mu)
logger = get_root_logger()
logger.info(f"build EQL v2, gamma: {gamma}, mu: {mu}, alpha: {alpha}")
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
self.n_i, self.n_c = cls_score.size()
self.gt_classes = label
self.pred_class_logits = cls_score
def expand_label(pred, gt_classes):
target = pred.new_zeros(self.n_i, self.n_c)
target[torch.arange(self.n_i), gt_classes] = 1
return target
target = expand_label(cls_score, label)
pos_w, neg_w = self.get_weight(cls_score)
weight = pos_w * target + neg_w * (1 - target)
cls_loss = F.binary_cross_entropy_with_logits(cls_score, target,
reduction='none')
cls_loss = torch.sum(cls_loss * weight) / self.n_i
self.collect_grad(cls_score.detach(), target.detach(), weight.detach())
return self.loss_weight * cls_loss
def get_channel_num(self, num_classes):
num_channel = num_classes + 1
return num_channel
def get_activation(self, cls_score):
cls_score = torch.sigmoid(cls_score)
n_i, n_c = cls_score.size()
bg_score = cls_score[:, -1].view(n_i, 1)
cls_score[:, :-1] *= (1 - bg_score)
return cls_score
def collect_grad(self, cls_score, target, weight):
prob = torch.sigmoid(cls_score)
grad = target * (prob - 1) + (1 - target) * prob
grad = torch.abs(grad)
# do not collect grad for objectiveness branch [:-1]
pos_grad = torch.sum(grad * target * weight, dim=0)[:-1]
neg_grad = torch.sum(grad * (1 - target) * weight, dim=0)[:-1]
dist.all_reduce(pos_grad)
dist.all_reduce(neg_grad)
self._pos_grad += pos_grad
self._neg_grad += neg_grad
self.pos_neg = self._pos_grad / (self._neg_grad + 1e-10)
def get_weight(self, cls_score):
# we do not have information about pos grad and neg grad at beginning
if self._pos_grad is None:
self._pos_grad = cls_score.new_zeros(self.num_classes)
self._neg_grad = cls_score.new_zeros(self.num_classes)
neg_w = cls_score.new_ones((self.n_i, self.n_c))
pos_w = cls_score.new_ones((self.n_i, self.n_c))
else:
# the negative weight for objectiveness is always 1
neg_w = torch.cat([self.map_func(self.pos_neg), cls_score.new_ones(1)])
pos_w = 1 + self.alpha * (1 - neg_w)
neg_w = neg_w.view(1, -1).expand(self.n_i, self.n_c)
pos_w = pos_w.view(1, -1).expand(self.n_i, self.n_c)
return pos_w, neg_w
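A small sketch, not part of the original module, showing how the gradient-ratio map defined by _func behaves with the default gamma=12 and mu=0.8:
import torch

gamma, mu = 12, 0.8
f = lambda x: 1 / (1 + torch.exp(-gamma * (x - mu)))
# Ratios well below mu collapse toward 0 (negative gradients for rare
# classes get suppressed); ratios above mu saturate toward 1.
print(f(torch.tensor([0.1, 0.8, 2.0])))  # ~[0.0002, 0.5, 1.0]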
| 33.844262
| 83
| 0.578106
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from mmdet.utils import get_root_logger
from functools import partial
from ..builder import LOSSES
@LOSSES.register_module()
class EQLv2(nn.Module):
def __init__(self,
use_sigmoid=True,
reduction='mean',
class_weight=None,
loss_weight=1.0,
num_classes=1203,
gamma=12,
mu=0.8,
alpha=4.0,
vis_grad=False):
super().__init__()
self.use_sigmoid = True
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = class_weight
self.num_classes = num_classes
self.group = True
self.vis_grad = vis_grad
self.gamma = gamma
self.mu = mu
self.alpha = alpha
self._pos_grad = None
self._neg_grad = None
self.pos_neg = None
def _func(x, gamma, mu):
return 1 / (1 + torch.exp(-gamma * (x - mu)))
self.map_func = partial(_func, gamma=self.gamma, mu=self.mu)
logger = get_root_logger()
logger.info(f"build EQL v2, gamma: {gamma}, mu: {mu}, alpha: {alpha}")
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
self.n_i, self.n_c = cls_score.size()
self.gt_classes = label
self.pred_class_logits = cls_score
def expand_label(pred, gt_classes):
target = pred.new_zeros(self.n_i, self.n_c)
target[torch.arange(self.n_i), gt_classes] = 1
return target
target = expand_label(cls_score, label)
pos_w, neg_w = self.get_weight(cls_score)
weight = pos_w * target + neg_w * (1 - target)
cls_loss = F.binary_cross_entropy_with_logits(cls_score, target,
reduction='none')
cls_loss = torch.sum(cls_loss * weight) / self.n_i
self.collect_grad(cls_score.detach(), target.detach(), weight.detach())
return self.loss_weight * cls_loss
def get_channel_num(self, num_classes):
num_channel = num_classes + 1
return num_channel
def get_activation(self, cls_score):
cls_score = torch.sigmoid(cls_score)
n_i, n_c = cls_score.size()
bg_score = cls_score[:, -1].view(n_i, 1)
cls_score[:, :-1] *= (1 - bg_score)
return cls_score
def collect_grad(self, cls_score, target, weight):
prob = torch.sigmoid(cls_score)
grad = target * (prob - 1) + (1 - target) * prob
grad = torch.abs(grad)
pos_grad = torch.sum(grad * target * weight, dim=0)[:-1]
neg_grad = torch.sum(grad * (1 - target) * weight, dim=0)[:-1]
dist.all_reduce(pos_grad)
dist.all_reduce(neg_grad)
self._pos_grad += pos_grad
self._neg_grad += neg_grad
self.pos_neg = self._pos_grad / (self._neg_grad + 1e-10)
def get_weight(self, cls_score):
if self._pos_grad is None:
self._pos_grad = cls_score.new_zeros(self.num_classes)
self._neg_grad = cls_score.new_zeros(self.num_classes)
neg_w = cls_score.new_ones((self.n_i, self.n_c))
pos_w = cls_score.new_ones((self.n_i, self.n_c))
else:
neg_w = torch.cat([self.map_func(self.pos_neg), cls_score.new_ones(1)])
pos_w = 1 + self.alpha * (1 - neg_w)
neg_w = neg_w.view(1, -1).expand(self.n_i, self.n_c)
pos_w = pos_w.view(1, -1).expand(self.n_i, self.n_c)
return pos_w, neg_w
| true
| true
|
1c4312172dc45ac8b188a1e31e311d39a6f89ea9
| 3,943
|
py
|
Python
|
designate/storage/__init__.py
|
ISCAS-VDI/designate-base
|
bd945607e3345fbef8645c3441e96b032b70b098
|
[
"Apache-2.0"
] | null | null | null |
designate/storage/__init__.py
|
ISCAS-VDI/designate-base
|
bd945607e3345fbef8645c3441e96b032b70b098
|
[
"Apache-2.0"
] | null | null | null |
designate/storage/__init__.py
|
ISCAS-VDI/designate-base
|
bd945607e3345fbef8645c3441e96b032b70b098
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import functools
import threading
import time
from oslo_log import log as logging
from oslo_db import exception as db_exception
from oslo_utils import excutils
from designate.storage.base import Storage
from designate.i18n import _LW
LOG = logging.getLogger(__name__)
RETRY_STATE = threading.local()
def get_storage(storage_driver):
"""Return the engine class from the provided engine name"""
cls = Storage.get_driver(storage_driver)
return cls()
def _retry_on_deadlock(exc):
"""Filter to trigger retry a when a Deadlock is received."""
# TODO(kiall): This is a total leak of the SQLA Driver, we'll need a better
# way to handle this.
if isinstance(exc, db_exception.DBDeadlock):
LOG.warning(_LW("Deadlock detected. Retrying..."))
return True
return False
def retry(cb=None, retries=50, delay=150):
"""A retry decorator that ignores attempts at creating nested retries"""
def outer(f):
@functools.wraps(f)
def retry_wrapper(self, *args, **kwargs):
if not hasattr(RETRY_STATE, 'held'):
# Create the state vars if necessary
RETRY_STATE.held = False
RETRY_STATE.retries = 0
if not RETRY_STATE.held:
# We're the outermost retry decorator
RETRY_STATE.held = True
try:
while True:
try:
result = f(self, *copy.deepcopy(args),
**copy.deepcopy(kwargs))
break
except Exception as exc:
RETRY_STATE.retries += 1
if RETRY_STATE.retries >= retries:
# Exceeded retry attempts, raise.
raise
elif cb is not None and cb(exc) is False:
# We're not setup to retry on this exception.
raise
else:
# Retry, with a delay.
time.sleep(delay / float(1000))
finally:
RETRY_STATE.held = False
RETRY_STATE.retries = 0
else:
# We're an inner retry decorator, just pass on through.
result = f(self, *copy.deepcopy(args), **copy.deepcopy(kwargs))
return result
retry_wrapper.__wrapped_function = f
retry_wrapper.__wrapper_name = 'retry'
return retry_wrapper
return outer
def transaction(f):
"""Transaction decorator, to be used on class instances with a
self.storage attribute
"""
@retry(cb=_retry_on_deadlock)
@functools.wraps(f)
def transaction_wrapper(self, *args, **kwargs):
self.storage.begin()
try:
result = f(self, *args, **kwargs)
self.storage.commit()
return result
except Exception:
with excutils.save_and_reraise_exception():
self.storage.rollback()
transaction_wrapper.__wrapped_function = f
transaction_wrapper.__wrapper_name = 'transaction'
return transaction_wrapper
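A usage sketch under stated assumptions: @transaction works on any class whose storage attribute exposes begin/commit/rollback; FakeStorage and Service below are hypothetical.
class FakeStorage(object):
    # Hypothetical in-memory stand-in for a real storage driver.
    def begin(self): print('BEGIN')
    def commit(self): print('COMMIT')
    def rollback(self): print('ROLLBACK')

class Service(object):
    def __init__(self):
        self.storage = FakeStorage()

    @transaction
    def do_work(self):
        return 'ok'

print(Service().do_work())  # prints BEGIN then COMMIT, returns 'ok'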
| 33.700855
| 79
| 0.587116
|
import copy
import functools
import threading
import time
from oslo_log import log as logging
from oslo_db import exception as db_exception
from oslo_utils import excutils
from designate.storage.base import Storage
from designate.i18n import _LW
LOG = logging.getLogger(__name__)
RETRY_STATE = threading.local()
def get_storage(storage_driver):
cls = Storage.get_driver(storage_driver)
return cls()
def _retry_on_deadlock(exc):
if isinstance(exc, db_exception.DBDeadlock):
LOG.warning(_LW("Deadlock detected. Retrying..."))
return True
return False
def retry(cb=None, retries=50, delay=150):
def outer(f):
@functools.wraps(f)
def retry_wrapper(self, *args, **kwargs):
if not hasattr(RETRY_STATE, 'held'):
# Create the state vars if necessary
RETRY_STATE.held = False
RETRY_STATE.retries = 0
if not RETRY_STATE.held:
# We're the outermost retry decorator
RETRY_STATE.held = True
try:
while True:
try:
result = f(self, *copy.deepcopy(args),
**copy.deepcopy(kwargs))
break
except Exception as exc:
RETRY_STATE.retries += 1
if RETRY_STATE.retries >= retries:
raise
elif cb is not None and cb(exc) is False:
raise
else:
# Retry, with a delay.
time.sleep(delay / float(1000))
finally:
RETRY_STATE.held = False
RETRY_STATE.retries = 0
else:
# We're an inner retry decorator, just pass on through.
result = f(self, *copy.deepcopy(args), **copy.deepcopy(kwargs))
return result
retry_wrapper.__wrapped_function = f
retry_wrapper.__wrapper_name = 'retry'
return retry_wrapper
return outer
def transaction(f):
@retry(cb=_retry_on_deadlock)
@functools.wraps(f)
def transaction_wrapper(self, *args, **kwargs):
self.storage.begin()
try:
result = f(self, *args, **kwargs)
self.storage.commit()
return result
except Exception:
with excutils.save_and_reraise_exception():
self.storage.rollback()
transaction_wrapper.__wrapped_function = f
transaction_wrapper.__wrapper_name = 'transaction'
return transaction_wrapper
| true
| true
|
1c4312cb083a253b3691adb88cdcfbc5b0aa0c66
| 798
|
py
|
Python
|
sugar/sugar/urls.py
|
Nazira06/sweet-sugar
|
9822390356effae379bff1ebcda276b5d6dee8ce
|
[
"MIT"
] | null | null | null |
sugar/sugar/urls.py
|
Nazira06/sweet-sugar
|
9822390356effae379bff1ebcda276b5d6dee8ce
|
[
"MIT"
] | null | null | null |
sugar/sugar/urls.py
|
Nazira06/sweet-sugar
|
9822390356effae379bff1ebcda276b5d6dee8ce
|
[
"MIT"
] | null | null | null |
"""sugar URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('sweet_girl.urls')),
]
| 34.695652
| 77
| 0.703008
|
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('sweet_girl.urls')),
]
| true
| true
|
1c431471364d40e49996e90ea2929734f7ea9e2b
| 6,864
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/string/flaviramulusichthyoenteri.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/flaviramulusichthyoenteri.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/flaviramulusichthyoenteri.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph Flaviramulus ichthyoenteri.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 22:17:04.626387
The undirected graph Flaviramulus ichthyoenteri has 3418 nodes and 379936
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.06506 and has 10 connected components, where the component
with most nodes has 3389 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 205, the mean node degree is 222.31,
and the node degree mode is 7. The top 5 most central nodes are 1380600.AUYN01000007_gene3417
(degree 1179), 1380600.AUYN01000009_gene1316 (degree 1178), 1380600.AUYN01000009_gene1730
(degree 1098), 1380600.AUYN01000009_gene961 (degree 1005) and 1380600.AUYN01000003_gene129
(degree 955).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import FlaviramulusIchthyoenteri
# Then load the graph
graph = FlaviramulusIchthyoenteri()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def FlaviramulusIchthyoenteri(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Flaviramulus ichthyoenteri graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of the Flaviramulus ichthyoenteri graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 22:17:04.626387
The undirected graph Flaviramulus ichthyoenteri has 3418 nodes and 379936
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.06506 and has 10 connected components, where the component
with most nodes has 3389 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 205, the mean node degree is 222.31,
and the node degree mode is 7. The top 5 most central nodes are 1380600.AUYN01000007_gene3417
(degree 1179), 1380600.AUYN01000009_gene1316 (degree 1178), 1380600.AUYN01000009_gene1730
(degree 1098), 1380600.AUYN01000009_gene961 (degree 1005) and 1380600.AUYN01000003_gene129
(degree 955).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import FlaviramulusIchthyoenteri
# Then load the graph
graph = FlaviramulusIchthyoenteri()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="FlaviramulusIchthyoenteri",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 35.937173
| 223
| 0.711247
|
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph
def FlaviramulusIchthyoenteri(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
return AutomaticallyRetrievedGraph(
graph_name="FlaviramulusIchthyoenteri",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true
| true
|
1c43147d60936250716fa29247d7b6a5f06f230c
| 5,472
|
py
|
Python
|
gridpath/auxiliary/dynamic_components.py
|
anamileva/gridpath
|
e55eacb88ca5e6c034a90b18819e17cbd6f43854
|
[
"Apache-2.0"
] | null | null | null |
gridpath/auxiliary/dynamic_components.py
|
anamileva/gridpath
|
e55eacb88ca5e6c034a90b18819e17cbd6f43854
|
[
"Apache-2.0"
] | null | null | null |
gridpath/auxiliary/dynamic_components.py
|
anamileva/gridpath
|
e55eacb88ca5e6c034a90b18819e17cbd6f43854
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module creates the DynamicComponents class, which contains the lists and
dictionaries of the names of dynamic optimization components. These are
components that are populated by GridPath modules based on the selected
features and the scenario input data.
"""
from builtins import object
# Create global variables for the dynamic component names, so that we can
# more easily import the correct names into other modules
capacity_type_operational_period_sets = "capacity_type_operational_period_sets"
storage_only_capacity_type_operational_period_sets = \
"storage_only_capacity_type_operational_period_sets"
headroom_variables = "headroom_variables"
footroom_variables = "footroom_variables"
reserve_variable_derate_params = "reserve_variable_derate_params"
reserve_to_energy_adjustment_params = \
"reserve_to_energy_adjustment_params"
prm_cost_group_sets = "prm_cost_groups"
prm_cost_group_prm_type = "prm_cost_group_prm_type"
tx_capacity_type_operational_period_sets = \
"tx_capacity_type_operational_period_sets"
load_balance_production_components = "load_balance_production_components"
load_balance_consumption_components = "load_balance_consumption_components"
carbon_cap_balance_emission_components = \
"carbon_cap_balance_emission_components"
prm_balance_provision_components = \
"prm_balance_provision_components"
local_capacity_balance_provision_components = \
"local_capacity_balance_provision_components"
cost_components = "cost_components"
revenue_components = "revenue_components"
class DynamicComponents(object):
"""
    Here we initialize the class object and its attributes, which will contain
    the dynamic model components, i.e. lists and dictionaries with the names
    of the optimization components that are populated based on whether
    features are selected (i.e. certain modules are called) and on the
    scenario input data.
"""
def __init__(self):
"""
Initialize the dynamic components.
"""
# ### Project sets and variables ### #
        # These are the names of the sets of project-operational_period by
        # capacity type; the sets will be joined to make the final
        # project-operational_period set that includes all projects.
        # If called, the capacity-type modules will populate these lists with
        # the name of the respective set for the capacity type.
setattr(self, capacity_type_operational_period_sets, list())
setattr(self, storage_only_capacity_type_operational_period_sets,
list())
# PRM cost groups
setattr(self, prm_cost_group_sets, list())
setattr(self, prm_cost_group_prm_type, dict())
# ### Operating reserves ### #
# Headroom and footroom variables
        # These will have projects as keys and a list as the value for
        # each project; the list could be empty if the project is not
        # providing any reserves, or will include the names of the
        # respective reserve-provision variables if the reserve type is
        # modeled and the project can provide it.
setattr(self, headroom_variables, dict())
setattr(self, footroom_variables, dict())
        # A reserve-provision derate parameter and a
        # reserve-to-energy-adjustment parameter could also be assigned to
        # a project, so we make dictionaries that will link the
        # reserve-provision variable names to a derate-param name (i.e. the
        # regulation-up variable will be linked to a regulation-up
        # parameter, the spinning-reserves variable will be linked to a
        # spinning-reserves parameter, etc.)
setattr(self, reserve_variable_derate_params, dict())
setattr(self, reserve_to_energy_adjustment_params, dict())
# ### Transmission sets and variables ### #
setattr(self, tx_capacity_type_operational_period_sets, list())
# ### Constraint and objective function components ### #
# Load balance constraint
# Modules will add component names to these lists
setattr(self, load_balance_production_components, list())
setattr(self, load_balance_consumption_components, list())
# Carbon cap constraint
# Modules will add component names to these lists
setattr(self, carbon_cap_balance_emission_components, list())
# PRM constraint
# Modules will add component names to this list
setattr(self, prm_balance_provision_components, list())
# Local capacity constraint
# Modules will add component names to this list
setattr(self, local_capacity_balance_provision_components, list())
# Objective functions
# Modules will add component names to this list
setattr(self, cost_components, list())
setattr(self, revenue_components, list())
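# Minimal usage sketch (hypothetical, not part of GridPath): a feature module
# would look up a dynamic component through the module-level name constants
# above and register its own component names; the names used here are made
# up for illustration.
if __name__ == "__main__":
    d = DynamicComponents()
    getattr(d, load_balance_production_components).append("Power_Provision_MW")
    getattr(d, headroom_variables)["Hypothetical_Project"] = \
        ["Provide_Regulation_Up_MW"]
    print(getattr(d, load_balance_production_components))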
| 42.092308
| 79
| 0.738121
|
from builtins import object
capacity_type_operational_period_sets = "capacity_type_operational_period_sets"
storage_only_capacity_type_operational_period_sets = \
"storage_only_capacity_type_operational_period_sets"
headroom_variables = "headroom_variables"
footroom_variables = "footroom_variables"
reserve_variable_derate_params = "reserve_variable_derate_params"
reserve_to_energy_adjustment_params = \
"reserve_to_energy_adjustment_params"
prm_cost_group_sets = "prm_cost_groups"
prm_cost_group_prm_type = "prm_cost_group_prm_type"
tx_capacity_type_operational_period_sets = \
"tx_capacity_type_operational_period_sets"
load_balance_production_components = "load_balance_production_components"
load_balance_consumption_components = "load_balance_consumption_components"
carbon_cap_balance_emission_components = \
"carbon_cap_balance_emission_components"
prm_balance_provision_components = \
"prm_balance_provision_components"
local_capacity_balance_provision_components = \
"local_capacity_balance_provision_components"
cost_components = "cost_components"
revenue_components = "revenue_components"
class DynamicComponents(object):
def __init__(self):
        setattr(self, capacity_type_operational_period_sets, list())
setattr(self, storage_only_capacity_type_operational_period_sets,
list())
setattr(self, prm_cost_group_sets, list())
setattr(self, prm_cost_group_prm_type, dict())
        setattr(self, headroom_variables, dict())
setattr(self, footroom_variables, dict())
setattr(self, reserve_variable_derate_params, dict())
setattr(self, reserve_to_energy_adjustment_params, dict())
        setattr(self, tx_capacity_type_operational_period_sets, list())
        setattr(self, load_balance_production_components, list())
        setattr(self, load_balance_consumption_components, list())
        setattr(self, carbon_cap_balance_emission_components, list())
setattr(self, prm_balance_provision_components, list())
setattr(self, local_capacity_balance_provision_components, list())
setattr(self, cost_components, list())
setattr(self, revenue_components, list())
| true
| true
|
1c4315485dc6a97257b664665246fdef117f21c5
| 610
|
py
|
Python
|
djangogram/users/models.py
|
nothors2/djangogram
|
1250e301026be2218b6b116895a16217770efb17
|
[
"MIT"
] | null | null | null |
djangogram/users/models.py
|
nothors2/djangogram
|
1250e301026be2218b6b116895a16217770efb17
|
[
"MIT"
] | null | null | null |
djangogram/users/models.py
|
nothors2/djangogram
|
1250e301026be2218b6b116895a16217770efb17
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import AbstractUser
from django.db.models import CharField
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
class User(AbstractUser):
"""Default user for djangogram.
"""
#: First and last name do not cover name patterns around the globe
name = CharField(_("Name of User"), blank=True, max_length=255)
def get_absolute_url(self):
"""Get url for user's detail view.
Returns:
str: URL for user detail.
"""
return reverse("users:detail", kwargs={"username": self.username})
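# Illustrative note (hypothetical, not part of this app): get_absolute_url()
# assumes a namespaced URLconf along these lines, with a "detail" route keyed
# on the username:
#
#     # djangogram/users/urls.py (hypothetical)
#     app_name = "users"
#     urlpatterns = [
#         path("<str:username>/", user_detail_view, name="detail"),
#     ]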
| 27.727273
| 74
| 0.685246
|
from django.contrib.auth.models import AbstractUser
from django.db.models import CharField
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
class User(AbstractUser):
name = CharField(_("Name of User"), blank=True, max_length=255)
def get_absolute_url(self):
return reverse("users:detail", kwargs={"username": self.username})
| true
| true
|
1c4316d17e8a809a1b0d8b5c86ad35c3660f6af3
| 4,449
|
py
|
Python
|
wavefront_api_client/models/response_container_list_string.py
|
mdennehy/python-client
|
4d9cfa32075a6a65d88a38fe9e72b282e87b8808
|
[
"Apache-2.0"
] | null | null | null |
wavefront_api_client/models/response_container_list_string.py
|
mdennehy/python-client
|
4d9cfa32075a6a65d88a38fe9e72b282e87b8808
|
[
"Apache-2.0"
] | null | null | null |
wavefront_api_client/models/response_container_list_string.py
|
mdennehy/python-client
|
4d9cfa32075a6a65d88a38fe9e72b282e87b8808
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: support@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.models.response_status import ResponseStatus # noqa: F401,E501
class ResponseContainerListString(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'response': 'list[str]',
'status': 'ResponseStatus'
}
attribute_map = {
'response': 'response',
'status': 'status'
}
def __init__(self, response=None, status=None): # noqa: E501
"""ResponseContainerListString - a model defined in Swagger""" # noqa: E501
self._response = None
self._status = None
self.discriminator = None
if response is not None:
self.response = response
self.status = status
@property
def response(self):
"""Gets the response of this ResponseContainerListString. # noqa: E501
:return: The response of this ResponseContainerListString. # noqa: E501
:rtype: list[str]
"""
return self._response
@response.setter
def response(self, response):
"""Sets the response of this ResponseContainerListString.
:param response: The response of this ResponseContainerListString. # noqa: E501
:type: list[str]
"""
self._response = response
@property
def status(self):
"""Gets the status of this ResponseContainerListString. # noqa: E501
:return: The status of this ResponseContainerListString. # noqa: E501
:rtype: ResponseStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ResponseContainerListString.
:param status: The status of this ResponseContainerListString. # noqa: E501
:type: ResponseStatus
"""
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResponseContainerListString, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResponseContainerListString):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
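# Minimal usage sketch (hypothetical, not generated by swagger-codegen):
# round-trip the model through to_dict()/to_str(); the ResponseStatus
# constructor arguments below are assumptions, so check the generated
# response_status module for the actual required fields.
#
#     status = ResponseStatus(result="OK", code=200)  # hypothetical args
#     container = ResponseContainerListString(response=["a", "b"], status=status)
#     print(container.to_str())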
| 30.682759
| 409
| 0.601483
|
import pprint
import re
import six
from wavefront_api_client.models.response_status import ResponseStatus
class ResponseContainerListString(object):
swagger_types = {
'response': 'list[str]',
'status': 'ResponseStatus'
}
attribute_map = {
'response': 'response',
'status': 'status'
}
def __init__(self, response=None, status=None):
self._response = None
self._status = None
self.discriminator = None
if response is not None:
self.response = response
self.status = status
@property
def response(self):
return self._response
@response.setter
def response(self, response):
self._response = response
@property
def status(self):
return self._status
@status.setter
def status(self, status):
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`")
self._status = status
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResponseContainerListString, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ResponseContainerListString):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
1c4316da8a7451b37c49defcc4c8ba806a5b3516
| 464
|
py
|
Python
|
ctsb/models/__init__.py
|
paula-gradu/ctsb
|
fdc00acb798949ce1120778ad4725faf170f80c3
|
[
"Apache-2.0"
] | 1
|
2021-07-03T05:26:56.000Z
|
2021-07-03T05:26:56.000Z
|
ctsb/models/__init__.py
|
paula-gradu/ctsb
|
fdc00acb798949ce1120778ad4725faf170f80c3
|
[
"Apache-2.0"
] | null | null | null |
ctsb/models/__init__.py
|
paula-gradu/ctsb
|
fdc00acb798949ce1120778ad4725faf170f80c3
|
[
"Apache-2.0"
] | null | null | null |
# models init file
from ctsb.models.registration import model_registry, model_register, model, model_spec
from ctsb.models.core import Model, CustomModel
# ---------- Models ----------
model_register(
id='LastValue',
entry_point='ctsb.models.time_series:LastValue',
)
model_register(
id='Linear',
entry_point='ctsb.models.time_series:Linear',
)
model_register(
id='PredictZero',
entry_point='ctsb.models.time_series:PredictZero',
)
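# Illustrative note (assumption, not confirmed by this file): the
# registration above mirrors OpenAI Gym's pattern, so a registered model is
# presumably constructed by id through the imported `model` helper, e.g.:
#
#     import ctsb
#     last_value = ctsb.model("LastValue")  # assumed gym-style factory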
| 18.56
| 86
| 0.713362
|
from ctsb.models.registration import model_registry, model_register, model, model_spec
from ctsb.models.core import Model, CustomModel
model_register(
id='LastValue',
entry_point='ctsb.models.time_series:LastValue',
)
model_register(
id='Linear',
entry_point='ctsb.models.time_series:Linear',
)
model_register(
id='PredictZero',
entry_point='ctsb.models.time_series:PredictZero',
)
| true
| true
|
1c43180dbe1faed6c7475316a57df59e40602db1
| 155,494
|
py
|
Python
|
goodies/ospexporter/export_fbx_bin.py
|
Ghimli/new-ospgl
|
31bd84e52d954683671211ff16ce8702bdb87312
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2020-01-18T22:13:24.000Z
|
2020-01-18T22:13:24.000Z
|
release/scripts/addons/io_scene_fbx/export_fbx_bin.py
|
ringsce/Rings3D
|
8059d1e2460fc8d6f101eff8e695f68a99f6671d
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
release/scripts/addons/io_scene_fbx/export_fbx_bin.py
|
ringsce/Rings3D
|
8059d1e2460fc8d6f101eff8e695f68a99f6671d
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# Script copyright (C) Campbell Barton, Bastien Montagne
import array
import datetime
import math
import os
import time
from itertools import zip_longest, chain
if "bpy" in locals():
import importlib
if "encode_bin" in locals():
importlib.reload(encode_bin)
if "data_types" in locals():
importlib.reload(data_types)
if "fbx_utils" in locals():
importlib.reload(fbx_utils)
import bpy
import bpy_extras
from bpy_extras import node_shader_utils
from mathutils import Vector, Matrix
from . import encode_bin, data_types, fbx_utils
from .fbx_utils import (
# Constants.
FBX_VERSION, FBX_HEADER_VERSION, FBX_SCENEINFO_VERSION, FBX_TEMPLATES_VERSION,
FBX_MODELS_VERSION,
FBX_GEOMETRY_VERSION, FBX_GEOMETRY_NORMAL_VERSION, FBX_GEOMETRY_BINORMAL_VERSION, FBX_GEOMETRY_TANGENT_VERSION,
FBX_GEOMETRY_SMOOTHING_VERSION, FBX_GEOMETRY_CREASE_VERSION, FBX_GEOMETRY_VCOLOR_VERSION, FBX_GEOMETRY_UV_VERSION,
FBX_GEOMETRY_MATERIAL_VERSION, FBX_GEOMETRY_LAYER_VERSION,
FBX_GEOMETRY_SHAPE_VERSION, FBX_DEFORMER_SHAPE_VERSION, FBX_DEFORMER_SHAPECHANNEL_VERSION,
FBX_POSE_BIND_VERSION, FBX_DEFORMER_SKIN_VERSION, FBX_DEFORMER_CLUSTER_VERSION,
FBX_MATERIAL_VERSION, FBX_TEXTURE_VERSION,
FBX_ANIM_KEY_VERSION,
FBX_ANIM_PROPSGROUP_NAME,
FBX_KTIME,
BLENDER_OTHER_OBJECT_TYPES, BLENDER_OBJECT_TYPES_MESHLIKE,
FBX_LIGHT_TYPES, FBX_LIGHT_DECAY_TYPES,
RIGHT_HAND_AXES, FBX_FRAMERATES,
# Miscellaneous utils.
PerfMon,
units_blender_to_fbx_factor, units_convertor, units_convertor_iter,
matrix4_to_array, similar_values, similar_values_iter,
# Mesh transform helpers.
vcos_transformed_gen, nors_transformed_gen,
# UUID from key.
get_fbx_uuid_from_key,
# Key generators.
get_blenderID_key, get_blenderID_name,
get_blender_mesh_shape_key, get_blender_mesh_shape_channel_key,
get_blender_empty_key, get_blender_bone_key,
get_blender_bindpose_key, get_blender_armature_skin_key, get_blender_bone_cluster_key,
get_blender_anim_id_base, get_blender_anim_stack_key, get_blender_anim_layer_key,
get_blender_anim_curve_node_key, get_blender_anim_curve_key,
get_blender_nodetexture_key,
# FBX element data.
elem_empty,
elem_data_single_bool, elem_data_single_int16, elem_data_single_int32, elem_data_single_int64,
elem_data_single_float32, elem_data_single_float64,
elem_data_single_bytes, elem_data_single_string, elem_data_single_string_unicode,
elem_data_single_bool_array, elem_data_single_int32_array, elem_data_single_int64_array,
elem_data_single_float32_array, elem_data_single_float64_array, elem_data_vec_float64,
# FBX element properties.
elem_properties, elem_props_set, elem_props_compound,
# FBX element properties handling templates.
elem_props_template_init, elem_props_template_set, elem_props_template_finalize,
# Templates.
FBXTemplate, fbx_templates_generate,
# Animation.
AnimationCurveNodeWrapper,
# Objects.
ObjectWrapper, fbx_name_class,
# Top level.
FBXExportSettingsMedia, FBXExportSettings, FBXExportData,
)
# Units convertors!
convert_sec_to_ktime = units_convertor("second", "ktime")
convert_sec_to_ktime_iter = units_convertor_iter("second", "ktime")
convert_mm_to_inch = units_convertor("millimeter", "inch")
convert_rad_to_deg = units_convertor("radian", "degree")
convert_rad_to_deg_iter = units_convertor_iter("radian", "degree")
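# Illustrative note (assumption based on how these are used later in this
# file): each convertor is a plain callable, e.g. convert_mm_to_inch(25.4)
# should yield 1.0, and the *_iter variants map the conversion over an
# iterable of values.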
# ##### Templates #####
# TODO: check all those "default" values, they should match Blender's defaults as much as possible, I guess?
def fbx_template_def_globalsettings(scene, settings, override_defaults=None, nbr_users=0):
props = {}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"GlobalSettings", b"", props, nbr_users, [False])
def fbx_template_def_model(scene, settings, override_defaults=None, nbr_users=0):
gscale = settings.global_scale
props = {
# Name, Value, Type, Animatable
b"QuaternionInterpolate": (0, "p_enum", False), # 0 = no quat interpolation.
b"RotationOffset": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"RotationPivot": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"ScalingOffset": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"ScalingPivot": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"TranslationActive": (False, "p_bool", False),
b"TranslationMin": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"TranslationMax": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"TranslationMinX": (False, "p_bool", False),
b"TranslationMinY": (False, "p_bool", False),
b"TranslationMinZ": (False, "p_bool", False),
b"TranslationMaxX": (False, "p_bool", False),
b"TranslationMaxY": (False, "p_bool", False),
b"TranslationMaxZ": (False, "p_bool", False),
b"RotationOrder": (0, "p_enum", False), # we always use 'XYZ' order.
b"RotationSpaceForLimitOnly": (False, "p_bool", False),
b"RotationStiffnessX": (0.0, "p_double", False),
b"RotationStiffnessY": (0.0, "p_double", False),
b"RotationStiffnessZ": (0.0, "p_double", False),
b"AxisLen": (10.0, "p_double", False),
b"PreRotation": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"PostRotation": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"RotationActive": (False, "p_bool", False),
b"RotationMin": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"RotationMax": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"RotationMinX": (False, "p_bool", False),
b"RotationMinY": (False, "p_bool", False),
b"RotationMinZ": (False, "p_bool", False),
b"RotationMaxX": (False, "p_bool", False),
b"RotationMaxY": (False, "p_bool", False),
b"RotationMaxZ": (False, "p_bool", False),
b"InheritType": (0, "p_enum", False), # RrSs
b"ScalingActive": (False, "p_bool", False),
b"ScalingMin": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"ScalingMax": ((1.0, 1.0, 1.0), "p_vector_3d", False),
b"ScalingMinX": (False, "p_bool", False),
b"ScalingMinY": (False, "p_bool", False),
b"ScalingMinZ": (False, "p_bool", False),
b"ScalingMaxX": (False, "p_bool", False),
b"ScalingMaxY": (False, "p_bool", False),
b"ScalingMaxZ": (False, "p_bool", False),
b"GeometricTranslation": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"GeometricRotation": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"GeometricScaling": ((1.0, 1.0, 1.0), "p_vector_3d", False),
b"MinDampRangeX": (0.0, "p_double", False),
b"MinDampRangeY": (0.0, "p_double", False),
b"MinDampRangeZ": (0.0, "p_double", False),
b"MaxDampRangeX": (0.0, "p_double", False),
b"MaxDampRangeY": (0.0, "p_double", False),
b"MaxDampRangeZ": (0.0, "p_double", False),
b"MinDampStrengthX": (0.0, "p_double", False),
b"MinDampStrengthY": (0.0, "p_double", False),
b"MinDampStrengthZ": (0.0, "p_double", False),
b"MaxDampStrengthX": (0.0, "p_double", False),
b"MaxDampStrengthY": (0.0, "p_double", False),
b"MaxDampStrengthZ": (0.0, "p_double", False),
b"PreferedAngleX": (0.0, "p_double", False),
b"PreferedAngleY": (0.0, "p_double", False),
b"PreferedAngleZ": (0.0, "p_double", False),
b"LookAtProperty": (None, "p_object", False),
b"UpVectorProperty": (None, "p_object", False),
b"Show": (True, "p_bool", False),
b"NegativePercentShapeSupport": (True, "p_bool", False),
b"DefaultAttributeIndex": (-1, "p_integer", False),
b"Freeze": (False, "p_bool", False),
b"LODBox": (False, "p_bool", False),
b"Lcl Translation": ((0.0, 0.0, 0.0), "p_lcl_translation", True),
b"Lcl Rotation": ((0.0, 0.0, 0.0), "p_lcl_rotation", True),
b"Lcl Scaling": ((1.0, 1.0, 1.0), "p_lcl_scaling", True),
b"Visibility": (1.0, "p_visibility", True),
b"Visibility Inheritance": (1, "p_visibility_inheritance", False),
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"Model", b"FbxNode", props, nbr_users, [False])
def fbx_template_def_null(scene, settings, override_defaults=None, nbr_users=0):
props = {
b"Color": ((0.8, 0.8, 0.8), "p_color_rgb", False),
b"Size": (100.0, "p_double", False),
b"Look": (1, "p_enum", False), # Cross (0 is None, i.e. invisible?).
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"NodeAttribute", b"FbxNull", props, nbr_users, [False])
def fbx_template_def_light(scene, settings, override_defaults=None, nbr_users=0):
gscale = settings.global_scale
props = {
b"LightType": (0, "p_enum", False), # Point light.
b"CastLight": (True, "p_bool", False),
b"Color": ((1.0, 1.0, 1.0), "p_color", True),
b"Intensity": (100.0, "p_number", True), # Times 100 compared to Blender values...
b"DecayType": (2, "p_enum", False), # Quadratic.
b"DecayStart": (30.0 * gscale, "p_double", False),
b"CastShadows": (True, "p_bool", False),
b"ShadowColor": ((0.0, 0.0, 0.0), "p_color", True),
b"AreaLightShape": (0, "p_enum", False), # Rectangle.
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"NodeAttribute", b"FbxLight", props, nbr_users, [False])
def fbx_template_def_camera(scene, settings, override_defaults=None, nbr_users=0):
r = scene.render
props = {
b"Color": ((0.8, 0.8, 0.8), "p_color_rgb", False),
b"Position": ((0.0, 0.0, -50.0), "p_vector", True),
b"UpVector": ((0.0, 1.0, 0.0), "p_vector", True),
b"InterestPosition": ((0.0, 0.0, 0.0), "p_vector", True),
b"Roll": (0.0, "p_roll", True),
b"OpticalCenterX": (0.0, "p_opticalcenterx", True),
b"OpticalCenterY": (0.0, "p_opticalcentery", True),
b"BackgroundColor": ((0.63, 0.63, 0.63), "p_color", True),
b"TurnTable": (0.0, "p_number", True),
b"DisplayTurnTableIcon": (False, "p_bool", False),
b"UseMotionBlur": (False, "p_bool", False),
b"UseRealTimeMotionBlur": (True, "p_bool", False),
b"Motion Blur Intensity": (1.0, "p_number", True),
b"AspectRatioMode": (0, "p_enum", False), # WindowSize.
b"AspectWidth": (320.0, "p_double", False),
b"AspectHeight": (200.0, "p_double", False),
b"PixelAspectRatio": (1.0, "p_double", False),
b"FilmOffsetX": (0.0, "p_number", True),
b"FilmOffsetY": (0.0, "p_number", True),
b"FilmWidth": (0.816, "p_double", False),
b"FilmHeight": (0.612, "p_double", False),
b"FilmAspectRatio": (1.3333333333333333, "p_double", False),
b"FilmSqueezeRatio": (1.0, "p_double", False),
b"FilmFormatIndex": (0, "p_enum", False), # Assuming this is ApertureFormat, 0 = custom.
b"PreScale": (1.0, "p_number", True),
b"FilmTranslateX": (0.0, "p_number", True),
b"FilmTranslateY": (0.0, "p_number", True),
b"FilmRollPivotX": (0.0, "p_number", True),
b"FilmRollPivotY": (0.0, "p_number", True),
b"FilmRollValue": (0.0, "p_number", True),
b"FilmRollOrder": (0, "p_enum", False), # 0 = rotate first (default).
b"ApertureMode": (2, "p_enum", False), # 2 = Vertical.
b"GateFit": (0, "p_enum", False), # 0 = no resolution gate fit.
b"FieldOfView": (25.114999771118164, "p_fov", True),
b"FieldOfViewX": (40.0, "p_fov_x", True),
b"FieldOfViewY": (40.0, "p_fov_y", True),
b"FocalLength": (34.89327621672628, "p_number", True),
b"CameraFormat": (0, "p_enum", False), # Custom camera format.
b"UseFrameColor": (False, "p_bool", False),
b"FrameColor": ((0.3, 0.3, 0.3), "p_color_rgb", False),
b"ShowName": (True, "p_bool", False),
b"ShowInfoOnMoving": (True, "p_bool", False),
b"ShowGrid": (True, "p_bool", False),
b"ShowOpticalCenter": (False, "p_bool", False),
b"ShowAzimut": (True, "p_bool", False),
b"ShowTimeCode": (False, "p_bool", False),
b"ShowAudio": (False, "p_bool", False),
b"AudioColor": ((0.0, 1.0, 0.0), "p_vector_3d", False), # Yep, vector3d, not corlorgb… :cry:
b"NearPlane": (10.0, "p_double", False),
b"FarPlane": (4000.0, "p_double", False),
b"AutoComputeClipPanes": (False, "p_bool", False),
b"ViewCameraToLookAt": (True, "p_bool", False),
b"ViewFrustumNearFarPlane": (False, "p_bool", False),
b"ViewFrustumBackPlaneMode": (2, "p_enum", False), # 2 = show back plane if texture added.
b"BackPlaneDistance": (4000.0, "p_number", True),
b"BackPlaneDistanceMode": (1, "p_enum", False), # 1 = relative to camera.
b"ViewFrustumFrontPlaneMode": (2, "p_enum", False), # 2 = show front plane if texture added.
b"FrontPlaneDistance": (10.0, "p_number", True),
b"FrontPlaneDistanceMode": (1, "p_enum", False), # 1 = relative to camera.
b"LockMode": (False, "p_bool", False),
b"LockInterestNavigation": (False, "p_bool", False),
# BackPlate... properties **arggggg!**
b"FitImage": (False, "p_bool", False),
b"Crop": (False, "p_bool", False),
b"Center": (True, "p_bool", False),
b"KeepRatio": (True, "p_bool", False),
# End of BackPlate...
b"BackgroundAlphaTreshold": (0.5, "p_double", False),
b"ShowBackplate": (True, "p_bool", False),
b"BackPlaneOffsetX": (0.0, "p_number", True),
b"BackPlaneOffsetY": (0.0, "p_number", True),
b"BackPlaneRotation": (0.0, "p_number", True),
b"BackPlaneScaleX": (1.0, "p_number", True),
b"BackPlaneScaleY": (1.0, "p_number", True),
b"Background Texture": (None, "p_object", False),
b"FrontPlateFitImage": (True, "p_bool", False),
b"FrontPlateCrop": (False, "p_bool", False),
b"FrontPlateCenter": (True, "p_bool", False),
b"FrontPlateKeepRatio": (True, "p_bool", False),
b"Foreground Opacity": (1.0, "p_double", False),
b"ShowFrontplate": (True, "p_bool", False),
b"FrontPlaneOffsetX": (0.0, "p_number", True),
b"FrontPlaneOffsetY": (0.0, "p_number", True),
b"FrontPlaneRotation": (0.0, "p_number", True),
b"FrontPlaneScaleX": (1.0, "p_number", True),
b"FrontPlaneScaleY": (1.0, "p_number", True),
b"Foreground Texture": (None, "p_object", False),
b"DisplaySafeArea": (False, "p_bool", False),
b"DisplaySafeAreaOnRender": (False, "p_bool", False),
b"SafeAreaDisplayStyle": (1, "p_enum", False), # 1 = rounded corners.
b"SafeAreaAspectRatio": (1.3333333333333333, "p_double", False),
b"Use2DMagnifierZoom": (False, "p_bool", False),
b"2D Magnifier Zoom": (100.0, "p_number", True),
b"2D Magnifier X": (50.0, "p_number", True),
b"2D Magnifier Y": (50.0, "p_number", True),
b"CameraProjectionType": (0, "p_enum", False), # 0 = perspective, 1 = orthogonal.
b"OrthoZoom": (1.0, "p_double", False),
b"UseRealTimeDOFAndAA": (False, "p_bool", False),
b"UseDepthOfField": (False, "p_bool", False),
b"FocusSource": (0, "p_enum", False), # 0 = camera interest, 1 = distance from camera interest.
b"FocusAngle": (3.5, "p_double", False), # ???
b"FocusDistance": (200.0, "p_double", False),
b"UseAntialiasing": (False, "p_bool", False),
b"AntialiasingIntensity": (0.77777, "p_double", False),
b"AntialiasingMethod": (0, "p_enum", False), # 0 = oversampling, 1 = hardware.
b"UseAccumulationBuffer": (False, "p_bool", False),
b"FrameSamplingCount": (7, "p_integer", False),
b"FrameSamplingType": (1, "p_enum", False), # 0 = uniform, 1 = stochastic.
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"NodeAttribute", b"FbxCamera", props, nbr_users, [False])
def fbx_template_def_bone(scene, settings, override_defaults=None, nbr_users=0):
props = {}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"NodeAttribute", b"LimbNode", props, nbr_users, [False])
def fbx_template_def_geometry(scene, settings, override_defaults=None, nbr_users=0):
props = {
b"Color": ((0.8, 0.8, 0.8), "p_color_rgb", False),
b"BBoxMin": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"BBoxMax": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"Primary Visibility": (True, "p_bool", False),
b"Casts Shadows": (True, "p_bool", False),
b"Receive Shadows": (True, "p_bool", False),
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"Geometry", b"FbxMesh", props, nbr_users, [False])
def fbx_template_def_material(scene, settings, override_defaults=None, nbr_users=0):
# WIP...
props = {
b"ShadingModel": ("Phong", "p_string", False),
b"MultiLayer": (False, "p_bool", False),
# Lambert-specific.
b"EmissiveColor": ((0.0, 0.0, 0.0), "p_color", True),
b"EmissiveFactor": (1.0, "p_number", True),
b"AmbientColor": ((0.2, 0.2, 0.2), "p_color", True),
b"AmbientFactor": (1.0, "p_number", True),
b"DiffuseColor": ((0.8, 0.8, 0.8), "p_color", True),
b"DiffuseFactor": (1.0, "p_number", True),
b"TransparentColor": ((0.0, 0.0, 0.0), "p_color", True),
b"TransparencyFactor": (0.0, "p_number", True),
b"Opacity": (1.0, "p_number", True),
b"NormalMap": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"Bump": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"BumpFactor": (1.0, "p_double", False),
b"DisplacementColor": ((0.0, 0.0, 0.0), "p_color_rgb", False),
b"DisplacementFactor": (1.0, "p_double", False),
b"VectorDisplacementColor": ((0.0, 0.0, 0.0), "p_color_rgb", False),
b"VectorDisplacementFactor": (1.0, "p_double", False),
# Phong-specific.
b"SpecularColor": ((0.2, 0.2, 0.2), "p_color", True),
b"SpecularFactor": (1.0, "p_number", True),
        # Not sure about the name; the importer uses this (but ShininessExponent for the tex prop name!).
        # And in FBX exported by the SDK, you get one in the template and the other in the actual material!!! :/
        # For now, using both.
b"Shininess": (20.0, "p_number", True),
b"ShininessExponent": (20.0, "p_number", True),
b"ReflectionColor": ((0.0, 0.0, 0.0), "p_color", True),
b"ReflectionFactor": (1.0, "p_number", True),
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"Material", b"FbxSurfacePhong", props, nbr_users, [False])
def fbx_template_def_texture_file(scene, settings, override_defaults=None, nbr_users=0):
# WIP...
# XXX Not sure about all names!
props = {
b"TextureTypeUse": (0, "p_enum", False), # Standard.
b"AlphaSource": (2, "p_enum", False), # Black (i.e. texture's alpha), XXX name guessed!.
b"Texture alpha": (1.0, "p_double", False),
b"PremultiplyAlpha": (True, "p_bool", False),
b"CurrentTextureBlendMode": (1, "p_enum", False), # Additive...
b"CurrentMappingType": (0, "p_enum", False), # UV.
b"UVSet": ("default", "p_string", False), # UVMap name.
b"WrapModeU": (0, "p_enum", False), # Repeat.
b"WrapModeV": (0, "p_enum", False), # Repeat.
b"UVSwap": (False, "p_bool", False),
b"Translation": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"Rotation": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"Scaling": ((1.0, 1.0, 1.0), "p_vector_3d", False),
b"TextureRotationPivot": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"TextureScalingPivot": ((0.0, 0.0, 0.0), "p_vector_3d", False),
# Not sure about those two...
b"UseMaterial": (False, "p_bool", False),
b"UseMipMap": (False, "p_bool", False),
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"Texture", b"FbxFileTexture", props, nbr_users, [False])
def fbx_template_def_video(scene, settings, override_defaults=None, nbr_users=0):
# WIP...
props = {
# All pictures.
b"Width": (0, "p_integer", False),
b"Height": (0, "p_integer", False),
b"Path": ("", "p_string_url", False),
b"AccessMode": (0, "p_enum", False), # Disk (0=Disk, 1=Mem, 2=DiskAsync).
# All videos.
b"StartFrame": (0, "p_integer", False),
b"StopFrame": (0, "p_integer", False),
b"Offset": (0, "p_timestamp", False),
b"PlaySpeed": (0.0, "p_double", False),
b"FreeRunning": (False, "p_bool", False),
b"Loop": (False, "p_bool", False),
b"InterlaceMode": (0, "p_enum", False), # None, i.e. progressive.
# Image sequences.
b"ImageSequence": (False, "p_bool", False),
b"ImageSequenceOffset": (0, "p_integer", False),
b"FrameRate": (0.0, "p_double", False),
b"LastFrame": (0, "p_integer", False),
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"Video", b"FbxVideo", props, nbr_users, [False])
def fbx_template_def_pose(scene, settings, override_defaults=None, nbr_users=0):
props = {}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"Pose", b"", props, nbr_users, [False])
def fbx_template_def_deformer(scene, settings, override_defaults=None, nbr_users=0):
props = {}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"Deformer", b"", props, nbr_users, [False])
def fbx_template_def_animstack(scene, settings, override_defaults=None, nbr_users=0):
props = {
b"Description": ("", "p_string", False),
b"LocalStart": (0, "p_timestamp", False),
b"LocalStop": (0, "p_timestamp", False),
b"ReferenceStart": (0, "p_timestamp", False),
b"ReferenceStop": (0, "p_timestamp", False),
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"AnimationStack", b"FbxAnimStack", props, nbr_users, [False])
def fbx_template_def_animlayer(scene, settings, override_defaults=None, nbr_users=0):
props = {
b"Weight": (100.0, "p_number", True),
b"Mute": (False, "p_bool", False),
b"Solo": (False, "p_bool", False),
b"Lock": (False, "p_bool", False),
b"Color": ((0.8, 0.8, 0.8), "p_color_rgb", False),
b"BlendMode": (0, "p_enum", False),
b"RotationAccumulationMode": (0, "p_enum", False),
b"ScaleAccumulationMode": (0, "p_enum", False),
b"BlendModeBypass": (0, "p_ulonglong", False),
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"AnimationLayer", b"FbxAnimLayer", props, nbr_users, [False])
def fbx_template_def_animcurvenode(scene, settings, override_defaults=None, nbr_users=0):
props = {
FBX_ANIM_PROPSGROUP_NAME.encode(): (None, "p_compound", False),
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"AnimationCurveNode", b"FbxAnimCurveNode", props, nbr_users, [False])
def fbx_template_def_animcurve(scene, settings, override_defaults=None, nbr_users=0):
props = {}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"AnimationCurve", b"", props, nbr_users, [False])
# ##### Generators for connection elements. #####
def elem_connection(elem, c_type, uid_src, uid_dst, prop_dst=None):
e = elem_data_single_string(elem, b"C", c_type)
e.add_int64(uid_src)
e.add_int64(uid_dst)
if prop_dst is not None:
e.add_string(prop_dst)
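# Illustrative note (standard FBX convention, not stated in this file):
# c_type is typically b"OO" for object-to-object connections and b"OP" for
# object-to-property connections, in which case prop_dst names the
# destination property.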
# ##### FBX objects generators. #####
def fbx_data_element_custom_properties(props, bid):
"""
Store custom properties of blender ID bid (any mapping-like object, in fact) into FBX properties props.
"""
items = bid.items()
if not items:
return
rna_properties = {prop.identifier for prop in bid.bl_rna.properties if prop.is_runtime}
for k, v in items:
if k == '_RNA_UI' or k in rna_properties:
continue
list_val = getattr(v, "to_list", lambda: None)()
if isinstance(v, str):
elem_props_set(props, "p_string", k.encode(), v, custom=True)
elif isinstance(v, int):
elem_props_set(props, "p_integer", k.encode(), v, custom=True)
elif isinstance(v, float):
elem_props_set(props, "p_double", k.encode(), v, custom=True)
elif list_val:
if len(list_val) == 3:
elem_props_set(props, "p_vector", k.encode(), list_val, custom=True)
else:
elem_props_set(props, "p_string", k.encode(), str(list_val), custom=True)
else:
elem_props_set(props, "p_string", k.encode(), str(v), custom=True)
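# Illustrative note (hypothetical values): a Blender ID with custom
# properties like {"lod": 2, "tags_xyz": [1.0, 2.0, 3.0]} would go through
# the dispatch above as a p_integer and a p_vector respectively, both
# flagged custom=True.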
def fbx_data_empty_elements(root, empty, scene_data):
"""
Write the Empty data block (you can control its FBX datatype with the 'fbx_type' string custom property).
"""
empty_key = scene_data.data_empties[empty]
null = elem_data_single_int64(root, b"NodeAttribute", get_fbx_uuid_from_key(empty_key))
null.add_string(fbx_name_class(empty.name.encode(), b"NodeAttribute"))
val = empty.bdata.get('fbx_type', None)
null.add_string(val.encode() if val and isinstance(val, str) else b"Null")
elem_data_single_string(null, b"TypeFlags", b"Null")
tmpl = elem_props_template_init(scene_data.templates, b"Null")
props = elem_properties(null)
elem_props_template_finalize(tmpl, props)
# No custom properties, already saved with object (Model).
def fbx_data_light_elements(root, lamp, scene_data):
"""
Write the Lamp data block.
"""
gscale = scene_data.settings.global_scale
light_key = scene_data.data_lights[lamp]
do_light = True
decay_type = FBX_LIGHT_DECAY_TYPES['CONSTANT']
do_shadow = False
shadow_color = Vector((0.0, 0.0, 0.0))
if lamp.type not in {'HEMI'}:
if lamp.type not in {'SUN', 'AREA'}:
decay_type = FBX_LIGHT_DECAY_TYPES[lamp.falloff_type]
do_light = True
do_shadow = lamp.use_shadow
shadow_color = lamp.shadow_color
light = elem_data_single_int64(root, b"NodeAttribute", get_fbx_uuid_from_key(light_key))
light.add_string(fbx_name_class(lamp.name.encode(), b"NodeAttribute"))
light.add_string(b"Light")
elem_data_single_int32(light, b"GeometryVersion", FBX_GEOMETRY_VERSION) # Sic...
tmpl = elem_props_template_init(scene_data.templates, b"Light")
props = elem_properties(light)
elem_props_template_set(tmpl, props, "p_enum", b"LightType", FBX_LIGHT_TYPES[lamp.type])
elem_props_template_set(tmpl, props, "p_bool", b"CastLight", do_light)
elem_props_template_set(tmpl, props, "p_color", b"Color", lamp.color)
elem_props_template_set(tmpl, props, "p_number", b"Intensity", lamp.energy * 100.0)
elem_props_template_set(tmpl, props, "p_enum", b"DecayType", decay_type)
elem_props_template_set(tmpl, props, "p_double", b"DecayStart", lamp.distance * gscale)
elem_props_template_set(tmpl, props, "p_bool", b"CastShadows", do_shadow)
elem_props_template_set(tmpl, props, "p_color", b"ShadowColor", shadow_color)
if lamp.type in {'SPOT'}:
elem_props_template_set(tmpl, props, "p_double", b"OuterAngle", math.degrees(lamp.spot_size))
elem_props_template_set(tmpl, props, "p_double", b"InnerAngle",
math.degrees(lamp.spot_size * (1.0 - lamp.spot_blend)))
elem_props_template_finalize(tmpl, props)
# Custom properties.
if scene_data.settings.use_custom_props:
fbx_data_element_custom_properties(props, lamp)
def fbx_data_camera_elements(root, cam_obj, scene_data):
"""
Write the Camera data blocks.
"""
gscale = scene_data.settings.global_scale
cam = cam_obj.bdata
cam_data = cam.data
cam_key = scene_data.data_cameras[cam_obj]
# Real data now, good old camera!
# Object transform info.
loc, rot, scale, matrix, matrix_rot = cam_obj.fbx_object_tx(scene_data)
up = matrix_rot @ Vector((0.0, 1.0, 0.0))
to = matrix_rot @ Vector((0.0, 0.0, -1.0))
# Render settings.
# TODO We could export much more...
render = scene_data.scene.render
width = render.resolution_x
height = render.resolution_y
aspect = width / height
# Film width & height from mm to inches
filmwidth = convert_mm_to_inch(cam_data.sensor_width)
filmheight = convert_mm_to_inch(cam_data.sensor_height)
filmaspect = filmwidth / filmheight
# Film offset
offsetx = filmwidth * cam_data.shift_x
offsety = filmaspect * filmheight * cam_data.shift_y
cam = elem_data_single_int64(root, b"NodeAttribute", get_fbx_uuid_from_key(cam_key))
cam.add_string(fbx_name_class(cam_data.name.encode(), b"NodeAttribute"))
cam.add_string(b"Camera")
tmpl = elem_props_template_init(scene_data.templates, b"Camera")
props = elem_properties(cam)
elem_props_template_set(tmpl, props, "p_vector", b"Position", loc)
elem_props_template_set(tmpl, props, "p_vector", b"UpVector", up)
elem_props_template_set(tmpl, props, "p_vector", b"InterestPosition", loc + to) # Point, not vector!
# Should we use world value?
elem_props_template_set(tmpl, props, "p_color", b"BackgroundColor", (0.0, 0.0, 0.0))
elem_props_template_set(tmpl, props, "p_bool", b"DisplayTurnTableIcon", True)
elem_props_template_set(tmpl, props, "p_enum", b"AspectRatioMode", 2) # FixedResolution
elem_props_template_set(tmpl, props, "p_double", b"AspectWidth", float(render.resolution_x))
elem_props_template_set(tmpl, props, "p_double", b"AspectHeight", float(render.resolution_y))
elem_props_template_set(tmpl, props, "p_double", b"PixelAspectRatio",
float(render.pixel_aspect_x / render.pixel_aspect_y))
elem_props_template_set(tmpl, props, "p_double", b"FilmWidth", filmwidth)
elem_props_template_set(tmpl, props, "p_double", b"FilmHeight", filmheight)
elem_props_template_set(tmpl, props, "p_double", b"FilmAspectRatio", filmaspect)
elem_props_template_set(tmpl, props, "p_double", b"FilmOffsetX", offsetx)
elem_props_template_set(tmpl, props, "p_double", b"FilmOffsetY", offsety)
elem_props_template_set(tmpl, props, "p_enum", b"ApertureMode", 3) # FocalLength.
elem_props_template_set(tmpl, props, "p_enum", b"GateFit", 2) # FitHorizontal.
elem_props_template_set(tmpl, props, "p_fov", b"FieldOfView", math.degrees(cam_data.angle_x))
elem_props_template_set(tmpl, props, "p_fov_x", b"FieldOfViewX", math.degrees(cam_data.angle_x))
elem_props_template_set(tmpl, props, "p_fov_y", b"FieldOfViewY", math.degrees(cam_data.angle_y))
# No need to convert to inches here...
elem_props_template_set(tmpl, props, "p_double", b"FocalLength", cam_data.lens)
elem_props_template_set(tmpl, props, "p_double", b"SafeAreaAspectRatio", aspect)
# Default to perspective camera.
elem_props_template_set(tmpl, props, "p_enum", b"CameraProjectionType", 1 if cam_data.type == 'ORTHO' else 0)
elem_props_template_set(tmpl, props, "p_double", b"OrthoZoom", cam_data.ortho_scale)
elem_props_template_set(tmpl, props, "p_double", b"NearPlane", cam_data.clip_start * gscale)
elem_props_template_set(tmpl, props, "p_double", b"FarPlane", cam_data.clip_end * gscale)
elem_props_template_set(tmpl, props, "p_enum", b"BackPlaneDistanceMode", 1) # RelativeToCamera.
elem_props_template_set(tmpl, props, "p_double", b"BackPlaneDistance", cam_data.clip_end * gscale)
elem_props_template_finalize(tmpl, props)
# Custom properties.
if scene_data.settings.use_custom_props:
fbx_data_element_custom_properties(props, cam_data)
elem_data_single_string(cam, b"TypeFlags", b"Camera")
elem_data_single_int32(cam, b"GeometryVersion", 124) # Sic...
elem_data_vec_float64(cam, b"Position", loc)
elem_data_vec_float64(cam, b"Up", up)
elem_data_vec_float64(cam, b"LookAt", to)
elem_data_single_int32(cam, b"ShowInfoOnMoving", 1)
elem_data_single_int32(cam, b"ShowAudio", 0)
elem_data_vec_float64(cam, b"AudioColor", (0.0, 1.0, 0.0))
elem_data_single_float64(cam, b"CameraOrthoZoom", 1.0)
def fbx_data_bindpose_element(root, me_obj, me, scene_data, arm_obj=None, mat_world_arm=None, bones=[]):
"""
Helper, since bindpose are used by both meshes shape keys and armature bones...
"""
if arm_obj is None:
arm_obj = me_obj
    # We assume the bind pose for our bones is their "Editmode" pose...
# All matrices are expected in global (world) space.
bindpose_key = get_blender_bindpose_key(arm_obj.bdata, me)
fbx_pose = elem_data_single_int64(root, b"Pose", get_fbx_uuid_from_key(bindpose_key))
fbx_pose.add_string(fbx_name_class(me.name.encode(), b"Pose"))
fbx_pose.add_string(b"BindPose")
elem_data_single_string(fbx_pose, b"Type", b"BindPose")
elem_data_single_int32(fbx_pose, b"Version", FBX_POSE_BIND_VERSION)
elem_data_single_int32(fbx_pose, b"NbPoseNodes", 1 + (1 if (arm_obj != me_obj) else 0) + len(bones))
# First node is mesh/object.
mat_world_obj = me_obj.fbx_object_matrix(scene_data, global_space=True)
fbx_posenode = elem_empty(fbx_pose, b"PoseNode")
elem_data_single_int64(fbx_posenode, b"Node", me_obj.fbx_uuid)
elem_data_single_float64_array(fbx_posenode, b"Matrix", matrix4_to_array(mat_world_obj))
# Second node is armature object itself.
if arm_obj != me_obj:
fbx_posenode = elem_empty(fbx_pose, b"PoseNode")
elem_data_single_int64(fbx_posenode, b"Node", arm_obj.fbx_uuid)
elem_data_single_float64_array(fbx_posenode, b"Matrix", matrix4_to_array(mat_world_arm))
# And all bones of armature!
mat_world_bones = {}
for bo_obj in bones:
bomat = bo_obj.fbx_object_matrix(scene_data, rest=True, global_space=True)
mat_world_bones[bo_obj] = bomat
fbx_posenode = elem_empty(fbx_pose, b"PoseNode")
elem_data_single_int64(fbx_posenode, b"Node", bo_obj.fbx_uuid)
elem_data_single_float64_array(fbx_posenode, b"Matrix", matrix4_to_array(bomat))
return mat_world_obj, mat_world_bones
def fbx_data_mesh_shapes_elements(root, me_obj, me, scene_data, fbx_me_tmpl, fbx_me_props):
"""
Write shape keys related data.
"""
if me not in scene_data.data_deformers_shape:
return
write_normals = True # scene_data.settings.mesh_smooth_type in {'OFF'}
# First, write the geometry data itself (i.e. shapes).
_me_key, shape_key, shapes = scene_data.data_deformers_shape[me]
channels = []
for shape, (channel_key, geom_key, shape_verts_co, shape_verts_idx) in shapes.items():
# Use vgroups as weights, if defined.
if shape.vertex_group and shape.vertex_group in me_obj.bdata.vertex_groups:
shape_verts_weights = [0.0] * (len(shape_verts_co) // 3)
vg_idx = me_obj.bdata.vertex_groups[shape.vertex_group].index
for sk_idx, v_idx in enumerate(shape_verts_idx):
for vg in me.vertices[v_idx].groups:
if vg.group == vg_idx:
shape_verts_weights[sk_idx] = vg.weight * 100.0
else:
shape_verts_weights = [100.0] * (len(shape_verts_co) // 3)
channels.append((channel_key, shape, shape_verts_weights))
geom = elem_data_single_int64(root, b"Geometry", get_fbx_uuid_from_key(geom_key))
geom.add_string(fbx_name_class(shape.name.encode(), b"Geometry"))
geom.add_string(b"Shape")
tmpl = elem_props_template_init(scene_data.templates, b"Geometry")
props = elem_properties(geom)
elem_props_template_finalize(tmpl, props)
elem_data_single_int32(geom, b"Version", FBX_GEOMETRY_SHAPE_VERSION)
elem_data_single_int32_array(geom, b"Indexes", shape_verts_idx)
elem_data_single_float64_array(geom, b"Vertices", shape_verts_co)
if write_normals:
elem_data_single_float64_array(geom, b"Normals", [0.0] * len(shape_verts_co))
# Yiha! BindPose for shapekeys too! Dodecasigh...
    # XXX Not sure yet whether several bindposes on the same mesh are allowed, or not... :/
fbx_data_bindpose_element(root, me_obj, me, scene_data)
# ...and now, the deformers stuff.
fbx_shape = elem_data_single_int64(root, b"Deformer", get_fbx_uuid_from_key(shape_key))
fbx_shape.add_string(fbx_name_class(me.name.encode(), b"Deformer"))
fbx_shape.add_string(b"BlendShape")
elem_data_single_int32(fbx_shape, b"Version", FBX_DEFORMER_SHAPE_VERSION)
for channel_key, shape, shape_verts_weights in channels:
fbx_channel = elem_data_single_int64(root, b"Deformer", get_fbx_uuid_from_key(channel_key))
fbx_channel.add_string(fbx_name_class(shape.name.encode(), b"SubDeformer"))
fbx_channel.add_string(b"BlendShapeChannel")
elem_data_single_int32(fbx_channel, b"Version", FBX_DEFORMER_SHAPECHANNEL_VERSION)
elem_data_single_float64(fbx_channel, b"DeformPercent", shape.value * 100.0) # Percents...
elem_data_single_float64_array(fbx_channel, b"FullWeights", shape_verts_weights)
# *WHY* add this in linked mesh properties too? *cry*
        # No idea whether it's a percentage here too, or the more usual factor (assume percentage for now). :/
elem_props_template_set(fbx_me_tmpl, fbx_me_props, "p_number", shape.name.encode(), shape.value * 100.0,
animatable=True)
def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
"""
Write the Mesh (Geometry) data block.
"""
# Ugly helper... :/
def _infinite_gen(val):
        while True:
yield val
me_key, me, _free = scene_data.data_meshes[me_obj]
# In case of multiple instances of same mesh, only write it once!
if me_key in done_meshes:
return
# No gscale/gmat here, all data are supposed to be in object space.
smooth_type = scene_data.settings.mesh_smooth_type
write_normals = True # smooth_type in {'OFF'}
do_bake_space_transform = me_obj.use_bake_space_transform(scene_data)
# Vertices are in object space, but we are post-multiplying all transforms with the inverse of the
# global matrix, so we need to apply the global matrix to the vertices to get the correct result.
geom_mat_co = scene_data.settings.global_matrix if do_bake_space_transform else None
# We need to apply the inverse transpose of the global matrix when transforming normals.
geom_mat_no = Matrix(scene_data.settings.global_matrix_inv_transposed) if do_bake_space_transform else None
if geom_mat_no is not None:
# Remove translation & scaling!
geom_mat_no.translation = Vector()
geom_mat_no.normalize()
geom = elem_data_single_int64(root, b"Geometry", get_fbx_uuid_from_key(me_key))
geom.add_string(fbx_name_class(me.name.encode(), b"Geometry"))
geom.add_string(b"Mesh")
tmpl = elem_props_template_init(scene_data.templates, b"Geometry")
props = elem_properties(geom)
# Custom properties.
if scene_data.settings.use_custom_props:
fbx_data_element_custom_properties(props, me)
    # Subdivision levels. Take them from the first subsurf modifier found on
    # the first object that has the mesh. Write crease information if the
    # object has a subsurf modifier.
write_crease = False
if scene_data.settings.use_subsurf:
last_subsurf = None
for mod in me_obj.bdata.modifiers:
if not (mod.show_render or mod.show_viewport):
continue
if mod.type == 'SUBSURF' and mod.subdivision_type == 'CATMULL_CLARK':
last_subsurf = mod
if last_subsurf:
elem_data_single_int32(geom, b"Smoothness", 2) # Display control mesh and smoothed
elem_data_single_int32(geom, b"BoundaryRule", 2) # Round edges like Blender
elem_data_single_int32(geom, b"PreviewDivisionLevels", last_subsurf.levels)
elem_data_single_int32(geom, b"RenderDivisionLevels", last_subsurf.render_levels)
elem_data_single_int32(geom, b"PreserveBorders", 0)
elem_data_single_int32(geom, b"PreserveHardEdges", 0)
elem_data_single_int32(geom, b"PropagateEdgeHardness", 0)
write_crease = last_subsurf.use_creases
elem_data_single_int32(geom, b"GeometryVersion", FBX_GEOMETRY_VERSION)
# Vertex cos.
t_co = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.vertices) * 3
me.vertices.foreach_get("co", t_co)
elem_data_single_float64_array(geom, b"Vertices", chain(*vcos_transformed_gen(t_co, geom_mat_co)))
del t_co
# Polygon indices.
#
# We do loose edges as two-vertices faces, if enabled...
#
# Note we have to process Edges in the same time, as they are based on poly's loops...
loop_nbr = len(me.loops)
t_pvi = array.array(data_types.ARRAY_INT32, (0,)) * loop_nbr
t_ls = [None] * len(me.polygons)
me.loops.foreach_get("vertex_index", t_pvi)
me.polygons.foreach_get("loop_start", t_ls)
# Add "fake" faces for loose edges.
if scene_data.settings.use_mesh_edges:
t_le = tuple(e.vertices for e in me.edges if e.is_loose)
t_pvi.extend(chain(*t_le))
t_ls.extend(range(loop_nbr, loop_nbr + len(t_le), 2))
del t_le
# Edges...
    # Note: Edges are represented as a loop here: each edge uses a single index, which refers to the polygon array.
    #       The edge is made of the vertex indexed by this polygon loop entry and the next one on the same polygon.
    #       Advantage: Only one index per edge.
    #       Drawback: Only a polygon's edges can be represented (that's why we have to add fake two-vert polygons
    #                 for loose edges).
    #       We also have to store a mapping from real edges to their indices in this array, for edge-mapped data
    #       (like e.g. crease).
t_eli = array.array(data_types.ARRAY_INT32)
edges_map = {}
edges_nbr = 0
if t_ls and t_pvi:
t_ls = set(t_ls)
todo_edges = [None] * len(me.edges) * 2
# Sigh, cannot access edge.key through foreach_get... :/
me.edges.foreach_get("vertices", todo_edges)
todo_edges = set((v1, v2) if v1 < v2 else (v2, v1) for v1, v2 in zip(*(iter(todo_edges),) * 2))
li = 0
vi = vi_start = t_pvi[0]
for li_next, vi_next in enumerate(t_pvi[1:] + t_pvi[:1], start=1):
if li_next in t_ls: # End of a poly's loop.
vi2 = vi_start
vi_start = vi_next
else:
vi2 = vi_next
e_key = (vi, vi2) if vi < vi2 else (vi2, vi)
if e_key in todo_edges:
t_eli.append(li)
todo_edges.remove(e_key)
edges_map[e_key] = edges_nbr
edges_nbr += 1
vi = vi_next
li = li_next
# End of edges!
# We have to ^-1 last index of each loop.
for ls in t_ls:
t_pvi[ls - 1] ^= -1
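    # Illustrative note (FBX convention): the last vertex index of each
    # polygon is stored bitwise-negated, i.e. index i becomes (i ^ -1) == -i - 1,
    # so a triangle (0, 1, 2) is written as [0, 1, -3].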
# And finally we can write data!
elem_data_single_int32_array(geom, b"PolygonVertexIndex", t_pvi)
elem_data_single_int32_array(geom, b"Edges", t_eli)
del t_pvi
del t_ls
del t_eli
# And now, layers!
# Smoothing.
if smooth_type in {'FACE', 'EDGE'}:
t_ps = None
_map = b""
if smooth_type == 'FACE':
t_ps = array.array(data_types.ARRAY_INT32, (0,)) * len(me.polygons)
me.polygons.foreach_get("use_smooth", t_ps)
_map = b"ByPolygon"
else: # EDGE
# Write Edge Smoothing.
            # Note an edge is also sharp if it's used by more than two faces, or if one of its faces is flat.
t_ps = array.array(data_types.ARRAY_INT32, (0,)) * edges_nbr
sharp_edges = set()
temp_sharp_edges = {}
for p in me.polygons:
if not p.use_smooth:
sharp_edges.update(p.edge_keys)
continue
for k in p.edge_keys:
if temp_sharp_edges.setdefault(k, 0) > 1:
sharp_edges.add(k)
else:
temp_sharp_edges[k] += 1
del temp_sharp_edges
for e in me.edges:
if e.key not in edges_map:
continue # Only loose edges, in theory!
t_ps[edges_map[e.key]] = not (e.use_edge_sharp or (e.key in sharp_edges))
_map = b"ByEdge"
lay_smooth = elem_data_single_int32(geom, b"LayerElementSmoothing", 0)
elem_data_single_int32(lay_smooth, b"Version", FBX_GEOMETRY_SMOOTHING_VERSION)
elem_data_single_string(lay_smooth, b"Name", b"")
elem_data_single_string(lay_smooth, b"MappingInformationType", _map)
elem_data_single_string(lay_smooth, b"ReferenceInformationType", b"Direct")
        elem_data_single_int32_array(lay_smooth, b"Smoothing", t_ps)  # Sigh, int32 for bool...
del t_ps
# Edge crease for subdivision
if write_crease:
t_ec = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * edges_nbr
for e in me.edges:
if e.key not in edges_map:
continue # Only loose edges, in theory!
            # Blender squares those values before sending them to OpenSubdiv, while other software doesn't,
            # so we need to compensate for that to get similar results through FBX...
t_ec[edges_map[e.key]] = e.crease * e.crease
lay_crease = elem_data_single_int32(geom, b"LayerElementEdgeCrease", 0)
elem_data_single_int32(lay_crease, b"Version", FBX_GEOMETRY_CREASE_VERSION)
elem_data_single_string(lay_crease, b"Name", b"")
elem_data_single_string(lay_crease, b"MappingInformationType", b"ByEdge")
elem_data_single_string(lay_crease, b"ReferenceInformationType", b"Direct")
elem_data_single_float64_array(lay_crease, b"EdgeCrease", t_ec)
del t_ec
# And we are done with edges!
del edges_map
# Loop normals.
tspacenumber = 0
if write_normals:
# NOTE: this is not supported by importer currently.
        # XXX Official docs say normals should use IndexToDirect,
        #     but this does not seem well supported by apps currently...
me.calc_normals_split()
t_ln = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops) * 3
me.loops.foreach_get("normal", t_ln)
t_ln = nors_transformed_gen(t_ln, geom_mat_no)
if 0:
t_ln = tuple(t_ln) # No choice... :/
lay_nor = elem_data_single_int32(geom, b"LayerElementNormal", 0)
elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_NORMAL_VERSION)
elem_data_single_string(lay_nor, b"Name", b"")
elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_nor, b"ReferenceInformationType", b"IndexToDirect")
ln2idx = tuple(set(t_ln))
elem_data_single_float64_array(lay_nor, b"Normals", chain(*ln2idx))
            # Normal weights, no idea what they are.
# t_lnw = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(ln2idx)
# elem_data_single_float64_array(lay_nor, b"NormalsW", t_lnw)
ln2idx = {nor: idx for idx, nor in enumerate(ln2idx)}
elem_data_single_int32_array(lay_nor, b"NormalsIndex", (ln2idx[n] for n in t_ln))
del ln2idx
# del t_lnw
else:
lay_nor = elem_data_single_int32(geom, b"LayerElementNormal", 0)
elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_NORMAL_VERSION)
elem_data_single_string(lay_nor, b"Name", b"")
elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct")
elem_data_single_float64_array(lay_nor, b"Normals", chain(*t_ln))
# Normal weights, no idea what it is.
# t_ln = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops)
# elem_data_single_float64_array(lay_nor, b"NormalsW", t_ln)
del t_ln
# tspace
if scene_data.settings.use_tspace:
tspacenumber = len(me.uv_layers)
if tspacenumber:
# We can only compute tspace on tessellated meshes, need to check that here...
t_lt = [None] * len(me.polygons)
me.polygons.foreach_get("loop_total", t_lt)
if any((lt > 4 for lt in t_lt)):
del t_lt
scene_data.settings.report(
{'WARNING'},
"Mesh '%s' has polygons with more than 4 vertices, "
"cannot compute/export tangent space for it" % me.name)
else:
del t_lt
t_ln = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops) * 3
# t_lnw = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops)
uv_names = [uvlayer.name for uvlayer in me.uv_layers]
for name in uv_names:
me.calc_tangents(uvmap=name)
for idx, uvlayer in enumerate(me.uv_layers):
name = uvlayer.name
# Loop bitangents (aka binormals).
# NOTE: this is not supported by the importer currently.
me.loops.foreach_get("bitangent", t_ln)
lay_nor = elem_data_single_int32(geom, b"LayerElementBinormal", idx)
elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_BINORMAL_VERSION)
elem_data_single_string_unicode(lay_nor, b"Name", name)
elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct")
elem_data_single_float64_array(lay_nor, b"Binormals",
chain(*nors_transformed_gen(t_ln, geom_mat_no)))
# Binormal weights, no idea what it is.
# elem_data_single_float64_array(lay_nor, b"BinormalsW", t_lnw)
# Loop tangents.
# NOTE: this is not supported by the importer currently.
me.loops.foreach_get("tangent", t_ln)
lay_nor = elem_data_single_int32(geom, b"LayerElementTangent", idx)
elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_TANGENT_VERSION)
elem_data_single_string_unicode(lay_nor, b"Name", name)
elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct")
elem_data_single_float64_array(lay_nor, b"Tangents",
chain(*nors_transformed_gen(t_ln, geom_mat_no)))
# Tangent weights, no idea what it is.
# elem_data_single_float64_array(lay_nor, b"TangentsW", t_lnw)
del t_ln
# del t_lnw
me.free_tangents()
me.free_normals_split()
# Write VertexColor Layers.
vcolnumber = len(me.vertex_colors)
if vcolnumber:
def _coltuples_gen(raw_cols):
return zip(*(iter(raw_cols),) * 4)
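# The zip-of-one-iterator trick above chunks the flat color array into RGBA
# 4-tuples, e.g. [r0, g0, b0, a0, r1, g1, b1, a1] -> (r0, g0, b0, a0), (r1, g1, b1, a1).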
t_lc = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops) * 4
for colindex, collayer in enumerate(me.vertex_colors):
collayer.data.foreach_get("color", t_lc)
lay_vcol = elem_data_single_int32(geom, b"LayerElementColor", colindex)
elem_data_single_int32(lay_vcol, b"Version", FBX_GEOMETRY_VCOLOR_VERSION)
elem_data_single_string_unicode(lay_vcol, b"Name", collayer.name)
elem_data_single_string(lay_vcol, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_vcol, b"ReferenceInformationType", b"IndexToDirect")
col2idx = tuple(set(_coltuples_gen(t_lc)))
elem_data_single_float64_array(lay_vcol, b"Colors", chain(*col2idx)) # Flatten again...
col2idx = {col: idx for idx, col in enumerate(col2idx)}
elem_data_single_int32_array(lay_vcol, b"ColorIndex", (col2idx[c] for c in _coltuples_gen(t_lc)))
del col2idx
del t_lc
del _coltuples_gen
# Write UV layers.
# Note: LayerElementTexture is deprecated since FBX 2011 - luckily!
# Textures are now only related to materials, in FBX!
uvnumber = len(me.uv_layers)
if uvnumber:
# Looks like this mapping is also expected to convey UV islands (arg..... :((((( ).
# So we need to generate unique triplets (uv, vertex_idx) here, not only just based on UV values.
def _uvtuples_gen(raw_uvs, raw_lvidxs):
return zip(zip(*(iter(raw_uvs),) * 2), raw_lvidxs)
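# Pairing each UV with its loop's vertex index means two loops sharing the same
# (u, v) on *different* vertices stay distinct, e.g. ((0.5, 0.5), 3) != ((0.5, 0.5), 8),
# which is what lets the IndexToDirect mapping below keep UV islands separate.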
t_luv = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops) * 2
t_lvidx = array.array(data_types.ARRAY_INT32, (0,)) * len(me.loops)
me.loops.foreach_get("vertex_index", t_lvidx)
for uvindex, uvlayer in enumerate(me.uv_layers):
uvlayer.data.foreach_get("uv", t_luv)
lay_uv = elem_data_single_int32(geom, b"LayerElementUV", uvindex)
elem_data_single_int32(lay_uv, b"Version", FBX_GEOMETRY_UV_VERSION)
elem_data_single_string_unicode(lay_uv, b"Name", uvlayer.name)
elem_data_single_string(lay_uv, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_uv, b"ReferenceInformationType", b"IndexToDirect")
uv_ids = tuple(set(_uvtuples_gen(t_luv, t_lvidx)))
elem_data_single_float64_array(lay_uv, b"UV", chain(*(uv for uv, vidx in uv_ids))) # Flatten again...
uv2idx = {uv_id: idx for idx, uv_id in enumerate(uv_ids)}
elem_data_single_int32_array(lay_uv, b"UVIndex", (uv2idx[uv_id] for uv_id in _uvtuples_gen(t_luv, t_lvidx)))
del uv2idx
del uv_ids
del t_luv
del t_lvidx
del _uvtuples_gen
# Face's materials.
me_fbxmaterials_idx = scene_data.mesh_material_indices.get(me)
if me_fbxmaterials_idx is not None:
# We cannot use me.materials here, as this array is filled with None in case materials are linked to the object...
me_blmaterials = [mat_slot.material for mat_slot in me_obj.material_slots]
if me_fbxmaterials_idx and me_blmaterials:
lay_ma = elem_data_single_int32(geom, b"LayerElementMaterial", 0)
elem_data_single_int32(lay_ma, b"Version", FBX_GEOMETRY_MATERIAL_VERSION)
elem_data_single_string(lay_ma, b"Name", b"")
nbr_mats = len(me_fbxmaterials_idx)
if nbr_mats > 1:
t_pm = array.array(data_types.ARRAY_INT32, (0,)) * len(me.polygons)
me.polygons.foreach_get("material_index", t_pm)
# We have to validate mat indices, and map them to FBX indices.
# Note a mat might not be in me_fbxmaterials_idx (e.g. node mats are ignored).
blmaterials_to_fbxmaterials_idxs = [me_fbxmaterials_idx[m]
for m in me_blmaterials if m in me_fbxmaterials_idx]
ma_idx_limit = len(blmaterials_to_fbxmaterials_idxs)
def_ma = blmaterials_to_fbxmaterials_idxs[0]
_gen = (blmaterials_to_fbxmaterials_idxs[m] if m < ma_idx_limit else def_ma for m in t_pm)
t_pm = array.array(data_types.ARRAY_INT32, _gen)
elem_data_single_string(lay_ma, b"MappingInformationType", b"ByPolygon")
# XXX Logically, should be "Direct" reference type, since we do not have any index array, and have one
# value per polygon...
# But looks like FBX expects it to be IndexToDirect here (maybe because materials are already
# indices??? *sigh*).
elem_data_single_string(lay_ma, b"ReferenceInformationType", b"IndexToDirect")
elem_data_single_int32_array(lay_ma, b"Materials", t_pm)
del t_pm
else:
elem_data_single_string(lay_ma, b"MappingInformationType", b"AllSame")
elem_data_single_string(lay_ma, b"ReferenceInformationType", b"IndexToDirect")
elem_data_single_int32_array(lay_ma, b"Materials", [0])
# And the "layer TOC"...
layer = elem_data_single_int32(geom, b"Layer", 0)
elem_data_single_int32(layer, b"Version", FBX_GEOMETRY_LAYER_VERSION)
if write_normals:
lay_nor = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_nor, b"Type", b"LayerElementNormal")
elem_data_single_int32(lay_nor, b"TypedIndex", 0)
if tspacenumber:
lay_binor = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_binor, b"Type", b"LayerElementBinormal")
elem_data_single_int32(lay_binor, b"TypedIndex", 0)
lay_tan = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_tan, b"Type", b"LayerElementTangent")
elem_data_single_int32(lay_tan, b"TypedIndex", 0)
if smooth_type in {'FACE', 'EDGE'}:
lay_smooth = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_smooth, b"Type", b"LayerElementSmoothing")
elem_data_single_int32(lay_smooth, b"TypedIndex", 0)
if write_crease:
lay_smooth = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_smooth, b"Type", b"LayerElementEdgeCrease")
elem_data_single_int32(lay_smooth, b"TypedIndex", 0)
if vcolnumber:
lay_vcol = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_vcol, b"Type", b"LayerElementColor")
elem_data_single_int32(lay_vcol, b"TypedIndex", 0)
if uvnumber:
lay_uv = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_uv, b"Type", b"LayerElementUV")
elem_data_single_int32(lay_uv, b"TypedIndex", 0)
if me_fbxmaterials_idx is not None:
lay_ma = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_ma, b"Type", b"LayerElementMaterial")
elem_data_single_int32(lay_ma, b"TypedIndex", 0)
# Add other uv and/or vcol layers...
for vcolidx, uvidx, tspaceidx in zip_longest(range(1, vcolnumber), range(1, uvnumber), range(1, tspacenumber),
fillvalue=0):
layer = elem_data_single_int32(geom, b"Layer", max(vcolidx, uvidx))
elem_data_single_int32(layer, b"Version", FBX_GEOMETRY_LAYER_VERSION)
if vcolidx:
lay_vcol = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_vcol, b"Type", b"LayerElementColor")
elem_data_single_int32(lay_vcol, b"TypedIndex", vcolidx)
if uvidx:
lay_uv = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_uv, b"Type", b"LayerElementUV")
elem_data_single_int32(lay_uv, b"TypedIndex", uvidx)
if tspaceidx:
lay_binor = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_binor, b"Type", b"LayerElementBinormal")
elem_data_single_int32(lay_binor, b"TypedIndex", tspaceidx)
lay_tan = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_tan, b"Type", b"LayerElementTangent")
elem_data_single_int32(lay_tan, b"TypedIndex", tspaceidx)
# Shape keys...
fbx_data_mesh_shapes_elements(root, me_obj, me, scene_data, tmpl, props)
elem_props_template_finalize(tmpl, props)
done_meshes.add(me_key)
def fbx_data_material_elements(root, ma, scene_data):
"""
Write the Material data block.
"""
ambient_color = (0.0, 0.0, 0.0)
if scene_data.data_world:
ambient_color = next(iter(scene_data.data_world.keys())).color
ma_wrap = node_shader_utils.PrincipledBSDFWrapper(ma, is_readonly=True)
ma_key, _objs = scene_data.data_materials[ma]
ma_type = b"Phong"
fbx_ma = elem_data_single_int64(root, b"Material", get_fbx_uuid_from_key(ma_key))
fbx_ma.add_string(fbx_name_class(ma.name.encode(), b"Material"))
fbx_ma.add_string(b"")
elem_data_single_int32(fbx_ma, b"Version", FBX_MATERIAL_VERSION)
# Those are not yet properties, it seems...
elem_data_single_string(fbx_ma, b"ShadingModel", ma_type)
elem_data_single_int32(fbx_ma, b"MultiLayer", 0) # Should be bool...
tmpl = elem_props_template_init(scene_data.templates, b"Material")
props = elem_properties(fbx_ma)
elem_props_template_set(tmpl, props, "p_string", b"ShadingModel", ma_type.decode())
elem_props_template_set(tmpl, props, "p_color", b"DiffuseColor", ma_wrap.base_color)
# Not in Principled BSDF, so assuming always 1
elem_props_template_set(tmpl, props, "p_number", b"DiffuseFactor", 1.0)
# Principled BSDF only has an emissive color, so we assume factor to be always 1.0.
elem_props_template_set(tmpl, props, "p_color", b"EmissiveColor", ma_wrap.emission_color)
elem_props_template_set(tmpl, props, "p_number", b"EmissiveFactor", 1.0)
# Not in Principled BSDF, so assuming always 0
elem_props_template_set(tmpl, props, "p_color", b"AmbientColor", ambient_color)
elem_props_template_set(tmpl, props, "p_number", b"AmbientFactor", 0.0)
# Sweetness... Looks like we are not the only ones who don't know exactly how FBX is supposed to work (see T59850).
# According to one of its developers, Unity uses that formula to extract alpha value:
#
# alpha = 1 - TransparencyFactor
# if (alpha == 1 or alpha == 0):
# alpha = 1 - TransparentColor.r
#
# Until further info, let's assume this is the correct way to do it, hence the following code for TransparentColor.
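# Illustrative round-trip check of that assumption: alpha = 0.25 writes
# TransparencyFactor = 0.75, so Unity's first step already yields 0.25; alpha = 1.0
# writes TransparencyFactor = 0.0 and TransparentColor = (0.0, 0.0, 0.0), so the
# fallback 1 - TransparentColor.r also yields 1.0.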
if ma_wrap.alpha < 1.0e-5 or ma_wrap.alpha > (1.0 - 1.0e-5):
elem_props_template_set(tmpl, props, "p_color", b"TransparentColor", (1.0 - ma_wrap.alpha,) * 3)
else:
elem_props_template_set(tmpl, props, "p_color", b"TransparentColor", ma_wrap.base_color)
elem_props_template_set(tmpl, props, "p_number", b"TransparencyFactor", 1.0 - ma_wrap.alpha)
elem_props_template_set(tmpl, props, "p_number", b"Opacity", ma_wrap.alpha)
elem_props_template_set(tmpl, props, "p_vector_3d", b"NormalMap", (0.0, 0.0, 0.0))
elem_props_template_set(tmpl, props, "p_double", b"BumpFactor", ma_wrap.normalmap_strength)
# Not sure about those...
"""
b"Bump": ((0.0, 0.0, 0.0), "p_vector_3d"),
b"DisplacementColor": ((0.0, 0.0, 0.0), "p_color_rgb"),
b"DisplacementFactor": (0.0, "p_double"),
"""
# TODO: use specular tint?
elem_props_template_set(tmpl, props, "p_color", b"SpecularColor", ma_wrap.base_color)
elem_props_template_set(tmpl, props, "p_number", b"SpecularFactor", ma_wrap.specular / 2.0)
# See Material template about those two!
# XXX Totally empirical conversion, trying to adapt it
# (from 0.0 - 100.0 FBX shininess range to 1.0 - 0.0 Principled BSDF range)...
shininess = (1.0 - ma_wrap.roughness) * 10
shininess *= shininess
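# Worked examples of the empirical mapping: roughness 0.0 -> (10.0)**2 = 100.0 (max
# FBX shininess), roughness 0.5 -> 25.0, roughness 1.0 -> 0.0.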
elem_props_template_set(tmpl, props, "p_number", b"Shininess", shininess)
elem_props_template_set(tmpl, props, "p_number", b"ShininessExponent", shininess)
elem_props_template_set(tmpl, props, "p_color", b"ReflectionColor", ma_wrap.base_color)
elem_props_template_set(tmpl, props, "p_number", b"ReflectionFactor", ma_wrap.metallic)
elem_props_template_finalize(tmpl, props)
# Custom properties.
if scene_data.settings.use_custom_props:
fbx_data_element_custom_properties(props, ma)
def _gen_vid_path(img, scene_data):
msetts = scene_data.settings.media_settings
fname_rel = bpy_extras.io_utils.path_reference(img.filepath, msetts.base_src, msetts.base_dst, msetts.path_mode,
msetts.subdir, msetts.copy_set, img.library)
fname_abs = os.path.normpath(os.path.abspath(os.path.join(msetts.base_dst, fname_rel)))
return fname_abs, fname_rel
def fbx_data_texture_file_elements(root, blender_tex_key, scene_data):
"""
Write the (file) Texture data block.
"""
# XXX All this is very fuzzy to me currently...
# Textures do not seem to use properties as much as they could.
# For now assuming most logical and simple stuff.
ma, sock_name = blender_tex_key
ma_wrap = node_shader_utils.PrincipledBSDFWrapper(ma, is_readonly=True)
tex_key, _fbx_prop = scene_data.data_textures[blender_tex_key]
tex = getattr(ma_wrap, sock_name)
img = tex.image
fname_abs, fname_rel = _gen_vid_path(img, scene_data)
fbx_tex = elem_data_single_int64(root, b"Texture", get_fbx_uuid_from_key(tex_key))
fbx_tex.add_string(fbx_name_class(sock_name.encode(), b"Texture"))
fbx_tex.add_string(b"")
elem_data_single_string(fbx_tex, b"Type", b"TextureVideoClip")
elem_data_single_int32(fbx_tex, b"Version", FBX_TEXTURE_VERSION)
elem_data_single_string(fbx_tex, b"TextureName", fbx_name_class(sock_name.encode(), b"Texture"))
elem_data_single_string(fbx_tex, b"Media", fbx_name_class(img.name.encode(), b"Video"))
elem_data_single_string_unicode(fbx_tex, b"FileName", fname_abs)
elem_data_single_string_unicode(fbx_tex, b"RelativeFilename", fname_rel)
alpha_source = 0 # None
if img.alpha_mode != 'NONE':
# ~ if tex.texture.use_calculate_alpha:
# ~ alpha_source = 1 # RGBIntensity as alpha.
# ~ else:
# ~ alpha_source = 2 # Black, i.e. alpha channel.
alpha_source = 2 # Black, i.e. alpha channel.
# BlendMode not useful for now, only affects layered textures afaics.
mapping = 0 # UV.
uvset = None
if tex.texcoords == 'ORCO': # XXX Others?
if tex.projection == 'FLAT':
mapping = 1 # Planar
elif tex.projection == 'CUBE':
mapping = 4 # Box
elif tex.projection == 'TUBE':
mapping = 3 # Cylindrical
elif tex.projection == 'SPHERE':
mapping = 2 # Spherical
elif tex.texcoords == 'UV':
mapping = 0 # UV
# Yuck, UVs are linked by mere names it seems... :/
# XXX TODO how to get that now???
# uvset = tex.uv_layer
wrap_mode = 1 # Clamp
if tex.extension == 'REPEAT':
wrap_mode = 0 # Repeat
tmpl = elem_props_template_init(scene_data.templates, b"TextureFile")
props = elem_properties(fbx_tex)
elem_props_template_set(tmpl, props, "p_enum", b"AlphaSource", alpha_source)
elem_props_template_set(tmpl, props, "p_bool", b"PremultiplyAlpha",
img.alpha_mode in {'STRAIGHT'}) # Or is it PREMUL?
elem_props_template_set(tmpl, props, "p_enum", b"CurrentMappingType", mapping)
if uvset is not None:
elem_props_template_set(tmpl, props, "p_string", b"UVSet", uvset)
elem_props_template_set(tmpl, props, "p_enum", b"WrapModeU", wrap_mode)
elem_props_template_set(tmpl, props, "p_enum", b"WrapModeV", wrap_mode)
elem_props_template_set(tmpl, props, "p_vector_3d", b"Translation", tex.translation)
elem_props_template_set(tmpl, props, "p_vector_3d", b"Rotation", (-r for r in tex.rotation))
elem_props_template_set(tmpl, props, "p_vector_3d", b"Scaling", (((1.0 / s) if s != 0.0 else 1.0) for s in tex.scale))
# UseMaterial should always be ON imho.
elem_props_template_set(tmpl, props, "p_bool", b"UseMaterial", True)
elem_props_template_set(tmpl, props, "p_bool", b"UseMipMap", False)
elem_props_template_finalize(tmpl, props)
# No custom properties, since that's not a data-block anymore.
def fbx_data_video_elements(root, vid, scene_data):
"""
Write the actual image data block.
"""
msetts = scene_data.settings.media_settings
vid_key, _texs = scene_data.data_videos[vid]
fname_abs, fname_rel = _gen_vid_path(vid, scene_data)
fbx_vid = elem_data_single_int64(root, b"Video", get_fbx_uuid_from_key(vid_key))
fbx_vid.add_string(fbx_name_class(vid.name.encode(), b"Video"))
fbx_vid.add_string(b"Clip")
elem_data_single_string(fbx_vid, b"Type", b"Clip")
# XXX No Version???
tmpl = elem_props_template_init(scene_data.templates, b"Video")
props = elem_properties(fbx_vid)
elem_props_template_set(tmpl, props, "p_string_url", b"Path", fname_abs)
elem_props_template_finalize(tmpl, props)
elem_data_single_int32(fbx_vid, b"UseMipMap", 0)
elem_data_single_string_unicode(fbx_vid, b"Filename", fname_abs)
elem_data_single_string_unicode(fbx_vid, b"RelativeFilename", fname_rel)
if scene_data.settings.media_settings.embed_textures:
if vid.packed_file is not None:
# We only ever embed a given file once!
if fname_abs not in msetts.embedded_set:
elem_data_single_bytes(fbx_vid, b"Content", vid.packed_file.data)
msetts.embedded_set.add(fname_abs)
else:
filepath = bpy.path.abspath(vid.filepath)
# We only ever embed a given file once!
if filepath not in msetts.embedded_set:
try:
with open(filepath, 'br') as f:
elem_data_single_bytes(fbx_vid, b"Content", f.read())
except Exception as e:
print("WARNING: embedding file {} failed ({})".format(filepath, e))
elem_data_single_bytes(fbx_vid, b"Content", b"")
msetts.embedded_set.add(filepath)
# Looks like we'd rather not write any 'Content' element in this case (see T44442).
# Sounds suspect, but let's try it!
#~ else:
#~ elem_data_single_bytes(fbx_vid, b"Content", b"")
def fbx_data_armature_elements(root, arm_obj, scene_data):
"""
Write:
* Bones "data" (NodeAttribute::LimbNode, contains pretty much nothing!).
* Deformers (i.e. Skin), bind between an armature and a mesh.
** SubDeformers (i.e. Cluster), one per bone/vgroup pair.
* BindPose.
Note armature itself has no data, it is a mere "Null" Model...
"""
mat_world_arm = arm_obj.fbx_object_matrix(scene_data, global_space=True)
bones = tuple(bo_obj for bo_obj in arm_obj.bones if bo_obj in scene_data.objects)
bone_radius_scale = 33.0
# Bones "data".
for bo_obj in bones:
bo = bo_obj.bdata
bo_data_key = scene_data.data_bones[bo_obj]
fbx_bo = elem_data_single_int64(root, b"NodeAttribute", get_fbx_uuid_from_key(bo_data_key))
fbx_bo.add_string(fbx_name_class(bo.name.encode(), b"NodeAttribute"))
fbx_bo.add_string(b"LimbNode")
elem_data_single_string(fbx_bo, b"TypeFlags", b"Skeleton")
tmpl = elem_props_template_init(scene_data.templates, b"Bone")
props = elem_properties(fbx_bo)
elem_props_template_set(tmpl, props, "p_double", b"Size", bo.head_radius * bone_radius_scale)
elem_props_template_finalize(tmpl, props)
# Custom properties.
if scene_data.settings.use_custom_props:
fbx_data_element_custom_properties(props, bo)
# Store Blender bone length - XXX Not very useful actually :/
# (LimbLength can't be used because it is a scale factor 0-1 for the parent-child distance:
# http://docs.autodesk.com/FBX/2014/ENU/FBX-SDK-Documentation/cpp_ref/class_fbx_skeleton.html#a9bbe2a70f4ed82cd162620259e649f0f )
# elem_props_set(props, "p_double", "BlenderBoneLength".encode(), (bo.tail_local - bo.head_local).length, custom=True)
# Skin deformers and BindPoses.
# Note: we might also use Deformers for our "parent to vertex" stuff???
deformer = scene_data.data_deformers_skin.get(arm_obj, None)
if deformer is not None:
for me, (skin_key, ob_obj, clusters) in deformer.items():
# BindPose.
mat_world_obj, mat_world_bones = fbx_data_bindpose_element(root, ob_obj, me, scene_data,
arm_obj, mat_world_arm, bones)
# Deformer.
fbx_skin = elem_data_single_int64(root, b"Deformer", get_fbx_uuid_from_key(skin_key))
fbx_skin.add_string(fbx_name_class(arm_obj.name.encode(), b"Deformer"))
fbx_skin.add_string(b"Skin")
elem_data_single_int32(fbx_skin, b"Version", FBX_DEFORMER_SKIN_VERSION)
elem_data_single_float64(fbx_skin, b"Link_DeformAcuracy", 50.0) # Only vague idea what it is...
# Pre-process vertex weights (also to check vertices assigned to more than four bones).
ob = ob_obj.bdata
bo_vg_idx = {bo_obj.bdata.name: ob.vertex_groups[bo_obj.bdata.name].index
for bo_obj in clusters.keys() if bo_obj.bdata.name in ob.vertex_groups}
valid_idxs = set(bo_vg_idx.values())
vgroups = {vg.index: {} for vg in ob.vertex_groups}
verts_vgroups = (sorted(((vg.group, vg.weight) for vg in v.groups if vg.weight and vg.group in valid_idxs),
key=lambda e: e[1], reverse=True)
for v in me.vertices)
for idx, vgs in enumerate(verts_vgroups):
for vg_idx, w in vgs:
vgroups[vg_idx][idx] = w
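# Illustration with hypothetical names: vertex 7 weighted 0.7 to group "upper_arm"
# and 0.3 to "forearm" ends up as vgroups[idx_upper][7] = 0.7 and
# vgroups[idx_fore][7] = 0.3; each cluster below then pulls its own
# {vertex: weight} mapping via bo_vg_idx.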
for bo_obj, clstr_key in clusters.items():
bo = bo_obj.bdata
# Find which vertices are affected by this bone/vgroup pair, and matching weights.
# Note we still write a cluster for bones not affecting the mesh, to get 'rest pose' data
# (the TransformBlah matrices).
vg_idx = bo_vg_idx.get(bo.name, None)
indices, weights = ((), ()) if vg_idx is None or not vgroups[vg_idx] else zip(*vgroups[vg_idx].items())
# Create the cluster.
fbx_clstr = elem_data_single_int64(root, b"Deformer", get_fbx_uuid_from_key(clstr_key))
fbx_clstr.add_string(fbx_name_class(bo.name.encode(), b"SubDeformer"))
fbx_clstr.add_string(b"Cluster")
elem_data_single_int32(fbx_clstr, b"Version", FBX_DEFORMER_CLUSTER_VERSION)
# No idea what that user data might be...
fbx_userdata = elem_data_single_string(fbx_clstr, b"UserData", b"")
fbx_userdata.add_string(b"")
if indices:
elem_data_single_int32_array(fbx_clstr, b"Indexes", indices)
elem_data_single_float64_array(fbx_clstr, b"Weights", weights)
# Transform, TransformLink and TransformAssociateModel matrices...
# They seem to be duplicates of the BindPose ones??? Have armature (associatemodel) in addition, though.
# WARNING! Even though official FBX API presents Transform in global space,
# **it is stored in bone space in FBX data!** See:
# http://area.autodesk.com/forum/autodesk-fbx/fbx-sdk/why-the-values-return-
# by-fbxcluster-gettransformmatrix-x-not-same-with-the-value-in-ascii-fbx-file/
elem_data_single_float64_array(fbx_clstr, b"Transform",
matrix4_to_array(mat_world_bones[bo_obj].inverted_safe() @ mat_world_obj))
elem_data_single_float64_array(fbx_clstr, b"TransformLink", matrix4_to_array(mat_world_bones[bo_obj]))
elem_data_single_float64_array(fbx_clstr, b"TransformAssociateModel", matrix4_to_array(mat_world_arm))
def fbx_data_leaf_bone_elements(root, scene_data):
# Write a dummy leaf bone that is used by applications to show the length of the last bone in a chain
for (node_name, _par_uuid, node_uuid, attr_uuid, matrix, hide, size) in scene_data.data_leaf_bones:
# Bone 'data'...
fbx_bo = elem_data_single_int64(root, b"NodeAttribute", attr_uuid)
fbx_bo.add_string(fbx_name_class(node_name.encode(), b"NodeAttribute"))
fbx_bo.add_string(b"LimbNode")
elem_data_single_string(fbx_bo, b"TypeFlags", b"Skeleton")
tmpl = elem_props_template_init(scene_data.templates, b"Bone")
props = elem_properties(fbx_bo)
elem_props_template_set(tmpl, props, "p_double", b"Size", size)
elem_props_template_finalize(tmpl, props)
# And bone object.
model = elem_data_single_int64(root, b"Model", node_uuid)
model.add_string(fbx_name_class(node_name.encode(), b"Model"))
model.add_string(b"LimbNode")
elem_data_single_int32(model, b"Version", FBX_MODELS_VERSION)
# Object transform info.
loc, rot, scale = matrix.decompose()
rot = rot.to_euler('XYZ')
rot = tuple(convert_rad_to_deg_iter(rot))
tmpl = elem_props_template_init(scene_data.templates, b"Model")
# For now add only loc/rot/scale...
props = elem_properties(model)
# Generated leaf bones are obviously never animated!
elem_props_template_set(tmpl, props, "p_lcl_translation", b"Lcl Translation", loc)
elem_props_template_set(tmpl, props, "p_lcl_rotation", b"Lcl Rotation", rot)
elem_props_template_set(tmpl, props, "p_lcl_scaling", b"Lcl Scaling", scale)
elem_props_template_set(tmpl, props, "p_visibility", b"Visibility", float(not hide))
# Absolutely no idea what this is, but seems mandatory for validity of the file, and defaults to
# invalid -1 value...
elem_props_template_set(tmpl, props, "p_integer", b"DefaultAttributeIndex", 0)
elem_props_template_set(tmpl, props, "p_enum", b"InheritType", 1) # RSrs
# Those settings would obviously need to be edited in a complete version of the exporter;
# they may depend on object type, etc.
elem_data_single_int32(model, b"MultiLayer", 0)
elem_data_single_int32(model, b"MultiTake", 0)
elem_data_single_bool(model, b"Shading", True)
elem_data_single_string(model, b"Culling", b"CullingOff")
elem_props_template_finalize(tmpl, props)
def fbx_data_object_elements(root, ob_obj, scene_data):
"""
Write the Object (Model) data blocks.
Note this "Model" can also be bone or dupli!
"""
obj_type = b"Null" # default, sort of empty...
if ob_obj.is_bone:
obj_type = b"LimbNode"
elif (ob_obj.type == 'ARMATURE'):
if scene_data.settings.armature_nodetype == 'ROOT':
obj_type = b"Root"
elif scene_data.settings.armature_nodetype == 'LIMBNODE':
obj_type = b"LimbNode"
else: # Default, preferred option...
obj_type = b"Null"
elif (ob_obj.type in BLENDER_OBJECT_TYPES_MESHLIKE):
obj_type = b"Mesh"
elif (ob_obj.type == 'LIGHT'):
obj_type = b"Light"
elif (ob_obj.type == 'CAMERA'):
obj_type = b"Camera"
model = elem_data_single_int64(root, b"Model", ob_obj.fbx_uuid)
model.add_string(fbx_name_class(ob_obj.name.encode(), b"Model"))
model.add_string(obj_type)
elem_data_single_int32(model, b"Version", FBX_MODELS_VERSION)
# Object transform info.
loc, rot, scale, matrix, matrix_rot = ob_obj.fbx_object_tx(scene_data)
rot = tuple(convert_rad_to_deg_iter(rot))
tmpl = elem_props_template_init(scene_data.templates, b"Model")
# For now add only loc/rot/scale...
props = elem_properties(model)
elem_props_template_set(tmpl, props, "p_lcl_translation", b"Lcl Translation", loc,
animatable=True, animated=((ob_obj.key, "Lcl Translation") in scene_data.animated))
elem_props_template_set(tmpl, props, "p_lcl_rotation", b"Lcl Rotation", rot,
animatable=True, animated=((ob_obj.key, "Lcl Rotation") in scene_data.animated))
elem_props_template_set(tmpl, props, "p_lcl_scaling", b"Lcl Scaling", scale,
animatable=True, animated=((ob_obj.key, "Lcl Scaling") in scene_data.animated))
elem_props_template_set(tmpl, props, "p_visibility", b"Visibility", float(not ob_obj.hide))
# Absolutely no idea what this is, but seems mandatory for validity of the file, and defaults to
# invalid -1 value...
elem_props_template_set(tmpl, props, "p_integer", b"DefaultAttributeIndex", 0)
elem_props_template_set(tmpl, props, "p_enum", b"InheritType", 1) # RSrs
# Custom properties.
if scene_data.settings.use_custom_props:
# Here we want custom props from the 'pose' bone, not the 'edit' bone...
bdata = ob_obj.bdata_pose_bone if ob_obj.is_bone else ob_obj.bdata
fbx_data_element_custom_properties(props, bdata)
# Those settings would obviously need to be edited in a complete version of the exporter;
# they may depend on object type, etc.
elem_data_single_int32(model, b"MultiLayer", 0)
elem_data_single_int32(model, b"MultiTake", 0)
elem_data_single_bool(model, b"Shading", True)
elem_data_single_string(model, b"Culling", b"CullingOff")
if obj_type == b"Camera":
# Why, oh why are FBX cameras such a mess???
# And WHY add camera data HERE??? Not even sure this is needed...
render = scene_data.scene.render
width = render.resolution_x * 1.0
height = render.resolution_y * 1.0
elem_props_template_set(tmpl, props, "p_enum", b"ResolutionMode", 0) # Don't know what it means
elem_props_template_set(tmpl, props, "p_double", b"AspectW", width)
elem_props_template_set(tmpl, props, "p_double", b"AspectH", height)
elem_props_template_set(tmpl, props, "p_bool", b"ViewFrustum", True)
elem_props_template_set(tmpl, props, "p_enum", b"BackgroundMode", 0) # Don't know what it means
elem_props_template_set(tmpl, props, "p_bool", b"ForegroundTransparent", True)
elem_props_template_finalize(tmpl, props)
def fbx_data_animation_elements(root, scene_data):
"""
Write animation data.
"""
animations = scene_data.animations
if not animations:
return
scene = scene_data.scene
fps = scene.render.fps / scene.render.fps_base
def keys_to_ktimes(keys):
return (int(v) for v in convert_sec_to_ktime_iter((f / fps for f, _v in keys)))
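# E.g. at 25 fps a key on frame 50 maps to 2.0 s, i.e. 2 * FBX_KTIME int64 ticks
# (assuming the usual FBX tick rate of 46,186,158,000 per second used by
# convert_sec_to_ktime_iter).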
# Animation stacks.
for astack_key, alayers, alayer_key, name, f_start, f_end in animations:
astack = elem_data_single_int64(root, b"AnimationStack", get_fbx_uuid_from_key(astack_key))
astack.add_string(fbx_name_class(name, b"AnimStack"))
astack.add_string(b"")
astack_tmpl = elem_props_template_init(scene_data.templates, b"AnimationStack")
astack_props = elem_properties(astack)
start = int(convert_sec_to_ktime(f_start / fps))
end = int(convert_sec_to_ktime(f_end / fps))
elem_props_template_set(astack_tmpl, astack_props, "p_timestamp", b"LocalStart", start)
elem_props_template_set(astack_tmpl, astack_props, "p_timestamp", b"LocalStop", end)
elem_props_template_set(astack_tmpl, astack_props, "p_timestamp", b"ReferenceStart", start)
elem_props_template_set(astack_tmpl, astack_props, "p_timestamp", b"ReferenceStop", end)
elem_props_template_finalize(astack_tmpl, astack_props)
# For now, only one layer for all animations.
alayer = elem_data_single_int64(root, b"AnimationLayer", get_fbx_uuid_from_key(alayer_key))
alayer.add_string(fbx_name_class(name, b"AnimLayer"))
alayer.add_string(b"")
for ob_obj, (alayer_key, acurvenodes) in alayers.items():
# Animation layer.
# alayer = elem_data_single_int64(root, b"AnimationLayer", get_fbx_uuid_from_key(alayer_key))
# alayer.add_string(fbx_name_class(ob_obj.name.encode(), b"AnimLayer"))
# alayer.add_string(b"")
for fbx_prop, (acurvenode_key, acurves, acurvenode_name) in acurvenodes.items():
# Animation curve node.
acurvenode = elem_data_single_int64(root, b"AnimationCurveNode", get_fbx_uuid_from_key(acurvenode_key))
acurvenode.add_string(fbx_name_class(acurvenode_name.encode(), b"AnimCurveNode"))
acurvenode.add_string(b"")
acn_tmpl = elem_props_template_init(scene_data.templates, b"AnimationCurveNode")
acn_props = elem_properties(acurvenode)
for fbx_item, (acurve_key, def_value, keys, _acurve_valid) in acurves.items():
elem_props_template_set(acn_tmpl, acn_props, "p_number", fbx_item.encode(),
def_value, animatable=True)
# Only create Animation curve if needed!
if keys:
acurve = elem_data_single_int64(root, b"AnimationCurve", get_fbx_uuid_from_key(acurve_key))
acurve.add_string(fbx_name_class(b"", b"AnimCurve"))
acurve.add_string(b"")
# key attributes...
nbr_keys = len(keys)
# flags...
keyattr_flags = (
1 << 2 | # interpolation mode, 1 = constant, 2 = linear, 3 = cubic.
1 << 8 | # tangent mode, 8 = auto, 9 = TCB, 10 = user, 11 = generic break,
1 << 13 | # tangent mode, 12 = generic clamp, 13 = generic time independent,
1 << 14 | # tangent mode, 13 + 14 = generic clamp progressive.
0,
)
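# Given the bit meanings listed above, that single shared flag value is
# (1 << 2) + (1 << 8) + (1 << 13) + (1 << 14) = 24836; the (nbr_keys,)
# KeyAttrRefCount written below makes every key on the curve reference it.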
# Maybe values controlling TCB & co???
keyattr_datafloat = (0.0, 0.0, 9.419963346924634e-30, 0.0)
# And now, the *real* data!
elem_data_single_float64(acurve, b"Default", def_value)
elem_data_single_int32(acurve, b"KeyVer", FBX_ANIM_KEY_VERSION)
elem_data_single_int64_array(acurve, b"KeyTime", keys_to_ktimes(keys))
elem_data_single_float32_array(acurve, b"KeyValueFloat", (v for _f, v in keys))
elem_data_single_int32_array(acurve, b"KeyAttrFlags", keyattr_flags)
elem_data_single_float32_array(acurve, b"KeyAttrDataFloat", keyattr_datafloat)
elem_data_single_int32_array(acurve, b"KeyAttrRefCount", (nbr_keys,))
elem_props_template_finalize(acn_tmpl, acn_props)
# ##### Top-level FBX data container. #####
# Mapping Blender -> FBX (principled_socket_name, fbx_name).
PRINCIPLED_TEXTURE_SOCKETS_TO_FBX = (
# ("diffuse", "diffuse", b"DiffuseFactor"),
("base_color_texture", b"DiffuseColor"),
("alpha_texture", b"TransparencyFactor"), # Will be inverted in fact, not much we can do really...
# ("base_color_texture", b"TransparentColor"), # Uses diffuse color in Blender!
# ("emit", "emit", b"EmissiveFactor"),
("emission_color_texture", b"EmissiveColor"),
# ("ambient", "ambient", b"AmbientFactor"),
# ("", "", b"AmbientColor"), # World stuff in Blender, for now ignore...
("normalmap_texture", b"NormalMap"),
# Note: unsure about those... :/
# ("", "", b"Bump"),
# ("", "", b"BumpFactor"),
# ("", "", b"DisplacementColor"),
# ("", "", b"DisplacementFactor"),
("specular_texture", b"SpecularFactor"),
# ("base_color", b"SpecularColor"), # TODO: use tint?
# See Material template about those two!
("roughness_texture", b"Shininess"),
("roughness_texture", b"ShininessExponent"),
# ("mirror", "mirror", b"ReflectionColor"),
("metallic_texture", b"ReflectionFactor"),
)
def fbx_skeleton_from_armature(scene, settings, arm_obj, objects, data_meshes,
data_bones, data_deformers_skin, data_empties, arm_parents):
"""
Create skeleton from armature/bones (NodeAttribute/LimbNode and Model/LimbNode), and for each deformed mesh,
create Pose/BindPose(with sub PoseNode) and Deformer/Skin(with Deformer/SubDeformer/Cluster).
Also supports "parent to bone" (simple parent to Model/LimbNode).
arm_parents is a set of tuples (armature, object) for all successful armature bindings.
"""
# We need some data for our armature 'object' too!!!
data_empties[arm_obj] = get_blender_empty_key(arm_obj.bdata)
arm_data = arm_obj.bdata.data
bones = {}
for bo in arm_obj.bones:
if settings.use_armature_deform_only:
if bo.bdata.use_deform:
bones[bo] = True
bo_par = bo.parent
while bo_par.is_bone:
bones[bo_par] = True
bo_par = bo_par.parent
elif bo not in bones: # Do not override if already set in the loop above!
bones[bo] = False
else:
bones[bo] = True
bones = {bo: None for bo, use in bones.items() if use}
if not bones:
return
data_bones.update((bo, get_blender_bone_key(arm_obj.bdata, bo.bdata)) for bo in bones)
for ob_obj in objects:
if not ob_obj.is_deformed_by_armature(arm_obj):
continue
# Always handled by an Armature modifier...
found = False
for mod in ob_obj.bdata.modifiers:
if mod.type not in {'ARMATURE'} or not mod.object:
continue
# We only support the vertex-group binding method, not the bone-envelope one!
if mod.object in {arm_obj.bdata, arm_obj.bdata.proxy} and mod.use_vertex_groups:
found = True
break
if not found:
continue
# Now we have a mesh using this armature.
# Note: bindpose have no relations at all (no connections), so no need for any preprocess for them.
# Create skin & clusters relations (note skins are connected to geometry, *not* model!).
_key, me, _free = data_meshes[ob_obj]
clusters = {bo: get_blender_bone_cluster_key(arm_obj.bdata, me, bo.bdata) for bo in bones}
data_deformers_skin.setdefault(arm_obj, {})[me] = (get_blender_armature_skin_key(arm_obj.bdata, me),
ob_obj, clusters)
# We don't want a regular parent relationship for those in FBX...
arm_parents.add((arm_obj, ob_obj))
# Needed to handle matrices/spaces (since we do not parent them to 'armature' in FBX :/ ).
ob_obj.parented_to_armature = True
objects.update(bones)
def fbx_generate_leaf_bones(settings, data_bones):
# Find which bones have no children.
child_count = {bo: 0 for bo in data_bones.keys()}
for bo in data_bones.keys():
if bo.parent and bo.parent.is_bone:
child_count[bo.parent] += 1
bone_radius_scale = settings.global_scale * 33.0
# generate bone data
leaf_parents = [bo for bo, count in child_count.items() if count == 0]
leaf_bones = []
for parent in leaf_parents:
node_name = parent.name + "_end"
parent_uuid = parent.fbx_uuid
parent_key = parent.key
node_uuid = get_fbx_uuid_from_key(parent_key + "_end_node")
attr_uuid = get_fbx_uuid_from_key(parent_key + "_end_nodeattr")
hide = parent.hide
size = parent.bdata.head_radius * bone_radius_scale
bone_length = (parent.bdata.tail_local - parent.bdata.head_local).length
matrix = Matrix.Translation((0, bone_length, 0))
if settings.bone_correction_matrix_inv:
matrix = settings.bone_correction_matrix_inv @ matrix
if settings.bone_correction_matrix:
matrix = matrix @ settings.bone_correction_matrix
leaf_bones.append((node_name, parent_uuid, node_uuid, attr_uuid, matrix, hide, size))
return leaf_bones
def fbx_animations_do(scene_data, ref_id, f_start, f_end, start_zero, objects=None, force_keep=False):
"""
Generate animation data (a single AnimStack) from objects, for a given frame range.
"""
bake_step = scene_data.settings.bake_anim_step
simplify_fac = scene_data.settings.bake_anim_simplify_factor
scene = scene_data.scene
depsgraph = scene_data.depsgraph
force_keying = scene_data.settings.bake_anim_use_all_bones
force_sek = scene_data.settings.bake_anim_force_startend_keying
if objects is not None:
# Add bones and duplis!
for ob_obj in tuple(objects):
if not ob_obj.is_object:
continue
if ob_obj.type == 'ARMATURE':
objects |= {bo_obj for bo_obj in ob_obj.bones if bo_obj in scene_data.objects}
for dp_obj in ob_obj.dupli_list_gen(depsgraph):
if dp_obj in scene_data.objects:
objects.add(dp_obj)
else:
objects = scene_data.objects
back_currframe = scene.frame_current
animdata_ob = {}
p_rots = {}
for ob_obj in objects:
if ob_obj.parented_to_armature:
continue
ACNW = AnimationCurveNodeWrapper
loc, rot, scale, _m, _mr = ob_obj.fbx_object_tx(scene_data)
rot_deg = tuple(convert_rad_to_deg_iter(rot))
force_key = (simplify_fac == 0.0) or (ob_obj.is_bone and force_keying)
animdata_ob[ob_obj] = (ACNW(ob_obj.key, 'LCL_TRANSLATION', force_key, force_sek, loc),
ACNW(ob_obj.key, 'LCL_ROTATION', force_key, force_sek, rot_deg),
ACNW(ob_obj.key, 'LCL_SCALING', force_key, force_sek, scale))
p_rots[ob_obj] = rot
force_key = (simplify_fac == 0.0)
animdata_shapes = {}
for me, (me_key, _shapes_key, shapes) in scene_data.data_deformers_shape.items():
# Ignore absolute shape keys for now!
if not me.shape_keys.use_relative:
continue
for shape, (channel_key, geom_key, _shape_verts_co, _shape_verts_idx) in shapes.items():
acnode = AnimationCurveNodeWrapper(channel_key, 'SHAPE_KEY', force_key, force_sek, (0.0,))
# Sooooo happy to have to twist again like a mad snake... Yes, we need to write those curves twice. :/
acnode.add_group(me_key, shape.name, shape.name, (shape.name,))
animdata_shapes[channel_key] = (acnode, me, shape)
animdata_cameras = {}
for cam_obj, cam_key in scene_data.data_cameras.items():
cam = cam_obj.bdata.data
acnode = AnimationCurveNodeWrapper(cam_key, 'CAMERA_FOCAL', force_key, force_sek, (cam.lens,))
animdata_cameras[cam_key] = (acnode, cam)
currframe = f_start
while currframe <= f_end:
real_currframe = currframe - f_start if start_zero else currframe
scene.frame_set(int(currframe), subframe=currframe - int(currframe))
for dp_obj in ob_obj.dupli_list_gen(depsgraph):
pass # Merely updating dupli matrix of ObjectWrapper...
for ob_obj, (anim_loc, anim_rot, anim_scale) in animdata_ob.items():
# We compute baked loc/rot/scale for all objects (rot being euler-compat with previous value!).
p_rot = p_rots.get(ob_obj, None)
loc, rot, scale, _m, _mr = ob_obj.fbx_object_tx(scene_data, rot_euler_compat=p_rot)
p_rots[ob_obj] = rot
anim_loc.add_keyframe(real_currframe, loc)
anim_rot.add_keyframe(real_currframe, tuple(convert_rad_to_deg_iter(rot)))
anim_scale.add_keyframe(real_currframe, scale)
for anim_shape, me, shape in animdata_shapes.values():
anim_shape.add_keyframe(real_currframe, (shape.value * 100.0,))
for anim_camera, camera in animdata_cameras.values():
anim_camera.add_keyframe(real_currframe, (camera.lens,))
currframe += bake_step
scene.frame_set(back_currframe, subframe=0.0)
animations = {}
# And now, produce final data (usable by FBX export code)
# Objects-like loc/rot/scale...
for ob_obj, anims in animdata_ob.items():
for anim in anims:
anim.simplify(simplify_fac, bake_step, force_keep)
if not anim:
continue
for obj_key, group_key, group, fbx_group, fbx_gname in anim.get_final_data(scene, ref_id, force_keep):
anim_data = animations.setdefault(obj_key, ("dummy_unused_key", {}))
anim_data[1][fbx_group] = (group_key, group, fbx_gname)
# And meshes' shape keys.
for channel_key, (anim_shape, me, shape) in animdata_shapes.items():
anim_shape.simplify(simplify_fac, bake_step, force_keep)
if not anim_shape:
continue
for elem_key, group_key, group, fbx_group, fbx_gname in anim_shape.get_final_data(scene, ref_id, force_keep):
anim_data = animations.setdefault(elem_key, ("dummy_unused_key", {}))
anim_data[1][fbx_group] = (group_key, group, fbx_gname)
# And cameras' lens keys.
for cam_key, (anim_camera, camera) in animdata_cameras.items():
anim_camera.simplify(simplify_fac, bake_step, force_keep)
if not anim_camera:
continue
for elem_key, group_key, group, fbx_group, fbx_gname in anim_camera.get_final_data(scene, ref_id, force_keep):
anim_data = animations.setdefault(elem_key, ("dummy_unused_key", {}))
anim_data[1][fbx_group] = (group_key, group, fbx_gname)
astack_key = get_blender_anim_stack_key(scene, ref_id)
alayer_key = get_blender_anim_layer_key(scene, ref_id)
name = (get_blenderID_name(ref_id) if ref_id else scene.name).encode()
if start_zero:
f_end -= f_start
f_start = 0.0
return (astack_key, animations, alayer_key, name, f_start, f_end) if animations else None
def fbx_animations(scene_data):
"""
Generate global animation data from objects.
"""
scene = scene_data.scene
animations = []
animated = set()
frame_start = 1e100
frame_end = -1e100
def add_anim(animations, animated, anim):
nonlocal frame_start, frame_end
if anim is not None:
animations.append(anim)
f_start, f_end = anim[4:6]
if f_start < frame_start:
frame_start = f_start
if f_end > frame_end:
frame_end = f_end
_astack_key, astack, _alayer_key, _name, _fstart, _fend = anim
for elem_key, (alayer_key, acurvenodes) in astack.items():
for fbx_prop, (acurvenode_key, acurves, acurvenode_name) in acurvenodes.items():
animated.add((elem_key, fbx_prop))
# Per-NLA strip animstacks.
if scene_data.settings.bake_anim_use_nla_strips:
strips = []
ob_actions = []
for ob_obj in scene_data.objects:
# NLA tracks only for objects, not bones!
if not ob_obj.is_object:
continue
ob = ob_obj.bdata # Back to real Blender Object.
if not ob.animation_data:
continue
# We have to remove the active action from objects, otherwise it overrides the strips' actions...
ob_actions.append((ob, ob.animation_data.action))
ob.animation_data.action = None
for track in ob.animation_data.nla_tracks:
if track.mute:
continue
for strip in track.strips:
if strip.mute:
continue
strips.append(strip)
strip.mute = True
for strip in strips:
strip.mute = False
add_anim(animations, animated,
fbx_animations_do(scene_data, strip, strip.frame_start, strip.frame_end, True, force_keep=True))
strip.mute = True
scene.frame_set(scene.frame_current, subframe=0.0)
for strip in strips:
strip.mute = False
for ob, ob_act in ob_actions:
ob.animation_data.action = ob_act
# All actions.
if scene_data.settings.bake_anim_use_all_actions:
def validate_actions(act, path_resolve):
for fc in act.fcurves:
data_path = fc.data_path
if fc.array_index:
data_path = data_path + "[%d]" % fc.array_index
try:
path_resolve(data_path)
except ValueError:
return False # Invalid.
return True # Valid.
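# E.g. an F-Curve with data_path "location" and array_index 1 is checked as
# path_resolve("location[1]"); any path the object cannot resolve (say, a bone
# path on a non-armature object) invalidates the whole action for that object.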
def restore_object(ob_to, ob_from):
# Restore org state of object (ugh :/ ).
props = (
'location', 'rotation_quaternion', 'rotation_axis_angle', 'rotation_euler', 'rotation_mode', 'scale',
'delta_location', 'delta_rotation_euler', 'delta_rotation_quaternion', 'delta_scale',
'lock_location', 'lock_rotation', 'lock_rotation_w', 'lock_rotations_4d', 'lock_scale',
'tag', 'track_axis', 'up_axis', 'active_material', 'active_material_index',
'matrix_parent_inverse', 'empty_display_type', 'empty_display_size', 'empty_image_offset', 'pass_index',
'color', 'hide_viewport', 'hide_select', 'hide_render', 'instance_type',
'use_instance_vertices_rotation', 'use_instance_faces_scale', 'instance_faces_scale',
'display_type', 'show_bounds', 'display_bounds_type', 'show_name', 'show_axis', 'show_texture_space',
'show_wire', 'show_all_edges', 'show_transparent', 'show_in_front',
'show_only_shape_key', 'use_shape_key_edit_mode', 'active_shape_key_index',
)
for p in props:
if not ob_to.is_property_readonly(p):
setattr(ob_to, p, getattr(ob_from, p))
for ob_obj in scene_data.objects:
# Actions only for objects, not bones!
if not ob_obj.is_object:
continue
ob = ob_obj.bdata # Back to real Blender Object.
if not ob.animation_data:
continue # Do not export animations for objects that are absolutely not animated, see T44386.
if ob.animation_data.is_property_readonly('action'):
continue # Cannot re-assign 'active action' to this object (usually related to NLA usage, see T48089).
# We can't play with animdata and actions and get back to org state easily.
# So we have to add a temp copy of the object to the scene, animate it, and remove it... :/
ob_copy = ob.copy()
# Great, have to handle bones as well if needed...
pbones_matrices = [pbo.matrix_basis.copy() for pbo in ob.pose.bones] if ob.type == 'ARMATURE' else ...
org_act = ob.animation_data.action
path_resolve = ob.path_resolve
for act in bpy.data.actions:
# For now, *all* paths in the action must be valid for the object, to validate the action.
# Unless that action was already assigned to the object!
if act != org_act and not validate_actions(act, path_resolve):
continue
ob.animation_data.action = act
frame_start, frame_end = act.frame_range # sic!
add_anim(animations, animated,
fbx_animations_do(scene_data, (ob, act), frame_start, frame_end, True,
objects={ob_obj}, force_keep=True))
# Ugly! :/
if pbones_matrices is not ...:
for pbo, mat in zip(ob.pose.bones, pbones_matrices):
pbo.matrix_basis = mat.copy()
ob.animation_data.action = org_act
restore_object(ob, ob_copy)
scene.frame_set(scene.frame_current, subframe=0.0)
if pbones_matrices is not ...:
for pbo, mat in zip(ob.pose.bones, pbones_matrices):
pbo.matrix_basis = mat.copy()
ob.animation_data.action = org_act
bpy.data.objects.remove(ob_copy)
scene.frame_set(scene.frame_current, subframe=0.0)
# Global (containing everything) animstack, only if not exporting NLA strips and/or all actions.
if not scene_data.settings.bake_anim_use_nla_strips and not scene_data.settings.bake_anim_use_all_actions:
add_anim(animations, animated, fbx_animations_do(scene_data, None, scene.frame_start, scene.frame_end, False))
# Be sure to update all matrices back to org state!
scene.frame_set(scene.frame_current, subframe=0.0)
return animations, animated, frame_start, frame_end
def fbx_data_from_scene(scene, depsgraph, settings):
"""
Do some pre-processing over scene's data...
"""
objtypes = settings.object_types
dp_objtypes = objtypes - {'ARMATURE'} # Armatures are not supported as dupli instances currently...
perfmon = PerfMon()
perfmon.level_up()
# ##### Gathering data...
perfmon.step("FBX export prepare: Wrapping Objects...")
# This is rather simple for now; maybe we could end up generating templates with most-used values
# instead of default ones?
objects = {} # Because we do not have any ordered set...
for ob in settings.context_objects:
if ob.type not in objtypes:
continue
ob_obj = ObjectWrapper(ob)
objects[ob_obj] = None
# Duplis...
for dp_obj in ob_obj.dupli_list_gen(depsgraph):
if dp_obj.type not in dp_objtypes:
continue
objects[dp_obj] = None
perfmon.step("FBX export prepare: Wrapping Data (lamps, cameras, empties)...")
data_lights = {ob_obj.bdata.data: get_blenderID_key(ob_obj.bdata.data)
for ob_obj in objects if ob_obj.type == 'LIGHT'}
# Unfortunately, FBX camera data contains object-level data (like position, orientation, etc.)...
data_cameras = {ob_obj: get_blenderID_key(ob_obj.bdata.data)
for ob_obj in objects if ob_obj.type == 'CAMERA'}
# Yep! Contains nothing, but needed!
data_empties = {ob_obj: get_blender_empty_key(ob_obj.bdata)
for ob_obj in objects if ob_obj.type == 'EMPTY'}
perfmon.step("FBX export prepare: Wrapping Meshes...")
data_meshes = {}
for ob_obj in objects:
if ob_obj.type not in BLENDER_OBJECT_TYPES_MESHLIKE:
continue
ob = ob_obj.bdata
use_org_data = True
org_ob_obj = None
# Do not want to systematically recreate a new mesh for dupli-object instances, as that would kind of break the purpose of those.
if ob_obj.is_dupli:
org_ob_obj = ObjectWrapper(ob) # We get the "real" object wrapper from that dupli instance.
if org_ob_obj in data_meshes:
data_meshes[ob_obj] = data_meshes[org_ob_obj]
continue
is_ob_material = any(ms.link == 'OBJECT' for ms in ob.material_slots)
if settings.use_mesh_modifiers or ob.type in BLENDER_OTHER_OBJECT_TYPES or is_ob_material:
# We cannot use the default mesh in that case, or materials would not be the right ones...
use_org_data = not (is_ob_material or ob.type in BLENDER_OTHER_OBJECT_TYPES)
backup_pose_positions = []
tmp_mods = []
if use_org_data and ob.type == 'MESH':
# No need to create a new mesh in this case, if no modifier is active!
last_subsurf = None
for mod in ob.modifiers:
# For meshes, when armature export is enabled, disable Armature modifiers here!
# XXX Temp hacks here since currently we only have access to a viewport depsgraph...
#
# NOTE: We put armature to the rest pose instead of disabling it so we still
# have vertex groups in the evaluated mesh.
if mod.type == 'ARMATURE' and 'ARMATURE' in settings.object_types:
object = mod.object
if object and object.type == 'ARMATURE':
armature = object.data
backup_pose_positions.append((armature, armature.pose_position))
armature.pose_position = 'REST'
elif mod.show_render or mod.show_viewport:
# If exporting with subsurf collect the last Catmull-Clark subsurf modifier
# and disable it. We can use the original data as long as this is the first
# found applicable subsurf modifier.
if settings.use_subsurf and mod.type == 'SUBSURF' and mod.subdivision_type == 'CATMULL_CLARK':
if last_subsurf:
use_org_data = False
last_subsurf = mod
else:
use_org_data = False
if settings.use_subsurf and last_subsurf:
# XXX: When exporting with subsurf information temporarily disable
# the last subsurf modifier.
tmp_mods.append((last_subsurf, last_subsurf.show_render, last_subsurf.show_viewport))
last_subsurf.show_render = False
last_subsurf.show_viewport = False
if not use_org_data:
# If modifiers have been altered, we need to update the dependency graph.
if backup_pose_positions or tmp_mods:
depsgraph.update()
ob_to_convert = ob.evaluated_get(depsgraph) if settings.use_mesh_modifiers else ob
# NOTE: The dependency graph might be re-evaluated multiple times, which could
# potentially free the mesh created early on. So we put those meshes into bmain and
# free them afterwards. Not ideal, but it ensures correct ownership.
tmp_me = bpy.data.meshes.new_from_object(
ob_to_convert, preserve_all_data_layers=True, depsgraph=depsgraph)
data_meshes[ob_obj] = (get_blenderID_key(tmp_me), tmp_me, True)
# Change armatures back.
for armature, pose_position in backup_pose_positions:
armature.pose_position = pose_position
# Update now, so we don't leave modified state after last object was exported.
# Re-enable temporarily disabled modifiers.
for mod, show_render, show_viewport in tmp_mods:
mod.show_render = show_render
mod.show_viewport = show_viewport
if backup_pose_positions or tmp_mods:
depsgraph.update()
if use_org_data:
data_meshes[ob_obj] = (get_blenderID_key(ob.data), ob.data, False)
# In case "real" source object of that dupli did not yet still existed in data_meshes, create it now!
if org_ob_obj is not None:
data_meshes[org_ob_obj] = data_meshes[ob_obj]
perfmon.step("FBX export prepare: Wrapping ShapeKeys...")
# ShapeKeys.
data_deformers_shape = {}
geom_mat_co = settings.global_matrix if settings.bake_space_transform else None
for me_key, me, _free in data_meshes.values():
if not (me.shape_keys and len(me.shape_keys.key_blocks) > 1): # We do not want basis-only relative skeys...
continue
if me in data_deformers_shape:
continue
shapes_key = get_blender_mesh_shape_key(me)
# We gather all vcos first, since some skeys may be based on others...
_cos = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.vertices) * 3
me.vertices.foreach_get("co", _cos)
v_cos = tuple(vcos_transformed_gen(_cos, geom_mat_co))
sk_cos = {}
for shape in me.shape_keys.key_blocks[1:]:
shape.data.foreach_get("co", _cos)
sk_cos[shape] = tuple(vcos_transformed_gen(_cos, geom_mat_co))
sk_base = me.shape_keys.key_blocks[0]
for shape in me.shape_keys.key_blocks[1:]:
# Only write vertices really different from org coordinates!
shape_verts_co = []
shape_verts_idx = []
sv_cos = sk_cos[shape]
ref_cos = v_cos if shape.relative_key == sk_base else sk_cos[shape.relative_key]
for idx, (sv_co, ref_co) in enumerate(zip(sv_cos, ref_cos)):
if similar_values_iter(sv_co, ref_co):
# Note: Maybe this is a bit too simplistic, should we use real shape base here? Though FBX does not
# have this at all... Anyway, this should cover most common cases imho.
continue
shape_verts_co.extend(Vector(sv_co) - Vector(ref_co))
shape_verts_idx.append(idx)
# FBX does not like empty shapes (it makes Unity crash, e.g.).
# To prevent this, we add a vertex that does nothing, but keeps the shape key intact.
if not shape_verts_co:
shape_verts_co.extend((0, 0, 0))
shape_verts_idx.append(0)
channel_key, geom_key = get_blender_mesh_shape_channel_key(me, shape)
data = (channel_key, geom_key, shape_verts_co, shape_verts_idx)
data_deformers_shape.setdefault(me, (me_key, shapes_key, {}))[2][shape] = data
perfmon.step("FBX export prepare: Wrapping Armatures...")
# Armatures!
data_deformers_skin = {}
data_bones = {}
arm_parents = set()
for ob_obj in tuple(objects):
if not (ob_obj.is_object and ob_obj.type in {'ARMATURE'}):
continue
fbx_skeleton_from_armature(scene, settings, ob_obj, objects, data_meshes,
data_bones, data_deformers_skin, data_empties, arm_parents)
# Generate leaf bones
data_leaf_bones = []
if settings.add_leaf_bones:
data_leaf_bones = fbx_generate_leaf_bones(settings, data_bones)
perfmon.step("FBX export prepare: Wrapping World...")
# Some world settings are embedded in FBX materials...
if scene.world:
data_world = {scene.world: get_blenderID_key(scene.world)}
else:
data_world = {}
perfmon.step("FBX export prepare: Wrapping Materials...")
# TODO: Check all the material stuff works even when they are linked to Objects
# (we can then have the same mesh used with different materials...).
# *Should* work, as FBX always links its materials to Models (i.e. objects).
# XXX However, material indices would probably break...
data_materials = {}
for ob_obj in objects:
# If obj is not a valid object for materials, wrapper will just return an empty tuple...
for ma_s in ob_obj.material_slots:
ma = ma_s.material
if ma is None:
continue # Empty slots!
# Note theoretically, FBX supports any kind of materials, even GLSL shaders etc.
# However, I doubt anything else than Lambert/Phong is really portable!
# Note we want to keep a 'dummy' empty material even when we can't really support it, see T41396.
ma_data = data_materials.setdefault(ma, (get_blenderID_key(ma), []))
ma_data[1].append(ob_obj)
perfmon.step("FBX export prepare: Wrapping Textures...")
# Note FBX textures also hold their mapping info.
# TODO: Support layers?
data_textures = {}
# FbxVideo also used to store static images...
data_videos = {}
# For now, do not use world textures; don't think they can be linked to anything FBX-wise...
for ma in data_materials.keys():
# Note: with nodal shaders, we could be generating many more textures, but that's kind of unavoidable,
# given that textures actually do not exist anymore in material context in Blender...
ma_wrap = node_shader_utils.PrincipledBSDFWrapper(ma, is_readonly=True)
for sock_name, fbx_name in PRINCIPLED_TEXTURE_SOCKETS_TO_FBX:
tex = getattr(ma_wrap, sock_name)
if tex is None or tex.image is None:
continue
blender_tex_key = (ma, sock_name)
data_textures[blender_tex_key] = (get_blender_nodetexture_key(*blender_tex_key), fbx_name)
img = tex.image
vid_data = data_videos.setdefault(img, (get_blenderID_key(img), []))
vid_data[1].append(blender_tex_key)
perfmon.step("FBX export prepare: Wrapping Animations...")
# Animation...
animations = ()
animated = set()
frame_start = scene.frame_start
frame_end = scene.frame_end
if settings.bake_anim:
# From objects & bones only for a start.
        # Kind of a hack: we need a temp scene_data for objects' space handling to bake animations...
tmp_scdata = FBXExportData(
None, None, None,
settings, scene, depsgraph, objects, None, None, 0.0, 0.0,
data_empties, data_lights, data_cameras, data_meshes, None,
data_bones, data_leaf_bones, data_deformers_skin, data_deformers_shape,
data_world, data_materials, data_textures, data_videos,
)
animations, animated, frame_start, frame_end = fbx_animations(tmp_scdata)
# ##### Creation of templates...
perfmon.step("FBX export prepare: Generating templates...")
templates = {}
templates[b"GlobalSettings"] = fbx_template_def_globalsettings(scene, settings, nbr_users=1)
if data_empties:
templates[b"Null"] = fbx_template_def_null(scene, settings, nbr_users=len(data_empties))
if data_lights:
templates[b"Light"] = fbx_template_def_light(scene, settings, nbr_users=len(data_lights))
if data_cameras:
templates[b"Camera"] = fbx_template_def_camera(scene, settings, nbr_users=len(data_cameras))
if data_bones:
templates[b"Bone"] = fbx_template_def_bone(scene, settings, nbr_users=len(data_bones))
if data_meshes:
nbr = len({me_key for me_key, _me, _free in data_meshes.values()})
if data_deformers_shape:
nbr += sum(len(shapes[2]) for shapes in data_deformers_shape.values())
templates[b"Geometry"] = fbx_template_def_geometry(scene, settings, nbr_users=nbr)
if objects:
templates[b"Model"] = fbx_template_def_model(scene, settings, nbr_users=len(objects))
if arm_parents:
# Number of Pose|BindPose elements should be the same as number of meshes-parented-to-armatures
templates[b"BindPose"] = fbx_template_def_pose(scene, settings, nbr_users=len(arm_parents))
if data_deformers_skin or data_deformers_shape:
nbr = 0
if data_deformers_skin:
nbr += len(data_deformers_skin)
nbr += sum(len(clusters) for def_me in data_deformers_skin.values() for a, b, clusters in def_me.values())
if data_deformers_shape:
nbr += len(data_deformers_shape)
nbr += sum(len(shapes[2]) for shapes in data_deformers_shape.values())
assert(nbr != 0)
templates[b"Deformers"] = fbx_template_def_deformer(scene, settings, nbr_users=nbr)
# No world support in FBX...
"""
if data_world:
templates[b"World"] = fbx_template_def_world(scene, settings, nbr_users=len(data_world))
"""
if data_materials:
templates[b"Material"] = fbx_template_def_material(scene, settings, nbr_users=len(data_materials))
if data_textures:
templates[b"TextureFile"] = fbx_template_def_texture_file(scene, settings, nbr_users=len(data_textures))
if data_videos:
templates[b"Video"] = fbx_template_def_video(scene, settings, nbr_users=len(data_videos))
if animations:
nbr_astacks = len(animations)
nbr_acnodes = 0
nbr_acurves = 0
for _astack_key, astack, _al, _n, _fs, _fe in animations:
for _alayer_key, alayer in astack.values():
for _acnode_key, acnode, _acnode_name in alayer.values():
nbr_acnodes += 1
for _acurve_key, _dval, acurve, acurve_valid in acnode.values():
if acurve:
nbr_acurves += 1
templates[b"AnimationStack"] = fbx_template_def_animstack(scene, settings, nbr_users=nbr_astacks)
# Would be nice to have one layer per animated object, but this seems tricky and not that well supported.
# So for now, only one layer per anim stack.
templates[b"AnimationLayer"] = fbx_template_def_animlayer(scene, settings, nbr_users=nbr_astacks)
templates[b"AnimationCurveNode"] = fbx_template_def_animcurvenode(scene, settings, nbr_users=nbr_acnodes)
templates[b"AnimationCurve"] = fbx_template_def_animcurve(scene, settings, nbr_users=nbr_acurves)
templates_users = sum(tmpl.nbr_users for tmpl in templates.values())
# ##### Creation of connections...
perfmon.step("FBX export prepare: Generating Connections...")
connections = []
# Objects (with classical parenting).
for ob_obj in objects:
# Bones are handled later.
if not ob_obj.is_bone:
par_obj = ob_obj.parent
# Meshes parented to armature are handled separately, yet we want the 'no parent' connection (0).
if par_obj and ob_obj.has_valid_parent(objects) and (par_obj, ob_obj) not in arm_parents:
connections.append((b"OO", ob_obj.fbx_uuid, par_obj.fbx_uuid, None))
else:
connections.append((b"OO", ob_obj.fbx_uuid, 0, None))
# Armature & Bone chains.
for bo_obj in data_bones.keys():
par_obj = bo_obj.parent
if par_obj not in objects:
continue
connections.append((b"OO", bo_obj.fbx_uuid, par_obj.fbx_uuid, None))
# Object data.
for ob_obj in objects:
if ob_obj.is_bone:
bo_data_key = data_bones[ob_obj]
connections.append((b"OO", get_fbx_uuid_from_key(bo_data_key), ob_obj.fbx_uuid, None))
else:
if ob_obj.type == 'LIGHT':
light_key = data_lights[ob_obj.bdata.data]
connections.append((b"OO", get_fbx_uuid_from_key(light_key), ob_obj.fbx_uuid, None))
elif ob_obj.type == 'CAMERA':
cam_key = data_cameras[ob_obj]
connections.append((b"OO", get_fbx_uuid_from_key(cam_key), ob_obj.fbx_uuid, None))
elif ob_obj.type == 'EMPTY' or ob_obj.type == 'ARMATURE':
empty_key = data_empties[ob_obj]
connections.append((b"OO", get_fbx_uuid_from_key(empty_key), ob_obj.fbx_uuid, None))
elif ob_obj.type in BLENDER_OBJECT_TYPES_MESHLIKE:
mesh_key, _me, _free = data_meshes[ob_obj]
connections.append((b"OO", get_fbx_uuid_from_key(mesh_key), ob_obj.fbx_uuid, None))
# Leaf Bones
for (_node_name, par_uuid, node_uuid, attr_uuid, _matrix, _hide, _size) in data_leaf_bones:
connections.append((b"OO", node_uuid, par_uuid, None))
connections.append((b"OO", attr_uuid, node_uuid, None))
# 'Shape' deformers (shape keys, only for meshes currently)...
for me_key, shapes_key, shapes in data_deformers_shape.values():
# shape -> geometry
connections.append((b"OO", get_fbx_uuid_from_key(shapes_key), get_fbx_uuid_from_key(me_key), None))
for channel_key, geom_key, _shape_verts_co, _shape_verts_idx in shapes.values():
# shape channel -> shape
connections.append((b"OO", get_fbx_uuid_from_key(channel_key), get_fbx_uuid_from_key(shapes_key), None))
# geometry (keys) -> shape channel
connections.append((b"OO", get_fbx_uuid_from_key(geom_key), get_fbx_uuid_from_key(channel_key), None))
# 'Skin' deformers (armature-to-geometry, only for meshes currently)...
for arm, deformed_meshes in data_deformers_skin.items():
for me, (skin_key, ob_obj, clusters) in deformed_meshes.items():
# skin -> geometry
mesh_key, _me, _free = data_meshes[ob_obj]
assert(me == _me)
connections.append((b"OO", get_fbx_uuid_from_key(skin_key), get_fbx_uuid_from_key(mesh_key), None))
for bo_obj, clstr_key in clusters.items():
# cluster -> skin
connections.append((b"OO", get_fbx_uuid_from_key(clstr_key), get_fbx_uuid_from_key(skin_key), None))
# bone -> cluster
connections.append((b"OO", bo_obj.fbx_uuid, get_fbx_uuid_from_key(clstr_key), None))
# Materials
mesh_material_indices = {}
_objs_indices = {}
for ma, (ma_key, ob_objs) in data_materials.items():
for ob_obj in ob_objs:
connections.append((b"OO", get_fbx_uuid_from_key(ma_key), ob_obj.fbx_uuid, None))
# Get index of this material for this object (or dupliobject).
# Material indices for mesh faces are determined by their order in 'ma to ob' connections.
# Only materials for meshes currently...
# Note in case of dupliobjects a same me/ma idx will be generated several times...
# Should not be an issue in practice, and it's needed in case we export duplis but not the original!
if ob_obj.type not in BLENDER_OBJECT_TYPES_MESHLIKE:
continue
_mesh_key, me, _free = data_meshes[ob_obj]
idx = _objs_indices[ob_obj] = _objs_indices.get(ob_obj, -1) + 1
mesh_material_indices.setdefault(me, {})[ma] = idx
del _objs_indices
# Textures
for (ma, sock_name), (tex_key, fbx_prop) in data_textures.items():
ma_key, _ob_objs = data_materials[ma]
# texture -> material properties
connections.append((b"OP", get_fbx_uuid_from_key(tex_key), get_fbx_uuid_from_key(ma_key), fbx_prop))
# Images
for vid, (vid_key, blender_tex_keys) in data_videos.items():
for blender_tex_key in blender_tex_keys:
tex_key, _fbx_prop = data_textures[blender_tex_key]
connections.append((b"OO", get_fbx_uuid_from_key(vid_key), get_fbx_uuid_from_key(tex_key), None))
# Animations
for astack_key, astack, alayer_key, _name, _fstart, _fend in animations:
# Animstack itself is linked nowhere!
astack_id = get_fbx_uuid_from_key(astack_key)
# For now, only one layer!
alayer_id = get_fbx_uuid_from_key(alayer_key)
connections.append((b"OO", alayer_id, astack_id, None))
for elem_key, (alayer_key, acurvenodes) in astack.items():
elem_id = get_fbx_uuid_from_key(elem_key)
# Animlayer -> animstack.
# alayer_id = get_fbx_uuid_from_key(alayer_key)
# connections.append((b"OO", alayer_id, astack_id, None))
for fbx_prop, (acurvenode_key, acurves, acurvenode_name) in acurvenodes.items():
# Animcurvenode -> animalayer.
acurvenode_id = get_fbx_uuid_from_key(acurvenode_key)
connections.append((b"OO", acurvenode_id, alayer_id, None))
# Animcurvenode -> object property.
connections.append((b"OP", acurvenode_id, elem_id, fbx_prop.encode()))
for fbx_item, (acurve_key, default_value, acurve, acurve_valid) in acurves.items():
if acurve:
# Animcurve -> Animcurvenode.
connections.append((b"OP", get_fbx_uuid_from_key(acurve_key), acurvenode_id, fbx_item.encode()))
perfmon.level_down()
# ##### And pack all this!
return FBXExportData(
templates, templates_users, connections,
settings, scene, depsgraph, objects, animations, animated, frame_start, frame_end,
data_empties, data_lights, data_cameras, data_meshes, mesh_material_indices,
data_bones, data_leaf_bones, data_deformers_skin, data_deformers_shape,
data_world, data_materials, data_textures, data_videos,
)
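
# Editor's note: a minimal, self-contained sketch (not part of the exporter) of the
# connection-tuple convention built above. b"OO" links an object to an object,
# b"OP" links an object to a named property of another object; the last field
# carries the encoded property name for b"OP" links, and None for b"OO" ones.
def _sketch_connection_tuples():
    connections = []
    child_uuid, parent_uuid = 1001, 1002  # hypothetical uuids
    # Plain object -> object parenting link.
    connections.append((b"OO", child_uuid, parent_uuid, None))
    # Object -> property link, e.g. a texture plugged into a material input.
    connections.append((b"OP", child_uuid, parent_uuid, b"DiffuseColor"))
    assert all(c[0] in {b"OO", b"OP"} for c in connections)
    return connections
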
def fbx_scene_data_cleanup(scene_data):
"""
Some final cleanup...
"""
# Delete temp meshes.
done_meshes = set()
for me_key, me, free in scene_data.data_meshes.values():
if free and me_key not in done_meshes:
bpy.data.meshes.remove(me)
done_meshes.add(me_key)
# ##### Top-level FBX elements generators. #####
def fbx_header_elements(root, scene_data, time=None):
"""
    Write boilerplate code of FBX root.
time is expected to be a datetime.datetime object, or None (using now() in this case).
"""
app_vendor = "Blender Foundation"
app_name = "Blender (stable FBX IO)"
app_ver = bpy.app.version_string
import addon_utils
import sys
addon_ver = addon_utils.module_bl_info(sys.modules[__package__])['version']
# ##### Start of FBXHeaderExtension element.
header_ext = elem_empty(root, b"FBXHeaderExtension")
elem_data_single_int32(header_ext, b"FBXHeaderVersion", FBX_HEADER_VERSION)
elem_data_single_int32(header_ext, b"FBXVersion", FBX_VERSION)
# No encryption!
elem_data_single_int32(header_ext, b"EncryptionType", 0)
if time is None:
time = datetime.datetime.now()
elem = elem_empty(header_ext, b"CreationTimeStamp")
elem_data_single_int32(elem, b"Version", 1000)
elem_data_single_int32(elem, b"Year", time.year)
elem_data_single_int32(elem, b"Month", time.month)
elem_data_single_int32(elem, b"Day", time.day)
elem_data_single_int32(elem, b"Hour", time.hour)
elem_data_single_int32(elem, b"Minute", time.minute)
elem_data_single_int32(elem, b"Second", time.second)
elem_data_single_int32(elem, b"Millisecond", time.microsecond // 1000)
elem_data_single_string_unicode(header_ext, b"Creator", "%s - %s - %d.%d.%d"
% (app_name, app_ver, addon_ver[0], addon_ver[1], addon_ver[2]))
# 'SceneInfo' seems mandatory to get a valid FBX file...
# TODO use real values!
# XXX Should we use scene.name.encode() here?
scene_info = elem_data_single_string(header_ext, b"SceneInfo", fbx_name_class(b"GlobalInfo", b"SceneInfo"))
scene_info.add_string(b"UserData")
elem_data_single_string(scene_info, b"Type", b"UserData")
elem_data_single_int32(scene_info, b"Version", FBX_SCENEINFO_VERSION)
meta_data = elem_empty(scene_info, b"MetaData")
elem_data_single_int32(meta_data, b"Version", FBX_SCENEINFO_VERSION)
elem_data_single_string(meta_data, b"Title", b"")
elem_data_single_string(meta_data, b"Subject", b"")
elem_data_single_string(meta_data, b"Author", b"")
elem_data_single_string(meta_data, b"Keywords", b"")
elem_data_single_string(meta_data, b"Revision", b"")
elem_data_single_string(meta_data, b"Comment", b"")
props = elem_properties(scene_info)
elem_props_set(props, "p_string_url", b"DocumentUrl", "/foobar.fbx")
elem_props_set(props, "p_string_url", b"SrcDocumentUrl", "/foobar.fbx")
original = elem_props_compound(props, b"Original")
original("p_string", b"ApplicationVendor", app_vendor)
original("p_string", b"ApplicationName", app_name)
original("p_string", b"ApplicationVersion", app_ver)
original("p_datetime", b"DateTime_GMT", "01/01/1970 00:00:00.000")
original("p_string", b"FileName", "/foobar.fbx")
lastsaved = elem_props_compound(props, b"LastSaved")
lastsaved("p_string", b"ApplicationVendor", app_vendor)
lastsaved("p_string", b"ApplicationName", app_name)
lastsaved("p_string", b"ApplicationVersion", app_ver)
lastsaved("p_datetime", b"DateTime_GMT", "01/01/1970 00:00:00.000")
# ##### End of FBXHeaderExtension element.
# FileID is replaced by dummy value currently...
elem_data_single_bytes(root, b"FileId", b"FooBar")
# CreationTime is replaced by dummy value currently, but anyway...
elem_data_single_string_unicode(root, b"CreationTime",
"{:04}-{:02}-{:02} {:02}:{:02}:{:02}:{:03}"
"".format(time.year, time.month, time.day, time.hour, time.minute, time.second,
time.microsecond * 1000))
elem_data_single_string_unicode(root, b"Creator", "%s - %s - %d.%d.%d"
% (app_name, app_ver, addon_ver[0], addon_ver[1], addon_ver[2]))
# ##### Start of GlobalSettings element.
global_settings = elem_empty(root, b"GlobalSettings")
scene = scene_data.scene
elem_data_single_int32(global_settings, b"Version", 1000)
props = elem_properties(global_settings)
up_axis, front_axis, coord_axis = RIGHT_HAND_AXES[scene_data.settings.to_axes]
#~ # DO NOT take into account global scale here! That setting is applied to object transformations during export
#~ # (in other words, this is pure blender-exporter feature, and has nothing to do with FBX data).
#~ if scene_data.settings.apply_unit_scale:
#~ # Unit scaling is applied to objects' scale, so our unit is effectively FBX one (centimeter).
#~ scale_factor_org = 1.0
#~ scale_factor = 1.0 / units_blender_to_fbx_factor(scene)
#~ else:
#~ scale_factor_org = units_blender_to_fbx_factor(scene)
#~ scale_factor = scale_factor_org
scale_factor = scale_factor_org = scene_data.settings.unit_scale
elem_props_set(props, "p_integer", b"UpAxis", up_axis[0])
elem_props_set(props, "p_integer", b"UpAxisSign", up_axis[1])
elem_props_set(props, "p_integer", b"FrontAxis", front_axis[0])
elem_props_set(props, "p_integer", b"FrontAxisSign", front_axis[1])
elem_props_set(props, "p_integer", b"CoordAxis", coord_axis[0])
elem_props_set(props, "p_integer", b"CoordAxisSign", coord_axis[1])
elem_props_set(props, "p_integer", b"OriginalUpAxis", -1)
elem_props_set(props, "p_integer", b"OriginalUpAxisSign", 1)
elem_props_set(props, "p_double", b"UnitScaleFactor", scale_factor)
elem_props_set(props, "p_double", b"OriginalUnitScaleFactor", scale_factor_org)
elem_props_set(props, "p_color_rgb", b"AmbientColor", (0.0, 0.0, 0.0))
elem_props_set(props, "p_string", b"DefaultCamera", "Producer Perspective")
# Global timing data.
r = scene.render
_, fbx_fps_mode = FBX_FRAMERATES[0] # Custom framerate.
fbx_fps = fps = r.fps / r.fps_base
for ref_fps, fps_mode in FBX_FRAMERATES:
if similar_values(fps, ref_fps):
fbx_fps = ref_fps
fbx_fps_mode = fps_mode
elem_props_set(props, "p_enum", b"TimeMode", fbx_fps_mode)
elem_props_set(props, "p_timestamp", b"TimeSpanStart", 0)
elem_props_set(props, "p_timestamp", b"TimeSpanStop", FBX_KTIME)
elem_props_set(props, "p_double", b"CustomFrameRate", fbx_fps)
# ##### End of GlobalSettings element.
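
# Editor's note: a self-contained sketch of the framerate snapping done in
# fbx_header_elements() above. The (rate, mode) table here is hypothetical; the
# real one is fbx_utils.FBX_FRAMERATES, and the comparison uses similar_values()
# instead of a hard-coded epsilon.
def _sketch_match_fbx_framerate(fps, framerates=((-1.0, 14), (24.0, 1), (30.0, 6))):
    fbx_fps, fbx_fps_mode = fps, framerates[0][1]  # default: custom framerate
    for ref_fps, fps_mode in framerates[1:]:
        if abs(fps - ref_fps) < 1e-5:
            fbx_fps, fbx_fps_mode = ref_fps, fps_mode
    return fbx_fps, fbx_fps_mode
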
def fbx_documents_elements(root, scene_data):
"""
Write 'Document' part of FBX root.
    Seems like FBX supports multiple documents, but until I find examples of such, we'll stick to a single doc!
"""
name = scene_data.scene.name
# ##### Start of Documents element.
docs = elem_empty(root, b"Documents")
elem_data_single_int32(docs, b"Count", 1)
doc_uid = get_fbx_uuid_from_key("__FBX_Document__" + name)
doc = elem_data_single_int64(docs, b"Document", doc_uid)
doc.add_string_unicode(name)
doc.add_string_unicode(name)
props = elem_properties(doc)
elem_props_set(props, "p_object", b"SourceObject")
elem_props_set(props, "p_string", b"ActiveAnimStackName", "")
# XXX Some kind of ID? Offset?
# Anyway, as long as we have only one doc, probably not an issue.
elem_data_single_int64(doc, b"RootNode", 0)
def fbx_references_elements(root, scene_data):
"""
    No idea what references are in FBX currently... Just writing an empty element.
"""
    elem_empty(root, b"References")
def fbx_definitions_elements(root, scene_data):
"""
    Template definitions. Only used by Objects data AFAIK (apart from the dummy GlobalSettings one).
"""
definitions = elem_empty(root, b"Definitions")
elem_data_single_int32(definitions, b"Version", FBX_TEMPLATES_VERSION)
elem_data_single_int32(definitions, b"Count", scene_data.templates_users)
fbx_templates_generate(definitions, scene_data.templates)
def fbx_objects_elements(root, scene_data):
"""
Data (objects, geometry, material, textures, armatures, etc.).
"""
perfmon = PerfMon()
perfmon.level_up()
objects = elem_empty(root, b"Objects")
perfmon.step("FBX export fetch empties (%d)..." % len(scene_data.data_empties))
for empty in scene_data.data_empties:
fbx_data_empty_elements(objects, empty, scene_data)
perfmon.step("FBX export fetch lamps (%d)..." % len(scene_data.data_lights))
for lamp in scene_data.data_lights:
fbx_data_light_elements(objects, lamp, scene_data)
perfmon.step("FBX export fetch cameras (%d)..." % len(scene_data.data_cameras))
for cam in scene_data.data_cameras:
fbx_data_camera_elements(objects, cam, scene_data)
perfmon.step("FBX export fetch meshes (%d)..."
% len({me_key for me_key, _me, _free in scene_data.data_meshes.values()}))
done_meshes = set()
for me_obj in scene_data.data_meshes:
fbx_data_mesh_elements(objects, me_obj, scene_data, done_meshes)
del done_meshes
perfmon.step("FBX export fetch objects (%d)..." % len(scene_data.objects))
for ob_obj in scene_data.objects:
if ob_obj.is_dupli:
continue
fbx_data_object_elements(objects, ob_obj, scene_data)
for dp_obj in ob_obj.dupli_list_gen(scene_data.depsgraph):
if dp_obj not in scene_data.objects:
continue
fbx_data_object_elements(objects, dp_obj, scene_data)
perfmon.step("FBX export fetch remaining...")
for ob_obj in scene_data.objects:
if not (ob_obj.is_object and ob_obj.type == 'ARMATURE'):
continue
fbx_data_armature_elements(objects, ob_obj, scene_data)
if scene_data.data_leaf_bones:
fbx_data_leaf_bone_elements(objects, scene_data)
for ma in scene_data.data_materials:
fbx_data_material_elements(objects, ma, scene_data)
for blender_tex_key in scene_data.data_textures:
fbx_data_texture_file_elements(objects, blender_tex_key, scene_data)
for vid in scene_data.data_videos:
fbx_data_video_elements(objects, vid, scene_data)
perfmon.step("FBX export fetch animations...")
fbx_data_animation_elements(objects, scene_data)
perfmon.level_down()
def fbx_connections_elements(root, scene_data):
"""
Relations between Objects (which material uses which texture, and so on).
"""
connections = elem_empty(root, b"Connections")
for c in scene_data.connections:
elem_connection(connections, *c)
def fbx_takes_elements(root, scene_data):
"""
Animations.
"""
    # XXX Pretty sure takes are no longer needed...
takes = elem_empty(root, b"Takes")
elem_data_single_string(takes, b"Current", b"")
    for astack_key, _astack, alayer_key, name, f_start, f_end in scene_data.animations:
scene = scene_data.scene
fps = scene.render.fps / scene.render.fps_base
start_ktime = int(convert_sec_to_ktime(f_start / fps))
end_ktime = int(convert_sec_to_ktime(f_end / fps))
take = elem_data_single_string(takes, b"Take", name)
elem_data_single_string(take, b"FileName", name + b".tak")
take_loc_time = elem_data_single_int64(take, b"LocalTime", start_ktime)
take_loc_time.add_int64(end_ktime)
take_ref_time = elem_data_single_int64(take, b"ReferenceTime", start_ktime)
take_ref_time.add_int64(end_ktime)
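
# Editor's note: sketch of the take-time computation above, assuming the
# fbx_utils convention that FBX_KTIME (46186158000) is the number of FBX
# "ktime" ticks per second; a frame is first turned into seconds, then ticks.
def _sketch_frame_to_ktime(frame, fps, ktime_per_second=46186158000):
    return int(frame / fps * ktime_per_second)

# E.g. at 25 fps, frame 25 is exactly one second worth of ticks.
assert _sketch_frame_to_ktime(25, 25.0) == 46186158000
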
# ##### "Main" functions. #####
# This func can be called with just the filepath
def save_single(operator, scene, depsgraph, filepath="",
global_matrix=Matrix(),
apply_unit_scale=False,
global_scale=1.0,
apply_scale_options='FBX_SCALE_NONE',
axis_up="Z",
axis_forward="Y",
context_objects=None,
object_types=None,
use_mesh_modifiers=True,
use_mesh_modifiers_render=True,
mesh_smooth_type='FACE',
use_subsurf=False,
use_armature_deform_only=False,
bake_anim=True,
bake_anim_use_all_bones=True,
bake_anim_use_nla_strips=True,
bake_anim_use_all_actions=True,
bake_anim_step=1.0,
bake_anim_simplify_factor=1.0,
bake_anim_force_startend_keying=True,
add_leaf_bones=False,
primary_bone_axis='Y',
secondary_bone_axis='X',
use_metadata=True,
path_mode='AUTO',
use_mesh_edges=True,
use_tspace=True,
embed_textures=False,
use_custom_props=False,
bake_space_transform=False,
armature_nodetype='NULL',
**kwargs
):
# Clear cached ObjectWrappers (just in case...).
ObjectWrapper.cache_clear()
if object_types is None:
object_types = {'EMPTY', 'CAMERA', 'LIGHT', 'ARMATURE', 'MESH', 'OTHER'}
if 'OTHER' in object_types:
object_types |= BLENDER_OTHER_OBJECT_TYPES
# Default Blender unit is equivalent to meter, while FBX one is centimeter...
unit_scale = units_blender_to_fbx_factor(scene) if apply_unit_scale else 100.0
if apply_scale_options == 'FBX_SCALE_NONE':
global_matrix = Matrix.Scale(unit_scale * global_scale, 4) @ global_matrix
unit_scale = 1.0
elif apply_scale_options == 'FBX_SCALE_UNITS':
global_matrix = Matrix.Scale(global_scale, 4) @ global_matrix
elif apply_scale_options == 'FBX_SCALE_CUSTOM':
global_matrix = Matrix.Scale(unit_scale, 4) @ global_matrix
unit_scale = global_scale
else: # if apply_scale_options == 'FBX_SCALE_ALL':
unit_scale = global_scale * unit_scale
global_scale = global_matrix.median_scale
global_matrix_inv = global_matrix.inverted()
# For transforming mesh normals.
global_matrix_inv_transposed = global_matrix_inv.transposed()
# Only embed textures in COPY mode!
if embed_textures and path_mode != 'COPY':
embed_textures = False
# Calculate bone correction matrix
bone_correction_matrix = None # Default is None = no change
bone_correction_matrix_inv = None
if (primary_bone_axis, secondary_bone_axis) != ('Y', 'X'):
from bpy_extras.io_utils import axis_conversion
bone_correction_matrix = axis_conversion(from_forward=secondary_bone_axis,
from_up=primary_bone_axis,
to_forward='X',
to_up='Y',
).to_4x4()
bone_correction_matrix_inv = bone_correction_matrix.inverted()
media_settings = FBXExportSettingsMedia(
path_mode,
os.path.dirname(bpy.data.filepath), # base_src
os.path.dirname(filepath), # base_dst
# Local dir where to put images (media), using FBX conventions.
os.path.splitext(os.path.basename(filepath))[0] + ".fbm", # subdir
embed_textures,
set(), # copy_set
set(), # embedded_set
)
settings = FBXExportSettings(
operator.report, (axis_up, axis_forward), global_matrix, global_scale, apply_unit_scale, unit_scale,
bake_space_transform, global_matrix_inv, global_matrix_inv_transposed,
context_objects, object_types, use_mesh_modifiers, use_mesh_modifiers_render,
mesh_smooth_type, use_subsurf, use_mesh_edges, use_tspace,
armature_nodetype, use_armature_deform_only,
add_leaf_bones, bone_correction_matrix, bone_correction_matrix_inv,
bake_anim, bake_anim_use_all_bones, bake_anim_use_nla_strips, bake_anim_use_all_actions,
bake_anim_step, bake_anim_simplify_factor, bake_anim_force_startend_keying,
False, media_settings, use_custom_props,
)
import bpy_extras.io_utils
print('\nFBX export starting... %r' % filepath)
start_time = time.process_time()
# Generate some data about exported scene...
scene_data = fbx_data_from_scene(scene, depsgraph, settings)
root = elem_empty(None, b"") # Root element has no id, as it is not saved per se!
# Mostly FBXHeaderExtension and GlobalSettings.
fbx_header_elements(root, scene_data)
# Documents and References are pretty much void currently.
fbx_documents_elements(root, scene_data)
fbx_references_elements(root, scene_data)
# Templates definitions.
fbx_definitions_elements(root, scene_data)
# Actual data.
fbx_objects_elements(root, scene_data)
# How data are inter-connected.
fbx_connections_elements(root, scene_data)
# Animation.
fbx_takes_elements(root, scene_data)
# Cleanup!
fbx_scene_data_cleanup(scene_data)
# And we are down, we can write the whole thing!
encode_bin.write(filepath, root, FBX_VERSION)
# Clear cached ObjectWrappers!
ObjectWrapper.cache_clear()
# copy all collected files, if we did not embed them.
if not media_settings.embed_textures:
bpy_extras.io_utils.path_reference_copy(media_settings.copy_set)
print('export finished in %.4f sec.' % (time.process_time() - start_time))
return {'FINISHED'}
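
# Editor's note: a pure-function sketch of the four apply_scale_options branches
# in save_single() above, returning (matrix_scale, unit_scale) instead of
# mutating a Matrix, to make the bookkeeping easier to follow.
def _sketch_apply_scale(option, unit_scale, global_scale):
    if option == 'FBX_SCALE_NONE':      # bake everything into the matrix
        return unit_scale * global_scale, 1.0
    elif option == 'FBX_SCALE_UNITS':   # bake only the user scale
        return global_scale, unit_scale
    elif option == 'FBX_SCALE_CUSTOM':  # bake only the unit scale
        return unit_scale, global_scale
    else:                               # 'FBX_SCALE_ALL': everything in UnitScaleFactor
        return 1.0, unit_scale * global_scale
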
# Defaults for applications; currently only Unity, but others could be added.
def defaults_unity3d():
return {
# These options seem to produce the same result as the old Ascii exporter in Unity3D:
"axis_up": 'Y',
"axis_forward": '-Z',
"global_matrix": Matrix.Rotation(-math.pi / 2.0, 4, 'X'),
# Should really be True, but it can cause problems if a model is already in a scene or prefab
# with the old transforms.
"bake_space_transform": False,
"use_selection": False,
"object_types": {'ARMATURE', 'EMPTY', 'MESH', 'OTHER'},
"use_mesh_modifiers": True,
"use_mesh_modifiers_render": True,
"use_mesh_edges": False,
"mesh_smooth_type": 'FACE',
"use_subsurf": False,
"use_tspace": False, # XXX Why? Unity is expected to support tspace import...
"use_armature_deform_only": True,
"use_custom_props": True,
"bake_anim": True,
"bake_anim_simplify_factor": 1.0,
"bake_anim_step": 1.0,
"bake_anim_use_nla_strips": True,
"bake_anim_use_all_actions": True,
"add_leaf_bones": False, # Avoid memory/performance cost for something only useful for modelling
"primary_bone_axis": 'Y', # Doesn't really matter for Unity, so leave unchanged
"secondary_bone_axis": 'X',
"path_mode": 'AUTO',
"embed_textures": False,
"batch_mode": 'OFF',
}
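
# Editor's note: hypothetical usage of the defaults above (operator/context
# would come from the calling export operator; this is only a sketch):
#
#     kwargs = defaults_unity3d()
#     save(operator, context, filepath="/tmp/model.fbx", **kwargs)
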
def save(operator, context,
filepath="",
use_selection=False,
use_active_collection=False,
batch_mode='OFF',
use_batch_own_dir=False,
**kwargs
):
"""
    This is a wrapper around save_single, which handles multi-scene (or collection) cases when batch-exporting
a whole .blend file.
"""
ret = {'FINISHED'}
active_object = context.view_layer.objects.active
org_mode = None
if active_object and active_object.mode != 'OBJECT' and bpy.ops.object.mode_set.poll():
org_mode = active_object.mode
bpy.ops.object.mode_set(mode='OBJECT')
if batch_mode == 'OFF':
kwargs_mod = kwargs.copy()
if use_active_collection:
if use_selection:
ctx_objects = tuple(obj
for obj in context.view_layer.active_layer_collection.collection.all_objects
if obj.select_get())
else:
ctx_objects = context.view_layer.active_layer_collection.collection.all_objects
else:
if use_selection:
ctx_objects = context.selected_objects
else:
ctx_objects = context.view_layer.objects
kwargs_mod["context_objects"] = ctx_objects
depsgraph = context.evaluated_depsgraph_get()
ret = save_single(operator, context.scene, depsgraph, filepath, **kwargs_mod)
else:
# XXX We need a way to generate a depsgraph for inactive view_layers first...
# XXX Also, what to do in case of batch-exporting scenes, when there is more than one view layer?
# Scenes have no concept of 'active' view layer, that's on window level...
fbxpath = filepath
prefix = os.path.basename(fbxpath)
if prefix:
fbxpath = os.path.dirname(fbxpath)
if batch_mode == 'COLLECTION':
data_seq = tuple((coll, coll.name, 'objects') for coll in bpy.data.collections if coll.objects)
elif batch_mode in {'SCENE_COLLECTION', 'ACTIVE_SCENE_COLLECTION'}:
scenes = [context.scene] if batch_mode == 'ACTIVE_SCENE_COLLECTION' else bpy.data.scenes
data_seq = []
for scene in scenes:
if not scene.objects:
continue
# Needed to avoid having tens of 'Master Collection' entries.
todo_collections = [(scene.collection, "_".join((scene.name, scene.collection.name)))]
while todo_collections:
coll, coll_name = todo_collections.pop()
todo_collections.extend(((c, c.name) for c in coll.children if c.all_objects))
data_seq.append((coll, coll_name, 'all_objects'))
else:
data_seq = tuple((scene, scene.name, 'objects') for scene in bpy.data.scenes if scene.objects)
        # Export each data-block (scene or collection) in turn.
        new_fbxpath = fbxpath  # The own-dir option modifies the path, so keep the original.
for data, data_name, data_obj_propname in data_seq: # scene or collection
newname = "_".join((prefix, bpy.path.clean_name(data_name))) if prefix else bpy.path.clean_name(data_name)
if use_batch_own_dir:
new_fbxpath = os.path.join(fbxpath, newname)
# path may already exist... and be a file.
while os.path.isfile(new_fbxpath):
new_fbxpath = "_".join((new_fbxpath, "dir"))
if not os.path.exists(new_fbxpath):
os.makedirs(new_fbxpath)
filepath = os.path.join(new_fbxpath, newname + '.fbx')
print('\nBatch exporting %s as...\n\t%r' % (data, filepath))
if batch_mode in {'COLLECTION', 'SCENE_COLLECTION', 'ACTIVE_SCENE_COLLECTION'}:
                # For collections, add a dummy scene so that objects update properly.
scene = bpy.data.scenes.new(name="FBX_Temp")
                src_scenes = {}  # Count how many times each 'source' scene is used.
for obj in getattr(data, data_obj_propname):
for src_sce in obj.users_scene:
src_scenes[src_sce] = src_scenes.setdefault(src_sce, 0) + 1
scene.collection.objects.link(obj)
# Find the 'most used' source scene, and use its unit settings. This is somewhat weak, but should work
# fine in most cases, and avoids stupid issues like T41931.
best_src_scene = None
best_src_scene_users = -1
for sce, nbr_users in src_scenes.items():
                    if nbr_users > best_src_scene_users:
best_src_scene_users = nbr_users
best_src_scene = sce
scene.unit_settings.system = best_src_scene.unit_settings.system
scene.unit_settings.system_rotation = best_src_scene.unit_settings.system_rotation
scene.unit_settings.scale_length = best_src_scene.unit_settings.scale_length
# new scene [only one viewlayer to update]
scene.view_layers[0].update()
                # TODO - BUMMER! Armatures not in the group won't animate the mesh.
else:
scene = data
kwargs_batch = kwargs.copy()
kwargs_batch["context_objects"] = getattr(data, data_obj_propname)
save_single(operator, scene, scene.view_layers[0].depsgraph, filepath, **kwargs_batch)
if batch_mode in {'COLLECTION', 'SCENE_COLLECTION', 'ACTIVE_SCENE_COLLECTION'}:
# Remove temp collection scene.
bpy.data.scenes.remove(scene)
if active_object and org_mode:
context.view_layer.objects.active = active_object
if bpy.ops.object.mode_set.poll():
bpy.ops.object.mode_set(mode=org_mode)
return ret
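
# Editor's note: a side-effect-free sketch of the use_batch_own_dir path
# handling in save() above — if the target directory name collides with an
# existing *file*, "_dir" is appended until a usable name is found, then the
# directory is created on demand. The I/O callables are injectable only so the
# sketch stays testable.
def _sketch_own_dir(path, isfile=lambda p: False, exists=lambda p: True, makedirs=lambda p: None):
    while isfile(path):
        path = "_".join((path, "dir"))
    if not exists(path):
        makedirs(path)
    return path
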
if "fbx_utils" in locals():
    import importlib
    importlib.reload(fbx_utils)

import array
import datetime
import math
import os
import time

import bpy
import bpy_extras
from bpy_extras import node_shader_utils
from mathutils import Vector, Matrix

from . import encode_bin, data_types, fbx_utils
from .fbx_utils import (
FBX_VERSION, FBX_HEADER_VERSION, FBX_SCENEINFO_VERSION, FBX_TEMPLATES_VERSION,
FBX_MODELS_VERSION,
FBX_GEOMETRY_VERSION, FBX_GEOMETRY_NORMAL_VERSION, FBX_GEOMETRY_BINORMAL_VERSION, FBX_GEOMETRY_TANGENT_VERSION,
FBX_GEOMETRY_SMOOTHING_VERSION, FBX_GEOMETRY_CREASE_VERSION, FBX_GEOMETRY_VCOLOR_VERSION, FBX_GEOMETRY_UV_VERSION,
FBX_GEOMETRY_MATERIAL_VERSION, FBX_GEOMETRY_LAYER_VERSION,
FBX_GEOMETRY_SHAPE_VERSION, FBX_DEFORMER_SHAPE_VERSION, FBX_DEFORMER_SHAPECHANNEL_VERSION,
FBX_POSE_BIND_VERSION, FBX_DEFORMER_SKIN_VERSION, FBX_DEFORMER_CLUSTER_VERSION,
FBX_MATERIAL_VERSION, FBX_TEXTURE_VERSION,
FBX_ANIM_KEY_VERSION,
FBX_ANIM_PROPSGROUP_NAME,
FBX_KTIME,
BLENDER_OTHER_OBJECT_TYPES, BLENDER_OBJECT_TYPES_MESHLIKE,
FBX_LIGHT_TYPES, FBX_LIGHT_DECAY_TYPES,
RIGHT_HAND_AXES, FBX_FRAMERATES,
PerfMon,
units_blender_to_fbx_factor, units_convertor, units_convertor_iter,
matrix4_to_array, similar_values, similar_values_iter,
vcos_transformed_gen, nors_transformed_gen,
get_fbx_uuid_from_key,
get_blenderID_key, get_blenderID_name,
get_blender_mesh_shape_key, get_blender_mesh_shape_channel_key,
get_blender_empty_key, get_blender_bone_key,
get_blender_bindpose_key, get_blender_armature_skin_key, get_blender_bone_cluster_key,
get_blender_anim_id_base, get_blender_anim_stack_key, get_blender_anim_layer_key,
get_blender_anim_curve_node_key, get_blender_anim_curve_key,
get_blender_nodetexture_key,
elem_empty,
elem_data_single_bool, elem_data_single_int16, elem_data_single_int32, elem_data_single_int64,
elem_data_single_float32, elem_data_single_float64,
elem_data_single_bytes, elem_data_single_string, elem_data_single_string_unicode,
elem_data_single_bool_array, elem_data_single_int32_array, elem_data_single_int64_array,
elem_data_single_float32_array, elem_data_single_float64_array, elem_data_vec_float64,
elem_properties, elem_props_set, elem_props_compound,
elem_props_template_init, elem_props_template_set, elem_props_template_finalize,
FBXTemplate, fbx_templates_generate,
AnimationCurveNodeWrapper,
ObjectWrapper, fbx_name_class,
FBXExportSettingsMedia, FBXExportSettings, FBXExportData,
)
convert_sec_to_ktime = units_convertor("second", "ktime")
convert_sec_to_ktime_iter = units_convertor_iter("second", "ktime")
convert_mm_to_inch = units_convertor("millimeter", "inch")
convert_rad_to_deg = units_convertor("radian", "degree")
convert_rad_to_deg_iter = units_convertor_iter("radian", "degree")
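
# Editor's note: hypothetical usage of the convertors above (plain unit-factor
# closures from fbx_utils), e.g.:
#
#     filmwidth = convert_mm_to_inch(36.0)          # sensor width, mm -> inch
#     angles = tuple(convert_rad_to_deg_iter(rot))  # radians -> degrees, per channel
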
def fbx_template_def_globalsettings(scene, settings, override_defaults=None, nbr_users=0):
    props = {}
    if override_defaults is not None:
        props.update(override_defaults)
    return FBXTemplate(b"GlobalSettings", b"", props, nbr_users, [False])
def fbx_template_def_model(scene, settings, override_defaults=None, nbr_users=0):
gscale = settings.global_scale
props = {
# Name, Value, Type, Animatable
b"QuaternionInterpolate": (0, "p_enum", False), # 0 = no quat interpolation.
b"RotationOffset": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"RotationPivot": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"ScalingOffset": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"ScalingPivot": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"TranslationActive": (False, "p_bool", False),
b"TranslationMin": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"TranslationMax": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"TranslationMinX": (False, "p_bool", False),
b"TranslationMinY": (False, "p_bool", False),
b"TranslationMinZ": (False, "p_bool", False),
b"TranslationMaxX": (False, "p_bool", False),
b"TranslationMaxY": (False, "p_bool", False),
b"TranslationMaxZ": (False, "p_bool", False),
b"RotationOrder": (0, "p_enum", False), # we always use 'XYZ' order.
b"RotationSpaceForLimitOnly": (False, "p_bool", False),
b"RotationStiffnessX": (0.0, "p_double", False),
b"RotationStiffnessY": (0.0, "p_double", False),
b"RotationStiffnessZ": (0.0, "p_double", False),
b"AxisLen": (10.0, "p_double", False),
b"PreRotation": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"PostRotation": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"RotationActive": (False, "p_bool", False),
b"RotationMin": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"RotationMax": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"RotationMinX": (False, "p_bool", False),
b"RotationMinY": (False, "p_bool", False),
b"RotationMinZ": (False, "p_bool", False),
b"RotationMaxX": (False, "p_bool", False),
b"RotationMaxY": (False, "p_bool", False),
b"RotationMaxZ": (False, "p_bool", False),
b"InheritType": (0, "p_enum", False), # RrSs
b"ScalingActive": (False, "p_bool", False),
b"ScalingMin": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"ScalingMax": ((1.0, 1.0, 1.0), "p_vector_3d", False),
b"ScalingMinX": (False, "p_bool", False),
b"ScalingMinY": (False, "p_bool", False),
b"ScalingMinZ": (False, "p_bool", False),
b"ScalingMaxX": (False, "p_bool", False),
b"ScalingMaxY": (False, "p_bool", False),
b"ScalingMaxZ": (False, "p_bool", False),
b"GeometricTranslation": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"GeometricRotation": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"GeometricScaling": ((1.0, 1.0, 1.0), "p_vector_3d", False),
b"MinDampRangeX": (0.0, "p_double", False),
b"MinDampRangeY": (0.0, "p_double", False),
b"MinDampRangeZ": (0.0, "p_double", False),
b"MaxDampRangeX": (0.0, "p_double", False),
b"MaxDampRangeY": (0.0, "p_double", False),
b"MaxDampRangeZ": (0.0, "p_double", False),
b"MinDampStrengthX": (0.0, "p_double", False),
b"MinDampStrengthY": (0.0, "p_double", False),
b"MinDampStrengthZ": (0.0, "p_double", False),
b"MaxDampStrengthX": (0.0, "p_double", False),
b"MaxDampStrengthY": (0.0, "p_double", False),
b"MaxDampStrengthZ": (0.0, "p_double", False),
b"PreferedAngleX": (0.0, "p_double", False),
b"PreferedAngleY": (0.0, "p_double", False),
b"PreferedAngleZ": (0.0, "p_double", False),
b"LookAtProperty": (None, "p_object", False),
b"UpVectorProperty": (None, "p_object", False),
b"Show": (True, "p_bool", False),
b"NegativePercentShapeSupport": (True, "p_bool", False),
b"DefaultAttributeIndex": (-1, "p_integer", False),
b"Freeze": (False, "p_bool", False),
b"LODBox": (False, "p_bool", False),
b"Lcl Translation": ((0.0, 0.0, 0.0), "p_lcl_translation", True),
b"Lcl Rotation": ((0.0, 0.0, 0.0), "p_lcl_rotation", True),
b"Lcl Scaling": ((1.0, 1.0, 1.0), "p_lcl_scaling", True),
b"Visibility": (1.0, "p_visibility", True),
b"Visibility Inheritance": (1, "p_visibility_inheritance", False),
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"Model", b"FbxNode", props, nbr_users, [False])
def fbx_template_def_null(scene, settings, override_defaults=None, nbr_users=0):
props = {
b"Color": ((0.8, 0.8, 0.8), "p_color_rgb", False),
b"Size": (100.0, "p_double", False),
b"Look": (1, "p_enum", False), # Cross (0 is None, i.e. invisible?).
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"NodeAttribute", b"FbxNull", props, nbr_users, [False])
def fbx_template_def_light(scene, settings, override_defaults=None, nbr_users=0):
gscale = settings.global_scale
props = {
b"LightType": (0, "p_enum", False), # Point light.
b"CastLight": (True, "p_bool", False),
b"Color": ((1.0, 1.0, 1.0), "p_color", True),
b"Intensity": (100.0, "p_number", True), # Times 100 compared to Blender values...
b"DecayType": (2, "p_enum", False), # Quadratic.
b"DecayStart": (30.0 * gscale, "p_double", False),
b"CastShadows": (True, "p_bool", False),
b"ShadowColor": ((0.0, 0.0, 0.0), "p_color", True),
b"AreaLightShape": (0, "p_enum", False), # Rectangle.
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"NodeAttribute", b"FbxLight", props, nbr_users, [False])
def fbx_template_def_camera(scene, settings, override_defaults=None, nbr_users=0):
r = scene.render
props = {
b"Color": ((0.8, 0.8, 0.8), "p_color_rgb", False),
b"Position": ((0.0, 0.0, -50.0), "p_vector", True),
b"UpVector": ((0.0, 1.0, 0.0), "p_vector", True),
b"InterestPosition": ((0.0, 0.0, 0.0), "p_vector", True),
b"Roll": (0.0, "p_roll", True),
b"OpticalCenterX": (0.0, "p_opticalcenterx", True),
b"OpticalCenterY": (0.0, "p_opticalcentery", True),
b"BackgroundColor": ((0.63, 0.63, 0.63), "p_color", True),
b"TurnTable": (0.0, "p_number", True),
b"DisplayTurnTableIcon": (False, "p_bool", False),
b"UseMotionBlur": (False, "p_bool", False),
b"UseRealTimeMotionBlur": (True, "p_bool", False),
b"Motion Blur Intensity": (1.0, "p_number", True),
b"AspectRatioMode": (0, "p_enum", False), # WindowSize.
b"AspectWidth": (320.0, "p_double", False),
b"AspectHeight": (200.0, "p_double", False),
b"PixelAspectRatio": (1.0, "p_double", False),
b"FilmOffsetX": (0.0, "p_number", True),
b"FilmOffsetY": (0.0, "p_number", True),
b"FilmWidth": (0.816, "p_double", False),
b"FilmHeight": (0.612, "p_double", False),
b"FilmAspectRatio": (1.3333333333333333, "p_double", False),
b"FilmSqueezeRatio": (1.0, "p_double", False),
b"FilmFormatIndex": (0, "p_enum", False), # Assuming this is ApertureFormat, 0 = custom.
b"PreScale": (1.0, "p_number", True),
b"FilmTranslateX": (0.0, "p_number", True),
b"FilmTranslateY": (0.0, "p_number", True),
b"FilmRollPivotX": (0.0, "p_number", True),
b"FilmRollPivotY": (0.0, "p_number", True),
b"FilmRollValue": (0.0, "p_number", True),
b"FilmRollOrder": (0, "p_enum", False), # 0 = rotate first (default).
b"ApertureMode": (2, "p_enum", False), # 2 = Vertical.
b"GateFit": (0, "p_enum", False), # 0 = no resolution gate fit.
b"FieldOfView": (25.114999771118164, "p_fov", True),
b"FieldOfViewX": (40.0, "p_fov_x", True),
b"FieldOfViewY": (40.0, "p_fov_y", True),
b"FocalLength": (34.89327621672628, "p_number", True),
b"CameraFormat": (0, "p_enum", False), # Custom camera format.
b"UseFrameColor": (False, "p_bool", False),
b"FrameColor": ((0.3, 0.3, 0.3), "p_color_rgb", False),
b"ShowName": (True, "p_bool", False),
b"ShowInfoOnMoving": (True, "p_bool", False),
b"ShowGrid": (True, "p_bool", False),
b"ShowOpticalCenter": (False, "p_bool", False),
b"ShowAzimut": (True, "p_bool", False),
b"ShowTimeCode": (False, "p_bool", False),
b"ShowAudio": (False, "p_bool", False),
b"AudioColor": ((0.0, 1.0, 0.0), "p_vector_3d", False), # Yep, vector3d, not corlorgb… :cry:
b"NearPlane": (10.0, "p_double", False),
b"FarPlane": (4000.0, "p_double", False),
b"AutoComputeClipPanes": (False, "p_bool", False),
b"ViewCameraToLookAt": (True, "p_bool", False),
b"ViewFrustumNearFarPlane": (False, "p_bool", False),
b"ViewFrustumBackPlaneMode": (2, "p_enum", False), # 2 = show back plane if texture added.
b"BackPlaneDistance": (4000.0, "p_number", True),
b"BackPlaneDistanceMode": (1, "p_enum", False), # 1 = relative to camera.
b"ViewFrustumFrontPlaneMode": (2, "p_enum", False), # 2 = show front plane if texture added.
b"FrontPlaneDistance": (10.0, "p_number", True),
b"FrontPlaneDistanceMode": (1, "p_enum", False), # 1 = relative to camera.
b"LockMode": (False, "p_bool", False),
b"LockInterestNavigation": (False, "p_bool", False),
# BackPlate... properties **arggggg!**
b"FitImage": (False, "p_bool", False),
b"Crop": (False, "p_bool", False),
b"Center": (True, "p_bool", False),
b"KeepRatio": (True, "p_bool", False),
# End of BackPlate...
b"BackgroundAlphaTreshold": (0.5, "p_double", False),
b"ShowBackplate": (True, "p_bool", False),
b"BackPlaneOffsetX": (0.0, "p_number", True),
b"BackPlaneOffsetY": (0.0, "p_number", True),
b"BackPlaneRotation": (0.0, "p_number", True),
b"BackPlaneScaleX": (1.0, "p_number", True),
b"BackPlaneScaleY": (1.0, "p_number", True),
b"Background Texture": (None, "p_object", False),
b"FrontPlateFitImage": (True, "p_bool", False),
b"FrontPlateCrop": (False, "p_bool", False),
b"FrontPlateCenter": (True, "p_bool", False),
b"FrontPlateKeepRatio": (True, "p_bool", False),
b"Foreground Opacity": (1.0, "p_double", False),
b"ShowFrontplate": (True, "p_bool", False),
b"FrontPlaneOffsetX": (0.0, "p_number", True),
b"FrontPlaneOffsetY": (0.0, "p_number", True),
b"FrontPlaneRotation": (0.0, "p_number", True),
b"FrontPlaneScaleX": (1.0, "p_number", True),
b"FrontPlaneScaleY": (1.0, "p_number", True),
b"Foreground Texture": (None, "p_object", False),
b"DisplaySafeArea": (False, "p_bool", False),
b"DisplaySafeAreaOnRender": (False, "p_bool", False),
b"SafeAreaDisplayStyle": (1, "p_enum", False), # 1 = rounded corners.
b"SafeAreaAspectRatio": (1.3333333333333333, "p_double", False),
b"Use2DMagnifierZoom": (False, "p_bool", False),
b"2D Magnifier Zoom": (100.0, "p_number", True),
b"2D Magnifier X": (50.0, "p_number", True),
b"2D Magnifier Y": (50.0, "p_number", True),
b"CameraProjectionType": (0, "p_enum", False), # 0 = perspective, 1 = orthogonal.
b"OrthoZoom": (1.0, "p_double", False),
b"UseRealTimeDOFAndAA": (False, "p_bool", False),
b"UseDepthOfField": (False, "p_bool", False),
b"FocusSource": (0, "p_enum", False), # 0 = camera interest, 1 = distance from camera interest.
b"FocusAngle": (3.5, "p_double", False), # ???
b"FocusDistance": (200.0, "p_double", False),
b"UseAntialiasing": (False, "p_bool", False),
b"AntialiasingIntensity": (0.77777, "p_double", False),
b"AntialiasingMethod": (0, "p_enum", False), # 0 = oversampling, 1 = hardware.
b"UseAccumulationBuffer": (False, "p_bool", False),
b"FrameSamplingCount": (7, "p_integer", False),
b"FrameSamplingType": (1, "p_enum", False), # 0 = uniform, 1 = stochastic.
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"NodeAttribute", b"FbxCamera", props, nbr_users, [False])
def fbx_template_def_bone(scene, settings, override_defaults=None, nbr_users=0):
props = {}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"NodeAttribute", b"LimbNode", props, nbr_users, [False])
def fbx_template_def_geometry(scene, settings, override_defaults=None, nbr_users=0):
props = {
b"Color": ((0.8, 0.8, 0.8), "p_color_rgb", False),
b"BBoxMin": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"BBoxMax": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"Primary Visibility": (True, "p_bool", False),
b"Casts Shadows": (True, "p_bool", False),
b"Receive Shadows": (True, "p_bool", False),
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"Geometry", b"FbxMesh", props, nbr_users, [False])
def fbx_template_def_material(scene, settings, override_defaults=None, nbr_users=0):
# WIP...
props = {
b"ShadingModel": ("Phong", "p_string", False),
b"MultiLayer": (False, "p_bool", False),
# Lambert-specific.
b"EmissiveColor": ((0.0, 0.0, 0.0), "p_color", True),
b"EmissiveFactor": (1.0, "p_number", True),
b"AmbientColor": ((0.2, 0.2, 0.2), "p_color", True),
b"AmbientFactor": (1.0, "p_number", True),
b"DiffuseColor": ((0.8, 0.8, 0.8), "p_color", True),
b"DiffuseFactor": (1.0, "p_number", True),
b"TransparentColor": ((0.0, 0.0, 0.0), "p_color", True),
b"TransparencyFactor": (0.0, "p_number", True),
b"Opacity": (1.0, "p_number", True),
b"NormalMap": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"Bump": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"BumpFactor": (1.0, "p_double", False),
b"DisplacementColor": ((0.0, 0.0, 0.0), "p_color_rgb", False),
b"DisplacementFactor": (1.0, "p_double", False),
b"VectorDisplacementColor": ((0.0, 0.0, 0.0), "p_color_rgb", False),
b"VectorDisplacementFactor": (1.0, "p_double", False),
# Phong-specific.
b"SpecularColor": ((0.2, 0.2, 0.2), "p_color", True),
b"SpecularFactor": (1.0, "p_number", True),
        # Not sure about the name: the importer uses this one (but ShininessExponent for the texture prop name!),
        # and in FBX files exported by the SDK, one appears in the template and the other in the actual material.
        # For now, write both.
b"Shininess": (20.0, "p_number", True),
b"ShininessExponent": (20.0, "p_number", True),
b"ReflectionColor": ((0.0, 0.0, 0.0), "p_color", True),
b"ReflectionFactor": (1.0, "p_number", True),
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"Material", b"FbxSurfacePhong", props, nbr_users, [False])
def fbx_template_def_texture_file(scene, settings, override_defaults=None, nbr_users=0):
# WIP...
# XXX Not sure about all names!
props = {
b"TextureTypeUse": (0, "p_enum", False), # Standard.
b"AlphaSource": (2, "p_enum", False), # Black (i.e. texture's alpha), XXX name guessed!.
b"Texture alpha": (1.0, "p_double", False),
b"PremultiplyAlpha": (True, "p_bool", False),
b"CurrentTextureBlendMode": (1, "p_enum", False),
b"CurrentMappingType": (0, "p_enum", False),
b"UVSet": ("default", "p_string", False),
b"WrapModeU": (0, "p_enum", False),
b"WrapModeV": (0, "p_enum", False),
b"UVSwap": (False, "p_bool", False),
b"Translation": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"Rotation": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"Scaling": ((1.0, 1.0, 1.0), "p_vector_3d", False),
b"TextureRotationPivot": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"TextureScalingPivot": ((0.0, 0.0, 0.0), "p_vector_3d", False),
b"UseMaterial": (False, "p_bool", False),
b"UseMipMap": (False, "p_bool", False),
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"Texture", b"FbxFileTexture", props, nbr_users, [False])
def fbx_template_def_video(scene, settings, override_defaults=None, nbr_users=0):
props = {
b"Width": (0, "p_integer", False),
b"Height": (0, "p_integer", False),
b"Path": ("", "p_string_url", False),
b"AccessMode": (0, "p_enum", False),
b"StartFrame": (0, "p_integer", False),
b"StopFrame": (0, "p_integer", False),
b"Offset": (0, "p_timestamp", False),
b"PlaySpeed": (0.0, "p_double", False),
b"FreeRunning": (False, "p_bool", False),
b"Loop": (False, "p_bool", False),
b"InterlaceMode": (0, "p_enum", False),
b"ImageSequence": (False, "p_bool", False),
b"ImageSequenceOffset": (0, "p_integer", False),
b"FrameRate": (0.0, "p_double", False),
b"LastFrame": (0, "p_integer", False),
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"Video", b"FbxVideo", props, nbr_users, [False])
def fbx_template_def_pose(scene, settings, override_defaults=None, nbr_users=0):
props = {}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"Pose", b"", props, nbr_users, [False])
def fbx_template_def_deformer(scene, settings, override_defaults=None, nbr_users=0):
props = {}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"Deformer", b"", props, nbr_users, [False])
def fbx_template_def_animstack(scene, settings, override_defaults=None, nbr_users=0):
props = {
b"Description": ("", "p_string", False),
b"LocalStart": (0, "p_timestamp", False),
b"LocalStop": (0, "p_timestamp", False),
b"ReferenceStart": (0, "p_timestamp", False),
b"ReferenceStop": (0, "p_timestamp", False),
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"AnimationStack", b"FbxAnimStack", props, nbr_users, [False])
def fbx_template_def_animlayer(scene, settings, override_defaults=None, nbr_users=0):
props = {
b"Weight": (100.0, "p_number", True),
b"Mute": (False, "p_bool", False),
b"Solo": (False, "p_bool", False),
b"Lock": (False, "p_bool", False),
b"Color": ((0.8, 0.8, 0.8), "p_color_rgb", False),
b"BlendMode": (0, "p_enum", False),
b"RotationAccumulationMode": (0, "p_enum", False),
b"ScaleAccumulationMode": (0, "p_enum", False),
b"BlendModeBypass": (0, "p_ulonglong", False),
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"AnimationLayer", b"FbxAnimLayer", props, nbr_users, [False])
def fbx_template_def_animcurvenode(scene, settings, override_defaults=None, nbr_users=0):
props = {
FBX_ANIM_PROPSGROUP_NAME.encode(): (None, "p_compound", False),
}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"AnimationCurveNode", b"FbxAnimCurveNode", props, nbr_users, [False])
def fbx_template_def_animcurve(scene, settings, override_defaults=None, nbr_users=0):
props = {}
if override_defaults is not None:
props.update(override_defaults)
return FBXTemplate(b"AnimationCurve", b"", props, nbr_users, [False])
def fbx_data_element_custom_properties(props, bid):
    """
    Store custom properties of blender ID bid (any mapping-like object, in fact) into FBX properties props.
    """
    items = bid.items()
    if not items:
        return
    rna_properties = {prop.identifier for prop in bid.bl_rna.properties if prop.is_runtime}
for k, v in items:
if k == '_RNA_UI' or k in rna_properties:
continue
list_val = getattr(v, "to_list", lambda: None)()
if isinstance(v, str):
elem_props_set(props, "p_string", k.encode(), v, custom=True)
elif isinstance(v, int):
elem_props_set(props, "p_integer", k.encode(), v, custom=True)
elif isinstance(v, float):
elem_props_set(props, "p_double", k.encode(), v, custom=True)
elif list_val:
if len(list_val) == 3:
elem_props_set(props, "p_vector", k.encode(), list_val, custom=True)
else:
elem_props_set(props, "p_string", k.encode(), str(list_val), custom=True)
else:
elem_props_set(props, "p_string", k.encode(), str(v), custom=True)
def fbx_data_empty_elements(root, empty, scene_data):
empty_key = scene_data.data_empties[empty]
null = elem_data_single_int64(root, b"NodeAttribute", get_fbx_uuid_from_key(empty_key))
null.add_string(fbx_name_class(empty.name.encode(), b"NodeAttribute"))
val = empty.bdata.get('fbx_type', None)
null.add_string(val.encode() if val and isinstance(val, str) else b"Null")
elem_data_single_string(null, b"TypeFlags", b"Null")
tmpl = elem_props_template_init(scene_data.templates, b"Null")
props = elem_properties(null)
elem_props_template_finalize(tmpl, props)
def fbx_data_light_elements(root, lamp, scene_data):
gscale = scene_data.settings.global_scale
light_key = scene_data.data_lights[lamp]
do_light = True
decay_type = FBX_LIGHT_DECAY_TYPES['CONSTANT']
do_shadow = False
shadow_color = Vector((0.0, 0.0, 0.0))
if lamp.type not in {'HEMI'}:
if lamp.type not in {'SUN', 'AREA'}:
decay_type = FBX_LIGHT_DECAY_TYPES[lamp.falloff_type]
do_light = True
do_shadow = lamp.use_shadow
shadow_color = lamp.shadow_color
light = elem_data_single_int64(root, b"NodeAttribute", get_fbx_uuid_from_key(light_key))
light.add_string(fbx_name_class(lamp.name.encode(), b"NodeAttribute"))
light.add_string(b"Light")
elem_data_single_int32(light, b"GeometryVersion", FBX_GEOMETRY_VERSION)
tmpl = elem_props_template_init(scene_data.templates, b"Light")
props = elem_properties(light)
elem_props_template_set(tmpl, props, "p_enum", b"LightType", FBX_LIGHT_TYPES[lamp.type])
elem_props_template_set(tmpl, props, "p_bool", b"CastLight", do_light)
elem_props_template_set(tmpl, props, "p_color", b"Color", lamp.color)
elem_props_template_set(tmpl, props, "p_number", b"Intensity", lamp.energy * 100.0)
elem_props_template_set(tmpl, props, "p_enum", b"DecayType", decay_type)
elem_props_template_set(tmpl, props, "p_double", b"DecayStart", lamp.distance * gscale)
elem_props_template_set(tmpl, props, "p_bool", b"CastShadows", do_shadow)
elem_props_template_set(tmpl, props, "p_color", b"ShadowColor", shadow_color)
if lamp.type in {'SPOT'}:
elem_props_template_set(tmpl, props, "p_double", b"OuterAngle", math.degrees(lamp.spot_size))
elem_props_template_set(tmpl, props, "p_double", b"InnerAngle",
math.degrees(lamp.spot_size * (1.0 - lamp.spot_blend)))
elem_props_template_finalize(tmpl, props)
if scene_data.settings.use_custom_props:
fbx_data_element_custom_properties(props, lamp)
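
# Editor's note: sketch of the spot-cone mapping above. Blender stores the full
# cone angle (spot_size, radians) and a 0..1 blend factor; FBX expects
# outer/inner angles in degrees, the inner cone shrinking as the blend grows.
def _sketch_spot_angles(spot_size, spot_blend):
    import math  # local import so the sketch stays self-contained
    outer = math.degrees(spot_size)
    inner = math.degrees(spot_size * (1.0 - spot_blend))
    return outer, inner

assert _sketch_spot_angles(1.0, 1.0)[1] == 0.0  # full blend -> zero inner cone
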
def fbx_data_camera_elements(root, cam_obj, scene_data):
gscale = scene_data.settings.global_scale
cam = cam_obj.bdata
cam_data = cam.data
cam_key = scene_data.data_cameras[cam_obj]
loc, rot, scale, matrix, matrix_rot = cam_obj.fbx_object_tx(scene_data)
up = matrix_rot @ Vector((0.0, 1.0, 0.0))
to = matrix_rot @ Vector((0.0, 0.0, -1.0))
render = scene_data.scene.render
width = render.resolution_x
height = render.resolution_y
aspect = width / height
filmwidth = convert_mm_to_inch(cam_data.sensor_width)
filmheight = convert_mm_to_inch(cam_data.sensor_height)
filmaspect = filmwidth / filmheight
offsetx = filmwidth * cam_data.shift_x
offsety = filmaspect * filmheight * cam_data.shift_y
cam = elem_data_single_int64(root, b"NodeAttribute", get_fbx_uuid_from_key(cam_key))
cam.add_string(fbx_name_class(cam_data.name.encode(), b"NodeAttribute"))
cam.add_string(b"Camera")
tmpl = elem_props_template_init(scene_data.templates, b"Camera")
props = elem_properties(cam)
elem_props_template_set(tmpl, props, "p_vector", b"Position", loc)
elem_props_template_set(tmpl, props, "p_vector", b"UpVector", up)
elem_props_template_set(tmpl, props, "p_vector", b"InterestPosition", loc + to)
elem_props_template_set(tmpl, props, "p_color", b"BackgroundColor", (0.0, 0.0, 0.0))
elem_props_template_set(tmpl, props, "p_bool", b"DisplayTurnTableIcon", True)
elem_props_template_set(tmpl, props, "p_enum", b"AspectRatioMode", 2)
elem_props_template_set(tmpl, props, "p_double", b"AspectWidth", float(render.resolution_x))
elem_props_template_set(tmpl, props, "p_double", b"AspectHeight", float(render.resolution_y))
elem_props_template_set(tmpl, props, "p_double", b"PixelAspectRatio",
float(render.pixel_aspect_x / render.pixel_aspect_y))
elem_props_template_set(tmpl, props, "p_double", b"FilmWidth", filmwidth)
elem_props_template_set(tmpl, props, "p_double", b"FilmHeight", filmheight)
elem_props_template_set(tmpl, props, "p_double", b"FilmAspectRatio", filmaspect)
elem_props_template_set(tmpl, props, "p_double", b"FilmOffsetX", offsetx)
elem_props_template_set(tmpl, props, "p_double", b"FilmOffsetY", offsety)
elem_props_template_set(tmpl, props, "p_enum", b"ApertureMode", 3)
elem_props_template_set(tmpl, props, "p_enum", b"GateFit", 2)
elem_props_template_set(tmpl, props, "p_fov", b"FieldOfView", math.degrees(cam_data.angle_x))
elem_props_template_set(tmpl, props, "p_fov_x", b"FieldOfViewX", math.degrees(cam_data.angle_x))
elem_props_template_set(tmpl, props, "p_fov_y", b"FieldOfViewY", math.degrees(cam_data.angle_y))
elem_props_template_set(tmpl, props, "p_double", b"FocalLength", cam_data.lens)
elem_props_template_set(tmpl, props, "p_double", b"SafeAreaAspectRatio", aspect)
elem_props_template_set(tmpl, props, "p_enum", b"CameraProjectionType", 1 if cam_data.type == 'ORTHO' else 0)
elem_props_template_set(tmpl, props, "p_double", b"OrthoZoom", cam_data.ortho_scale)
elem_props_template_set(tmpl, props, "p_double", b"NearPlane", cam_data.clip_start * gscale)
elem_props_template_set(tmpl, props, "p_double", b"FarPlane", cam_data.clip_end * gscale)
elem_props_template_set(tmpl, props, "p_enum", b"BackPlaneDistanceMode", 1)
elem_props_template_set(tmpl, props, "p_double", b"BackPlaneDistance", cam_data.clip_end * gscale)
elem_props_template_finalize(tmpl, props)
if scene_data.settings.use_custom_props:
fbx_data_element_custom_properties(props, cam_data)
elem_data_single_string(cam, b"TypeFlags", b"Camera")
elem_data_single_int32(cam, b"GeometryVersion", 124)
elem_data_vec_float64(cam, b"Position", loc)
elem_data_vec_float64(cam, b"Up", up)
elem_data_vec_float64(cam, b"LookAt", to)
elem_data_single_int32(cam, b"ShowInfoOnMoving", 1)
elem_data_single_int32(cam, b"ShowAudio", 0)
elem_data_vec_float64(cam, b"AudioColor", (0.0, 1.0, 0.0))
elem_data_single_float64(cam, b"CameraOrthoZoom", 1.0)
def fbx_data_bindpose_element(root, me_obj, me, scene_data, arm_obj=None, mat_world_arm=None, bones=()):
if arm_obj is None:
arm_obj = me_obj
bindpose_key = get_blender_bindpose_key(arm_obj.bdata, me)
fbx_pose = elem_data_single_int64(root, b"Pose", get_fbx_uuid_from_key(bindpose_key))
fbx_pose.add_string(fbx_name_class(me.name.encode(), b"Pose"))
fbx_pose.add_string(b"BindPose")
elem_data_single_string(fbx_pose, b"Type", b"BindPose")
elem_data_single_int32(fbx_pose, b"Version", FBX_POSE_BIND_VERSION)
elem_data_single_int32(fbx_pose, b"NbPoseNodes", 1 + (1 if (arm_obj != me_obj) else 0) + len(bones))
mat_world_obj = me_obj.fbx_object_matrix(scene_data, global_space=True)
fbx_posenode = elem_empty(fbx_pose, b"PoseNode")
elem_data_single_int64(fbx_posenode, b"Node", me_obj.fbx_uuid)
elem_data_single_float64_array(fbx_posenode, b"Matrix", matrix4_to_array(mat_world_obj))
if arm_obj != me_obj:
fbx_posenode = elem_empty(fbx_pose, b"PoseNode")
elem_data_single_int64(fbx_posenode, b"Node", arm_obj.fbx_uuid)
elem_data_single_float64_array(fbx_posenode, b"Matrix", matrix4_to_array(mat_world_arm))
mat_world_bones = {}
for bo_obj in bones:
bomat = bo_obj.fbx_object_matrix(scene_data, rest=True, global_space=True)
mat_world_bones[bo_obj] = bomat
fbx_posenode = elem_empty(fbx_pose, b"PoseNode")
elem_data_single_int64(fbx_posenode, b"Node", bo_obj.fbx_uuid)
elem_data_single_float64_array(fbx_posenode, b"Matrix", matrix4_to_array(bomat))
return mat_world_obj, mat_world_bones
def fbx_data_mesh_shapes_elements(root, me_obj, me, scene_data, fbx_me_tmpl, fbx_me_props):
if me not in scene_data.data_deformers_shape:
return
write_normals = True
_me_key, shape_key, shapes = scene_data.data_deformers_shape[me]
channels = []
for shape, (channel_key, geom_key, shape_verts_co, shape_verts_idx) in shapes.items():
if shape.vertex_group and shape.vertex_group in me_obj.bdata.vertex_groups:
shape_verts_weights = [0.0] * (len(shape_verts_co) // 3)
vg_idx = me_obj.bdata.vertex_groups[shape.vertex_group].index
for sk_idx, v_idx in enumerate(shape_verts_idx):
for vg in me.vertices[v_idx].groups:
if vg.group == vg_idx:
shape_verts_weights[sk_idx] = vg.weight * 100.0
else:
shape_verts_weights = [100.0] * (len(shape_verts_co) // 3)
channels.append((channel_key, shape, shape_verts_weights))
geom = elem_data_single_int64(root, b"Geometry", get_fbx_uuid_from_key(geom_key))
geom.add_string(fbx_name_class(shape.name.encode(), b"Geometry"))
geom.add_string(b"Shape")
tmpl = elem_props_template_init(scene_data.templates, b"Geometry")
props = elem_properties(geom)
elem_props_template_finalize(tmpl, props)
elem_data_single_int32(geom, b"Version", FBX_GEOMETRY_SHAPE_VERSION)
elem_data_single_int32_array(geom, b"Indexes", shape_verts_idx)
elem_data_single_float64_array(geom, b"Vertices", shape_verts_co)
if write_normals:
elem_data_single_float64_array(geom, b"Normals", [0.0] * len(shape_verts_co))
fbx_data_bindpose_element(root, me_obj, me, scene_data)
fbx_shape = elem_data_single_int64(root, b"Deformer", get_fbx_uuid_from_key(shape_key))
fbx_shape.add_string(fbx_name_class(me.name.encode(), b"Deformer"))
fbx_shape.add_string(b"BlendShape")
elem_data_single_int32(fbx_shape, b"Version", FBX_DEFORMER_SHAPE_VERSION)
for channel_key, shape, shape_verts_weights in channels:
fbx_channel = elem_data_single_int64(root, b"Deformer", get_fbx_uuid_from_key(channel_key))
fbx_channel.add_string(fbx_name_class(shape.name.encode(), b"SubDeformer"))
fbx_channel.add_string(b"BlendShapeChannel")
elem_data_single_int32(fbx_channel, b"Version", FBX_DEFORMER_SHAPECHANNEL_VERSION)
elem_data_single_float64(fbx_channel, b"DeformPercent", shape.value * 100.0)
elem_data_single_float64_array(fbx_channel, b"FullWeights", shape_verts_weights)
elem_props_template_set(fbx_me_tmpl, fbx_me_props, "p_number", shape.name.encode(), shape.value * 100.0,
animatable=True)
def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
def _infinite_gen(val):
        while True:
yield val
me_key, me, _free = scene_data.data_meshes[me_obj]
if me_key in done_meshes:
return
smooth_type = scene_data.settings.mesh_smooth_type
write_normals = True
do_bake_space_transform = me_obj.use_bake_space_transform(scene_data)
geom_mat_co = scene_data.settings.global_matrix if do_bake_space_transform else None
geom_mat_no = Matrix(scene_data.settings.global_matrix_inv_transposed) if do_bake_space_transform else None
if geom_mat_no is not None:
geom_mat_no.translation = Vector()
geom_mat_no.normalize()
geom = elem_data_single_int64(root, b"Geometry", get_fbx_uuid_from_key(me_key))
geom.add_string(fbx_name_class(me.name.encode(), b"Geometry"))
geom.add_string(b"Mesh")
tmpl = elem_props_template_init(scene_data.templates, b"Geometry")
props = elem_properties(geom)
if scene_data.settings.use_custom_props:
fbx_data_element_custom_properties(props, me)
write_crease = False
if scene_data.settings.use_subsurf:
last_subsurf = None
for mod in me_obj.bdata.modifiers:
if not (mod.show_render or mod.show_viewport):
continue
if mod.type == 'SUBSURF' and mod.subdivision_type == 'CATMULL_CLARK':
last_subsurf = mod
if last_subsurf:
elem_data_single_int32(geom, b"Smoothness", 2)
elem_data_single_int32(geom, b"BoundaryRule", 2)
elem_data_single_int32(geom, b"PreviewDivisionLevels", last_subsurf.levels)
elem_data_single_int32(geom, b"RenderDivisionLevels", last_subsurf.render_levels)
elem_data_single_int32(geom, b"PreserveBorders", 0)
elem_data_single_int32(geom, b"PreserveHardEdges", 0)
elem_data_single_int32(geom, b"PropagateEdgeHardness", 0)
write_crease = last_subsurf.use_creases
elem_data_single_int32(geom, b"GeometryVersion", FBX_GEOMETRY_VERSION)
t_co = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.vertices) * 3
me.vertices.foreach_get("co", t_co)
elem_data_single_float64_array(geom, b"Vertices", chain(*vcos_transformed_gen(t_co, geom_mat_co)))
del t_co
loop_nbr = len(me.loops)
t_pvi = array.array(data_types.ARRAY_INT32, (0,)) * loop_nbr
t_ls = [None] * len(me.polygons)
me.loops.foreach_get("vertex_index", t_pvi)
me.polygons.foreach_get("loop_start", t_ls)
# Add "fake" faces for loose edges.
if scene_data.settings.use_mesh_edges:
t_le = tuple(e.vertices for e in me.edges if e.is_loose)
t_pvi.extend(chain(*t_le))
        t_ls.extend(range(loop_nbr, loop_nbr + len(t_le) * 2, 2))
del t_le
# Edges...
    # Note: Edges are represented as a loop here: each edge uses a single index, which refers to the polygon array.
    # The edge is made of the vertex indexed by that polygon loop and the next one on the same polygon.
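    # For illustration (not part of the exporter's logic): for a single triangle with loops
    # (0, 1, 2) over vertices 0, 1, 2, the edge (1, 2) ends up stored in b"Edges" as the loop
    # index 1, since loop 1 references vertex 1 and the next loop on that polygon references
    # vertex 2.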
t_eli = array.array(data_types.ARRAY_INT32)
edges_map = {}
edges_nbr = 0
if t_ls and t_pvi:
t_ls = set(t_ls)
todo_edges = [None] * len(me.edges) * 2
me.edges.foreach_get("vertices", todo_edges)
todo_edges = set((v1, v2) if v1 < v2 else (v2, v1) for v1, v2 in zip(*(iter(todo_edges),) * 2))
li = 0
vi = vi_start = t_pvi[0]
for li_next, vi_next in enumerate(t_pvi[1:] + t_pvi[:1], start=1):
if li_next in t_ls:
vi2 = vi_start
vi_start = vi_next
else:
vi2 = vi_next
e_key = (vi, vi2) if vi < vi2 else (vi2, vi)
if e_key in todo_edges:
t_eli.append(li)
todo_edges.remove(e_key)
edges_map[e_key] = edges_nbr
edges_nbr += 1
vi = vi_next
li = li_next
    # End of edges!
    # We have to bitwise-negate (^ -1) the last index of each loop: FBX marks the end of a
    # polygon with a negative (complemented) index.
for ls in t_ls:
t_pvi[ls - 1] ^= -1
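    # For illustration: a quad over vertices (0, 1, 2, 3) is written as (0, 1, 2, -4),
    # since 3 ^ -1 == -4.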
# And finally we can write data!
elem_data_single_int32_array(geom, b"PolygonVertexIndex", t_pvi)
elem_data_single_int32_array(geom, b"Edges", t_eli)
del t_pvi
del t_ls
del t_eli
# And now, layers!
# Smoothing.
if smooth_type in {'FACE', 'EDGE'}:
t_ps = None
_map = b""
if smooth_type == 'FACE':
t_ps = array.array(data_types.ARRAY_INT32, (0,)) * len(me.polygons)
me.polygons.foreach_get("use_smooth", t_ps)
_map = b"ByPolygon"
else: # EDGE
# Write Edge Smoothing.
# Note edge is sharp also if it's used by more than two faces, or one of its faces is flat.
t_ps = array.array(data_types.ARRAY_INT32, (0,)) * edges_nbr
sharp_edges = set()
temp_sharp_edges = {}
for p in me.polygons:
if not p.use_smooth:
sharp_edges.update(p.edge_keys)
continue
for k in p.edge_keys:
if temp_sharp_edges.setdefault(k, 0) > 1:
sharp_edges.add(k)
else:
temp_sharp_edges[k] += 1
del temp_sharp_edges
for e in me.edges:
if e.key not in edges_map:
continue
t_ps[edges_map[e.key]] = not (e.use_edge_sharp or (e.key in sharp_edges))
_map = b"ByEdge"
lay_smooth = elem_data_single_int32(geom, b"LayerElementSmoothing", 0)
elem_data_single_int32(lay_smooth, b"Version", FBX_GEOMETRY_SMOOTHING_VERSION)
elem_data_single_string(lay_smooth, b"Name", b"")
elem_data_single_string(lay_smooth, b"MappingInformationType", _map)
elem_data_single_string(lay_smooth, b"ReferenceInformationType", b"Direct")
elem_data_single_int32_array(lay_smooth, b"Smoothing", t_ps)
del t_ps
if write_crease:
t_ec = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * edges_nbr
for e in me.edges:
if e.key not in edges_map:
continue
            # Blender squares crease values before sending them to OpenSubdiv, while other
            # software does not, so we need to compensate that to get similar results through FBX...
t_ec[edges_map[e.key]] = e.crease * e.crease
lay_crease = elem_data_single_int32(geom, b"LayerElementEdgeCrease", 0)
elem_data_single_int32(lay_crease, b"Version", FBX_GEOMETRY_CREASE_VERSION)
elem_data_single_string(lay_crease, b"Name", b"")
elem_data_single_string(lay_crease, b"MappingInformationType", b"ByEdge")
elem_data_single_string(lay_crease, b"ReferenceInformationType", b"Direct")
elem_data_single_float64_array(lay_crease, b"EdgeCrease", t_ec)
del t_ec
# And we are done with edges!
del edges_map
# Loop normals.
tspacenumber = 0
if write_normals:
# NOTE: this is not supported by importer currently.
# XXX Official docs says normals should use IndexToDirect,
# but this does not seem well supported by apps currently...
me.calc_normals_split()
t_ln = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops) * 3
me.loops.foreach_get("normal", t_ln)
t_ln = nors_transformed_gen(t_ln, geom_mat_no)
if 0:
t_ln = tuple(t_ln) # No choice... :/
lay_nor = elem_data_single_int32(geom, b"LayerElementNormal", 0)
elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_NORMAL_VERSION)
elem_data_single_string(lay_nor, b"Name", b"")
elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_nor, b"ReferenceInformationType", b"IndexToDirect")
ln2idx = tuple(set(t_ln))
elem_data_single_float64_array(lay_nor, b"Normals", chain(*ln2idx))
# Normal weights, no idea what it is.
# t_lnw = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(ln2idx)
# elem_data_single_float64_array(lay_nor, b"NormalsW", t_lnw)
ln2idx = {nor: idx for idx, nor in enumerate(ln2idx)}
elem_data_single_int32_array(lay_nor, b"NormalsIndex", (ln2idx[n] for n in t_ln))
del ln2idx
# del t_lnw
else:
lay_nor = elem_data_single_int32(geom, b"LayerElementNormal", 0)
elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_NORMAL_VERSION)
elem_data_single_string(lay_nor, b"Name", b"")
elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct")
elem_data_single_float64_array(lay_nor, b"Normals", chain(*t_ln))
# Normal weights, no idea what it is.
# t_ln = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops)
# elem_data_single_float64_array(lay_nor, b"NormalsW", t_ln)
del t_ln
# tspace
if scene_data.settings.use_tspace:
tspacenumber = len(me.uv_layers)
if tspacenumber:
# We can only compute tspace on tessellated meshes, need to check that here...
t_lt = [None] * len(me.polygons)
me.polygons.foreach_get("loop_total", t_lt)
if any((lt > 4 for lt in t_lt)):
del t_lt
scene_data.settings.report(
{'WARNING'},
"Mesh '%s' has polygons with more than 4 vertices, "
"cannot compute/export tangent space for it" % me.name)
else:
del t_lt
t_ln = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops) * 3
# t_lnw = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops)
uv_names = [uvlayer.name for uvlayer in me.uv_layers]
for name in uv_names:
me.calc_tangents(uvmap=name)
for idx, uvlayer in enumerate(me.uv_layers):
name = uvlayer.name
# Loop bitangents (aka binormals).
# NOTE: this is not supported by importer currently.
me.loops.foreach_get("bitangent", t_ln)
lay_nor = elem_data_single_int32(geom, b"LayerElementBinormal", idx)
elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_BINORMAL_VERSION)
elem_data_single_string_unicode(lay_nor, b"Name", name)
elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct")
elem_data_single_float64_array(lay_nor, b"Binormals",
chain(*nors_transformed_gen(t_ln, geom_mat_no)))
# Binormal weights, no idea what it is.
# elem_data_single_float64_array(lay_nor, b"BinormalsW", t_lnw)
# Loop tangents.
# NOTE: this is not supported by importer currently.
me.loops.foreach_get("tangent", t_ln)
lay_nor = elem_data_single_int32(geom, b"LayerElementTangent", idx)
elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_TANGENT_VERSION)
elem_data_single_string_unicode(lay_nor, b"Name", name)
elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct")
elem_data_single_float64_array(lay_nor, b"Tangents",
chain(*nors_transformed_gen(t_ln, geom_mat_no)))
# Tangent weights, no idea what it is.
# elem_data_single_float64_array(lay_nor, b"TangentsW", t_lnw)
del t_ln
# del t_lnw
me.free_tangents()
me.free_normals_split()
# Write VertexColor Layers.
vcolnumber = len(me.vertex_colors)
if vcolnumber:
def _coltuples_gen(raw_cols):
return zip(*(iter(raw_cols),) * 4)
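        # For illustration: _coltuples_gen([r0, g0, b0, a0, r1, g1, b1, a1]) yields
        # (r0, g0, b0, a0) then (r1, g1, b1, a1), i.e. the flat per-loop RGBA array
        # regrouped into 4-tuples.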
t_lc = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops) * 4
for colindex, collayer in enumerate(me.vertex_colors):
collayer.data.foreach_get("color", t_lc)
lay_vcol = elem_data_single_int32(geom, b"LayerElementColor", colindex)
elem_data_single_int32(lay_vcol, b"Version", FBX_GEOMETRY_VCOLOR_VERSION)
elem_data_single_string_unicode(lay_vcol, b"Name", collayer.name)
elem_data_single_string(lay_vcol, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_vcol, b"ReferenceInformationType", b"IndexToDirect")
col2idx = tuple(set(_coltuples_gen(t_lc)))
elem_data_single_float64_array(lay_vcol, b"Colors", chain(*col2idx)) # Flatten again...
col2idx = {col: idx for idx, col in enumerate(col2idx)}
elem_data_single_int32_array(lay_vcol, b"ColorIndex", (col2idx[c] for c in _coltuples_gen(t_lc)))
del col2idx
del t_lc
del _coltuples_gen
# Write UV layers.
# Note: LayerElementTexture is deprecated since FBX 2011 - luckily!
# Textures are now only related to materials, in FBX!
uvnumber = len(me.uv_layers)
if uvnumber:
# Looks like this mapping is also expected to convey UV islands (arg..... :((((( ).
# So we need to generate unique triplets (uv, vertex_idx) here, not only just based on UV values.
def _uvtuples_gen(raw_uvs, raw_lvidxs):
return zip(zip(*(iter(raw_uvs),) * 2), raw_lvidxs)
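        # For illustration: two loops sharing the same UV coordinate but referencing different
        # vertices yield two distinct (uv, vertex_idx) keys, and thus two distinct UV indices,
        # which is what keeps UV islands apart.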
t_luv = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops) * 2
t_lvidx = array.array(data_types.ARRAY_INT32, (0,)) * len(me.loops)
me.loops.foreach_get("vertex_index", t_lvidx)
for uvindex, uvlayer in enumerate(me.uv_layers):
uvlayer.data.foreach_get("uv", t_luv)
lay_uv = elem_data_single_int32(geom, b"LayerElementUV", uvindex)
elem_data_single_int32(lay_uv, b"Version", FBX_GEOMETRY_UV_VERSION)
elem_data_single_string_unicode(lay_uv, b"Name", uvlayer.name)
elem_data_single_string(lay_uv, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_uv, b"ReferenceInformationType", b"IndexToDirect")
uv_ids = tuple(set(_uvtuples_gen(t_luv, t_lvidx)))
elem_data_single_float64_array(lay_uv, b"UV", chain(*(uv for uv, vidx in uv_ids))) # Flatten again...
uv2idx = {uv_id: idx for idx, uv_id in enumerate(uv_ids)}
elem_data_single_int32_array(lay_uv, b"UVIndex", (uv2idx[uv_id] for uv_id in _uvtuples_gen(t_luv, t_lvidx)))
del uv2idx
del uv_ids
del t_luv
del t_lvidx
del _uvtuples_gen
# Face's materials.
me_fbxmaterials_idx = scene_data.mesh_material_indices.get(me)
if me_fbxmaterials_idx is not None:
me_blmaterials = [mat_slot.material for mat_slot in me_obj.material_slots]
if me_fbxmaterials_idx and me_blmaterials:
lay_ma = elem_data_single_int32(geom, b"LayerElementMaterial", 0)
elem_data_single_int32(lay_ma, b"Version", FBX_GEOMETRY_MATERIAL_VERSION)
elem_data_single_string(lay_ma, b"Name", b"")
nbr_mats = len(me_fbxmaterials_idx)
if nbr_mats > 1:
t_pm = array.array(data_types.ARRAY_INT32, (0,)) * len(me.polygons)
me.polygons.foreach_get("material_index", t_pm)
blmaterials_to_fbxmaterials_idxs = [me_fbxmaterials_idx[m]
for m in me_blmaterials if m in me_fbxmaterials_idx]
ma_idx_limit = len(blmaterials_to_fbxmaterials_idxs)
def_ma = blmaterials_to_fbxmaterials_idxs[0]
_gen = (blmaterials_to_fbxmaterials_idxs[m] if m < ma_idx_limit else def_ma for m in t_pm)
t_pm = array.array(data_types.ARRAY_INT32, _gen)
elem_data_single_string(lay_ma, b"MappingInformationType", b"ByPolygon")
elem_data_single_string(lay_ma, b"ReferenceInformationType", b"IndexToDirect")
elem_data_single_int32_array(lay_ma, b"Materials", t_pm)
del t_pm
else:
elem_data_single_string(lay_ma, b"MappingInformationType", b"AllSame")
elem_data_single_string(lay_ma, b"ReferenceInformationType", b"IndexToDirect")
elem_data_single_int32_array(lay_ma, b"Materials", [0])
layer = elem_data_single_int32(geom, b"Layer", 0)
elem_data_single_int32(layer, b"Version", FBX_GEOMETRY_LAYER_VERSION)
if write_normals:
lay_nor = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_nor, b"Type", b"LayerElementNormal")
elem_data_single_int32(lay_nor, b"TypedIndex", 0)
if tspacenumber:
lay_binor = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_binor, b"Type", b"LayerElementBinormal")
elem_data_single_int32(lay_binor, b"TypedIndex", 0)
lay_tan = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_tan, b"Type", b"LayerElementTangent")
elem_data_single_int32(lay_tan, b"TypedIndex", 0)
if smooth_type in {'FACE', 'EDGE'}:
lay_smooth = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_smooth, b"Type", b"LayerElementSmoothing")
elem_data_single_int32(lay_smooth, b"TypedIndex", 0)
if write_crease:
lay_smooth = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_smooth, b"Type", b"LayerElementEdgeCrease")
elem_data_single_int32(lay_smooth, b"TypedIndex", 0)
if vcolnumber:
lay_vcol = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_vcol, b"Type", b"LayerElementColor")
elem_data_single_int32(lay_vcol, b"TypedIndex", 0)
if uvnumber:
lay_uv = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_uv, b"Type", b"LayerElementUV")
elem_data_single_int32(lay_uv, b"TypedIndex", 0)
if me_fbxmaterials_idx is not None:
lay_ma = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_ma, b"Type", b"LayerElementMaterial")
elem_data_single_int32(lay_ma, b"TypedIndex", 0)
for vcolidx, uvidx, tspaceidx in zip_longest(range(1, vcolnumber), range(1, uvnumber), range(1, tspacenumber),
fillvalue=0):
layer = elem_data_single_int32(geom, b"Layer", max(vcolidx, uvidx))
elem_data_single_int32(layer, b"Version", FBX_GEOMETRY_LAYER_VERSION)
if vcolidx:
lay_vcol = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_vcol, b"Type", b"LayerElementColor")
elem_data_single_int32(lay_vcol, b"TypedIndex", vcolidx)
if uvidx:
lay_uv = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_uv, b"Type", b"LayerElementUV")
elem_data_single_int32(lay_uv, b"TypedIndex", uvidx)
if tspaceidx:
lay_binor = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_binor, b"Type", b"LayerElementBinormal")
elem_data_single_int32(lay_binor, b"TypedIndex", tspaceidx)
lay_tan = elem_empty(layer, b"LayerElement")
elem_data_single_string(lay_tan, b"Type", b"LayerElementTangent")
elem_data_single_int32(lay_tan, b"TypedIndex", tspaceidx)
fbx_data_mesh_shapes_elements(root, me_obj, me, scene_data, tmpl, props)
elem_props_template_finalize(tmpl, props)
done_meshes.add(me_key)
def fbx_data_material_elements(root, ma, scene_data):
ambient_color = (0.0, 0.0, 0.0)
if scene_data.data_world:
ambient_color = next(iter(scene_data.data_world.keys())).color
ma_wrap = node_shader_utils.PrincipledBSDFWrapper(ma, is_readonly=True)
ma_key, _objs = scene_data.data_materials[ma]
ma_type = b"Phong"
fbx_ma = elem_data_single_int64(root, b"Material", get_fbx_uuid_from_key(ma_key))
fbx_ma.add_string(fbx_name_class(ma.name.encode(), b"Material"))
fbx_ma.add_string(b"")
elem_data_single_int32(fbx_ma, b"Version", FBX_MATERIAL_VERSION)
elem_data_single_string(fbx_ma, b"ShadingModel", ma_type)
elem_data_single_int32(fbx_ma, b"MultiLayer", 0)
tmpl = elem_props_template_init(scene_data.templates, b"Material")
props = elem_properties(fbx_ma)
elem_props_template_set(tmpl, props, "p_string", b"ShadingModel", ma_type.decode())
elem_props_template_set(tmpl, props, "p_color", b"DiffuseColor", ma_wrap.base_color)
elem_props_template_set(tmpl, props, "p_number", b"DiffuseFactor", 1.0)
elem_props_template_set(tmpl, props, "p_color", b"EmissiveColor", ma_wrap.emission_color)
elem_props_template_set(tmpl, props, "p_number", b"EmissiveFactor", 1.0)
elem_props_template_set(tmpl, props, "p_color", b"AmbientColor", ambient_color)
elem_props_template_set(tmpl, props, "p_number", b"AmbientFactor", 0.0)
if ma_wrap.alpha < 1.0e-5 or ma_wrap.alpha > (1.0 - 1.0e-5):
elem_props_template_set(tmpl, props, "p_color", b"TransparentColor", (1.0 - ma_wrap.alpha,) * 3)
else:
elem_props_template_set(tmpl, props, "p_color", b"TransparentColor", ma_wrap.base_color)
elem_props_template_set(tmpl, props, "p_number", b"TransparencyFactor", 1.0 - ma_wrap.alpha)
elem_props_template_set(tmpl, props, "p_number", b"Opacity", ma_wrap.alpha)
elem_props_template_set(tmpl, props, "p_vector_3d", b"NormalMap", (0.0, 0.0, 0.0))
elem_props_template_set(tmpl, props, "p_double", b"BumpFactor", ma_wrap.normalmap_strength)
# Not sure about those...
# TODO: use specular tint?
elem_props_template_set(tmpl, props, "p_color", b"SpecularColor", ma_wrap.base_color)
elem_props_template_set(tmpl, props, "p_number", b"SpecularFactor", ma_wrap.specular / 2.0)
# See Material template about those two!
# XXX Totally empirical conversion, trying to adapt it
# (from 0.0 - 100.0 FBX shininess range to 1.0 - 0.0 Principled BSDF range)...
shininess = (1.0 - ma_wrap.roughness) * 10
shininess *= shininess
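    # For illustration: roughness 0.0 maps to shininess 100.0, 0.5 to 25.0, and 1.0 to 0.0.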
elem_props_template_set(tmpl, props, "p_number", b"Shininess", shininess)
elem_props_template_set(tmpl, props, "p_number", b"ShininessExponent", shininess)
elem_props_template_set(tmpl, props, "p_color", b"ReflectionColor", ma_wrap.base_color)
elem_props_template_set(tmpl, props, "p_number", b"ReflectionFactor", ma_wrap.metallic)
elem_props_template_finalize(tmpl, props)
# Custom properties.
if scene_data.settings.use_custom_props:
fbx_data_element_custom_properties(props, ma)
def _gen_vid_path(img, scene_data):
msetts = scene_data.settings.media_settings
fname_rel = bpy_extras.io_utils.path_reference(img.filepath, msetts.base_src, msetts.base_dst, msetts.path_mode,
msetts.subdir, msetts.copy_set, img.library)
fname_abs = os.path.normpath(os.path.abspath(os.path.join(msetts.base_dst, fname_rel)))
return fname_abs, fname_rel
def fbx_data_texture_file_elements(root, blender_tex_key, scene_data):
# XXX All this is very fuzzy to me currently...
# Textures do not seem to use properties as much as they could.
# For now assuming most logical and simple stuff.
ma, sock_name = blender_tex_key
ma_wrap = node_shader_utils.PrincipledBSDFWrapper(ma, is_readonly=True)
tex_key, _fbx_prop = scene_data.data_textures[blender_tex_key]
tex = getattr(ma_wrap, sock_name)
img = tex.image
fname_abs, fname_rel = _gen_vid_path(img, scene_data)
fbx_tex = elem_data_single_int64(root, b"Texture", get_fbx_uuid_from_key(tex_key))
fbx_tex.add_string(fbx_name_class(sock_name.encode(), b"Texture"))
fbx_tex.add_string(b"")
elem_data_single_string(fbx_tex, b"Type", b"TextureVideoClip")
elem_data_single_int32(fbx_tex, b"Version", FBX_TEXTURE_VERSION)
elem_data_single_string(fbx_tex, b"TextureName", fbx_name_class(sock_name.encode(), b"Texture"))
elem_data_single_string(fbx_tex, b"Media", fbx_name_class(img.name.encode(), b"Video"))
elem_data_single_string_unicode(fbx_tex, b"FileName", fname_abs)
elem_data_single_string_unicode(fbx_tex, b"RelativeFilename", fname_rel)
alpha_source = 0 # None
if img.alpha_mode != 'NONE':
# ~ if tex.texture.use_calculate_alpha:
# ~ alpha_source = 1 # RGBIntensity as alpha.
# ~ else:
# ~ alpha_source = 2 # Black, i.e. alpha channel.
alpha_source = 2 # Black, i.e. alpha channel.
# BlendMode not useful for now, only affects layered textures afaics.
mapping = 0 # UV.
uvset = None
if tex.texcoords == 'ORCO': # XXX Others?
if tex.projection == 'FLAT':
mapping = 1 # Planar
elif tex.projection == 'CUBE':
mapping = 4 # Box
elif tex.projection == 'TUBE':
mapping = 3 # Cylindrical
elif tex.projection == 'SPHERE':
mapping = 2 # Spherical
elif tex.texcoords == 'UV':
mapping = 0 # UV
# Yuck, UVs are linked by mere names it seems... :/
# XXX TODO how to get that now???
# uvset = tex.uv_layer
wrap_mode = 1 # Clamp
if tex.extension == 'REPEAT':
wrap_mode = 0 # Repeat
tmpl = elem_props_template_init(scene_data.templates, b"TextureFile")
props = elem_properties(fbx_tex)
elem_props_template_set(tmpl, props, "p_enum", b"AlphaSource", alpha_source)
elem_props_template_set(tmpl, props, "p_bool", b"PremultiplyAlpha",
img.alpha_mode in {'STRAIGHT'}) # Or is it PREMUL?
elem_props_template_set(tmpl, props, "p_enum", b"CurrentMappingType", mapping)
if uvset is not None:
elem_props_template_set(tmpl, props, "p_string", b"UVSet", uvset)
elem_props_template_set(tmpl, props, "p_enum", b"WrapModeU", wrap_mode)
elem_props_template_set(tmpl, props, "p_enum", b"WrapModeV", wrap_mode)
elem_props_template_set(tmpl, props, "p_vector_3d", b"Translation", tex.translation)
elem_props_template_set(tmpl, props, "p_vector_3d", b"Rotation", (-r for r in tex.rotation))
elem_props_template_set(tmpl, props, "p_vector_3d", b"Scaling", (((1.0 / s) if s != 0.0 else 1.0) for s in tex.scale))
# UseMaterial should always be ON imho.
elem_props_template_set(tmpl, props, "p_bool", b"UseMaterial", True)
elem_props_template_set(tmpl, props, "p_bool", b"UseMipMap", False)
elem_props_template_finalize(tmpl, props)
# No custom properties, since that's not a data-block anymore.
def fbx_data_video_elements(root, vid, scene_data):
msetts = scene_data.settings.media_settings
vid_key, _texs = scene_data.data_videos[vid]
fname_abs, fname_rel = _gen_vid_path(vid, scene_data)
fbx_vid = elem_data_single_int64(root, b"Video", get_fbx_uuid_from_key(vid_key))
fbx_vid.add_string(fbx_name_class(vid.name.encode(), b"Video"))
fbx_vid.add_string(b"Clip")
elem_data_single_string(fbx_vid, b"Type", b"Clip")
tmpl = elem_props_template_init(scene_data.templates, b"Video")
props = elem_properties(fbx_vid)
elem_props_template_set(tmpl, props, "p_string_url", b"Path", fname_abs)
elem_props_template_finalize(tmpl, props)
elem_data_single_int32(fbx_vid, b"UseMipMap", 0)
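    # Note: Video nodes appear to use b"Filename" while Texture nodes use b"FileName";
    # the inconsistent casing seems to come from the FBX format itself, it is not a typo.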
elem_data_single_string_unicode(fbx_vid, b"Filename", fname_abs)
elem_data_single_string_unicode(fbx_vid, b"RelativeFilename", fname_rel)
if scene_data.settings.media_settings.embed_textures:
if vid.packed_file is not None:
if fname_abs not in msetts.embedded_set:
elem_data_single_bytes(fbx_vid, b"Content", vid.packed_file.data)
msetts.embedded_set.add(fname_abs)
else:
filepath = bpy.path.abspath(vid.filepath)
if filepath not in msetts.embedded_set:
try:
with open(filepath, 'br') as f:
elem_data_single_bytes(fbx_vid, b"Content", f.read())
except Exception as e:
print("WARNING: embedding file {} failed ({})".format(filepath, e))
elem_data_single_bytes(fbx_vid, b"Content", b"")
msetts.embedded_set.add(filepath)
# Sounds suspect, but let's try it!
def fbx_data_armature_elements(root, arm_obj, scene_data):
mat_world_arm = arm_obj.fbx_object_matrix(scene_data, global_space=True)
bones = tuple(bo_obj for bo_obj in arm_obj.bones if bo_obj in scene_data.objects)
bone_radius_scale = 33.0
for bo_obj in bones:
bo = bo_obj.bdata
bo_data_key = scene_data.data_bones[bo_obj]
fbx_bo = elem_data_single_int64(root, b"NodeAttribute", get_fbx_uuid_from_key(bo_data_key))
fbx_bo.add_string(fbx_name_class(bo.name.encode(), b"NodeAttribute"))
fbx_bo.add_string(b"LimbNode")
elem_data_single_string(fbx_bo, b"TypeFlags", b"Skeleton")
tmpl = elem_props_template_init(scene_data.templates, b"Bone")
props = elem_properties(fbx_bo)
elem_props_template_set(tmpl, props, "p_double", b"Size", bo.head_radius * bone_radius_scale)
elem_props_template_finalize(tmpl, props)
if scene_data.settings.use_custom_props:
fbx_data_element_custom_properties(props, bo)
        # Blender's bone length could be stored as a custom property for round-tripping;
        # FBX's own LimbLength is unusable for this, being a 0-1 scale factor (see
        # http://docs.autodesk.com/FBX/2014/ENU/FBX-SDK-Documentation/cpp_ref/class_fbx_skeleton.html#a9bbe2a70f4ed82cd162620259e649f0f ).
        # elem_props_set(props, "p_double", "BlenderBoneLength".encode(), (bo.tail_local - bo.head_local).length, custom=True)
# Skin deformers and BindPoses.
# Note: we might also use Deformers for our "parent to vertex" stuff???
deformer = scene_data.data_deformers_skin.get(arm_obj, None)
if deformer is not None:
for me, (skin_key, ob_obj, clusters) in deformer.items():
# BindPose.
mat_world_obj, mat_world_bones = fbx_data_bindpose_element(root, ob_obj, me, scene_data,
arm_obj, mat_world_arm, bones)
# Deformer.
fbx_skin = elem_data_single_int64(root, b"Deformer", get_fbx_uuid_from_key(skin_key))
fbx_skin.add_string(fbx_name_class(arm_obj.name.encode(), b"Deformer"))
fbx_skin.add_string(b"Skin")
elem_data_single_int32(fbx_skin, b"Version", FBX_DEFORMER_SKIN_VERSION)
            elem_data_single_float64(fbx_skin, b"Link_DeformAcuracy", 50.0)  # Only vague idea what it is... (the misspelling is part of the FBX format)
            # Pre-process vertex weights (also to check vertices assigned to more than four bones).
ob = ob_obj.bdata
bo_vg_idx = {bo_obj.bdata.name: ob.vertex_groups[bo_obj.bdata.name].index
for bo_obj in clusters.keys() if bo_obj.bdata.name in ob.vertex_groups}
valid_idxs = set(bo_vg_idx.values())
vgroups = {vg.index: {} for vg in ob.vertex_groups}
verts_vgroups = (sorted(((vg.group, vg.weight) for vg in v.groups if vg.weight and vg.group in valid_idxs),
key=lambda e: e[1], reverse=True)
for v in me.vertices)
for idx, vgs in enumerate(verts_vgroups):
for vg_idx, w in vgs:
vgroups[vg_idx][idx] = w
for bo_obj, clstr_key in clusters.items():
bo = bo_obj.bdata
# Find which vertices are affected by this bone/vgroup pair, and matching weights.
# Note we still write a cluster for bones not affecting the mesh, to get 'rest pose' data
# (the TransformBlah matrices).
vg_idx = bo_vg_idx.get(bo.name, None)
indices, weights = ((), ()) if vg_idx is None or not vgroups[vg_idx] else zip(*vgroups[vg_idx].items())
# Create the cluster.
fbx_clstr = elem_data_single_int64(root, b"Deformer", get_fbx_uuid_from_key(clstr_key))
fbx_clstr.add_string(fbx_name_class(bo.name.encode(), b"SubDeformer"))
fbx_clstr.add_string(b"Cluster")
elem_data_single_int32(fbx_clstr, b"Version", FBX_DEFORMER_CLUSTER_VERSION)
# No idea what that user data might be...
fbx_userdata = elem_data_single_string(fbx_clstr, b"UserData", b"")
fbx_userdata.add_string(b"")
if indices:
elem_data_single_int32_array(fbx_clstr, b"Indexes", indices)
elem_data_single_float64_array(fbx_clstr, b"Weights", weights)
# Transform, TransformLink and TransformAssociateModel matrices...
                # They seem to be duplicates of the BindPose ones??? Have armature (associatemodel) in addition, though.
# WARNING! Even though official FBX API presents Transform in global space,
# **it is stored in bone space in FBX data!** See:
# http://area.autodesk.com/forum/autodesk-fbx/fbx-sdk/why-the-values-return-
# by-fbxcluster-gettransformmatrix-x-not-same-with-the-value-in-ascii-fbx-file/
elem_data_single_float64_array(fbx_clstr, b"Transform",
matrix4_to_array(mat_world_bones[bo_obj].inverted_safe() @ mat_world_obj))
elem_data_single_float64_array(fbx_clstr, b"TransformLink", matrix4_to_array(mat_world_bones[bo_obj]))
elem_data_single_float64_array(fbx_clstr, b"TransformAssociateModel", matrix4_to_array(mat_world_arm))
def fbx_data_leaf_bone_elements(root, scene_data):
# Write a dummy leaf bone that is used by applications to show the length of the last bone in a chain
for (node_name, _par_uuid, node_uuid, attr_uuid, matrix, hide, size) in scene_data.data_leaf_bones:
# Bone 'data'...
fbx_bo = elem_data_single_int64(root, b"NodeAttribute", attr_uuid)
fbx_bo.add_string(fbx_name_class(node_name.encode(), b"NodeAttribute"))
fbx_bo.add_string(b"LimbNode")
elem_data_single_string(fbx_bo, b"TypeFlags", b"Skeleton")
tmpl = elem_props_template_init(scene_data.templates, b"Bone")
props = elem_properties(fbx_bo)
elem_props_template_set(tmpl, props, "p_double", b"Size", size)
elem_props_template_finalize(tmpl, props)
# And bone object.
model = elem_data_single_int64(root, b"Model", node_uuid)
model.add_string(fbx_name_class(node_name.encode(), b"Model"))
model.add_string(b"LimbNode")
elem_data_single_int32(model, b"Version", FBX_MODELS_VERSION)
# Object transform info.
loc, rot, scale = matrix.decompose()
rot = rot.to_euler('XYZ')
rot = tuple(convert_rad_to_deg_iter(rot))
tmpl = elem_props_template_init(scene_data.templates, b"Model")
# For now add only loc/rot/scale...
props = elem_properties(model)
# Generated leaf bones are obviously never animated!
elem_props_template_set(tmpl, props, "p_lcl_translation", b"Lcl Translation", loc)
elem_props_template_set(tmpl, props, "p_lcl_rotation", b"Lcl Rotation", rot)
elem_props_template_set(tmpl, props, "p_lcl_scaling", b"Lcl Scaling", scale)
elem_props_template_set(tmpl, props, "p_visibility", b"Visibility", float(not hide))
# Absolutely no idea what this is, but seems mandatory for validity of the file, and defaults to
# invalid -1 value...
elem_props_template_set(tmpl, props, "p_integer", b"DefaultAttributeIndex", 0)
elem_props_template_set(tmpl, props, "p_enum", b"InheritType", 1) # RSrs
# Those settings would obviously need to be edited in a complete version of the exporter, may depends on
# object type, etc.
elem_data_single_int32(model, b"MultiLayer", 0)
elem_data_single_int32(model, b"MultiTake", 0)
elem_data_single_bool(model, b"Shading", True)
elem_data_single_string(model, b"Culling", b"CullingOff")
elem_props_template_finalize(tmpl, props)
def fbx_data_object_elements(root, ob_obj, scene_data):
obj_type = b"Null" # default, sort of empty...
if ob_obj.is_bone:
obj_type = b"LimbNode"
elif (ob_obj.type == 'ARMATURE'):
if scene_data.settings.armature_nodetype == 'ROOT':
obj_type = b"Root"
elif scene_data.settings.armature_nodetype == 'LIMBNODE':
obj_type = b"LimbNode"
else: # Default, preferred option...
obj_type = b"Null"
elif (ob_obj.type in BLENDER_OBJECT_TYPES_MESHLIKE):
obj_type = b"Mesh"
elif (ob_obj.type == 'LIGHT'):
obj_type = b"Light"
elif (ob_obj.type == 'CAMERA'):
obj_type = b"Camera"
model = elem_data_single_int64(root, b"Model", ob_obj.fbx_uuid)
model.add_string(fbx_name_class(ob_obj.name.encode(), b"Model"))
model.add_string(obj_type)
elem_data_single_int32(model, b"Version", FBX_MODELS_VERSION)
# Object transform info.
loc, rot, scale, matrix, matrix_rot = ob_obj.fbx_object_tx(scene_data)
rot = tuple(convert_rad_to_deg_iter(rot))
tmpl = elem_props_template_init(scene_data.templates, b"Model")
# For now add only loc/rot/scale...
props = elem_properties(model)
elem_props_template_set(tmpl, props, "p_lcl_translation", b"Lcl Translation", loc,
animatable=True, animated=((ob_obj.key, "Lcl Translation") in scene_data.animated))
elem_props_template_set(tmpl, props, "p_lcl_rotation", b"Lcl Rotation", rot,
animatable=True, animated=((ob_obj.key, "Lcl Rotation") in scene_data.animated))
elem_props_template_set(tmpl, props, "p_lcl_scaling", b"Lcl Scaling", scale,
animatable=True, animated=((ob_obj.key, "Lcl Scaling") in scene_data.animated))
elem_props_template_set(tmpl, props, "p_visibility", b"Visibility", float(not ob_obj.hide))
# Absolutely no idea what this is, but seems mandatory for validity of the file, and defaults to
# invalid -1 value...
elem_props_template_set(tmpl, props, "p_integer", b"DefaultAttributeIndex", 0)
elem_props_template_set(tmpl, props, "p_enum", b"InheritType", 1) # RSrs
# Custom properties.
if scene_data.settings.use_custom_props:
# Here we want customprops from the 'pose' bone, not the 'edit' bone...
bdata = ob_obj.bdata_pose_bone if ob_obj.is_bone else ob_obj.bdata
fbx_data_element_custom_properties(props, bdata)
# Those settings would obviously need to be edited in a complete version of the exporter, may depends on
# object type, etc.
elem_data_single_int32(model, b"MultiLayer", 0)
elem_data_single_int32(model, b"MultiTake", 0)
elem_data_single_bool(model, b"Shading", True)
elem_data_single_string(model, b"Culling", b"CullingOff")
if obj_type == b"Camera":
# Why, oh why are FBX cameras such a mess???
# And WHY add camera data HERE??? Not even sure this is needed...
render = scene_data.scene.render
width = render.resolution_x * 1.0
height = render.resolution_y * 1.0
elem_props_template_set(tmpl, props, "p_enum", b"ResolutionMode", 0) # Don't know what it means
elem_props_template_set(tmpl, props, "p_double", b"AspectW", width)
elem_props_template_set(tmpl, props, "p_double", b"AspectH", height)
elem_props_template_set(tmpl, props, "p_bool", b"ViewFrustum", True)
elem_props_template_set(tmpl, props, "p_enum", b"BackgroundMode", 0)
elem_props_template_set(tmpl, props, "p_bool", b"ForegroundTransparent", True)
elem_props_template_finalize(tmpl, props)
def fbx_data_animation_elements(root, scene_data):
animations = scene_data.animations
if not animations:
return
scene = scene_data.scene
fps = scene.render.fps / scene.render.fps_base
def keys_to_ktimes(keys):
return (int(v) for v in convert_sec_to_ktime_iter((f / fps for f, _v in keys)))
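    # For illustration (assuming the usual FBX_KTIME of 46,186,158,000 units per second):
    # at 24 fps, a key on frame 12 lands at 0.5 s, i.e. ktime 23093079000.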
# Animation stacks.
for astack_key, alayers, alayer_key, name, f_start, f_end in animations:
astack = elem_data_single_int64(root, b"AnimationStack", get_fbx_uuid_from_key(astack_key))
astack.add_string(fbx_name_class(name, b"AnimStack"))
astack.add_string(b"")
astack_tmpl = elem_props_template_init(scene_data.templates, b"AnimationStack")
astack_props = elem_properties(astack)
r = scene_data.scene.render
fps = r.fps / r.fps_base
start = int(convert_sec_to_ktime(f_start / fps))
end = int(convert_sec_to_ktime(f_end / fps))
elem_props_template_set(astack_tmpl, astack_props, "p_timestamp", b"LocalStart", start)
elem_props_template_set(astack_tmpl, astack_props, "p_timestamp", b"LocalStop", end)
elem_props_template_set(astack_tmpl, astack_props, "p_timestamp", b"ReferenceStart", start)
elem_props_template_set(astack_tmpl, astack_props, "p_timestamp", b"ReferenceStop", end)
elem_props_template_finalize(astack_tmpl, astack_props)
# For now, only one layer for all animations.
alayer = elem_data_single_int64(root, b"AnimationLayer", get_fbx_uuid_from_key(alayer_key))
alayer.add_string(fbx_name_class(name, b"AnimLayer"))
alayer.add_string(b"")
for ob_obj, (alayer_key, acurvenodes) in alayers.items():
# Animation layer.
# alayer = elem_data_single_int64(root, b"AnimationLayer", get_fbx_uuid_from_key(alayer_key))
# alayer.add_string(fbx_name_class(ob_obj.name.encode(), b"AnimLayer"))
# alayer.add_string(b"")
for fbx_prop, (acurvenode_key, acurves, acurvenode_name) in acurvenodes.items():
# Animation curve node.
acurvenode = elem_data_single_int64(root, b"AnimationCurveNode", get_fbx_uuid_from_key(acurvenode_key))
acurvenode.add_string(fbx_name_class(acurvenode_name.encode(), b"AnimCurveNode"))
acurvenode.add_string(b"")
acn_tmpl = elem_props_template_init(scene_data.templates, b"AnimationCurveNode")
acn_props = elem_properties(acurvenode)
for fbx_item, (acurve_key, def_value, keys, _acurve_valid) in acurves.items():
elem_props_template_set(acn_tmpl, acn_props, "p_number", fbx_item.encode(),
def_value, animatable=True)
# Only create Animation curve if needed!
if keys:
acurve = elem_data_single_int64(root, b"AnimationCurve", get_fbx_uuid_from_key(acurve_key))
acurve.add_string(fbx_name_class(b"", b"AnimCurve"))
acurve.add_string(b"")
# key attributes...
nbr_keys = len(keys)
# flags...
keyattr_flags = (
1 << 2 | # interpolation mode, 1 = constant, 2 = linear, 3 = cubic.
1 << 8 | # tangent mode, 8 = auto, 9 = TCB, 10 = user, 11 = generic break,
1 << 13 | # tangent mode, 12 = generic clamp, 13 = generic time independent,
1 << 14 | # tangent mode, 13 + 14 = generic clamp progressive.
0,
)
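                        # For illustration: these bits OR together to 24836 (4 | 256 | 8192 | 16384);
                        # the single entry is applied to all keys via KeyAttrRefCount below.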
# Maybe values controlling TCB & co???
keyattr_datafloat = (0.0, 0.0, 9.419963346924634e-30, 0.0)
# And now, the *real* data!
elem_data_single_float64(acurve, b"Default", def_value)
elem_data_single_int32(acurve, b"KeyVer", FBX_ANIM_KEY_VERSION)
elem_data_single_int64_array(acurve, b"KeyTime", keys_to_ktimes(keys))
elem_data_single_float32_array(acurve, b"KeyValueFloat", (v for _f, v in keys))
elem_data_single_int32_array(acurve, b"KeyAttrFlags", keyattr_flags)
elem_data_single_float32_array(acurve, b"KeyAttrDataFloat", keyattr_datafloat)
elem_data_single_int32_array(acurve, b"KeyAttrRefCount", (nbr_keys,))
elem_props_template_finalize(acn_tmpl, acn_props)
# ##### Top-level FBX data container. #####
# Mapping Blender -> FBX (principled_socket_name, fbx_name).
PRINCIPLED_TEXTURE_SOCKETS_TO_FBX = (
# ("diffuse", "diffuse", b"DiffuseFactor"),
("base_color_texture", b"DiffuseColor"),
("alpha_texture", b"TransparencyFactor"), # Will be inverted in fact, not much we can do really...
# ("base_color_texture", b"TransparentColor"), # Uses diffuse color in Blender!
# ("emit", "emit", b"EmissiveFactor"),
("emission_color_texture", b"EmissiveColor"),
# ("ambient", "ambient", b"AmbientFactor"),
# ("", "", b"AmbientColor"), # World stuff in Blender, for now ignore...
("normalmap_texture", b"NormalMap"),
# Note: unsure about those... :/
# ("", "", b"Bump"),
# ("", "", b"BumpFactor"),
# ("", "", b"DisplacementColor"),
# ("", "", b"DisplacementFactor"),
("specular_texture", b"SpecularFactor"),
# ("base_color", b"SpecularColor"), # TODO: use tint?
# See Material template about those two!
("roughness_texture", b"Shininess"),
("roughness_texture", b"ShininessExponent"),
# ("mirror", "mirror", b"ReflectionColor"),
("metallic_texture", b"ReflectionFactor"),
)
def fbx_skeleton_from_armature(scene, settings, arm_obj, objects, data_meshes,
data_bones, data_deformers_skin, data_empties, arm_parents):
# We need some data for our armature 'object' too!!!
data_empties[arm_obj] = get_blender_empty_key(arm_obj.bdata)
arm_data = arm_obj.bdata.data
bones = {}
for bo in arm_obj.bones:
if settings.use_armature_deform_only:
if bo.bdata.use_deform:
bones[bo] = True
bo_par = bo.parent
while bo_par.is_bone:
bones[bo_par] = True
bo_par = bo_par.parent
elif bo not in bones: # Do not override if already set in the loop above!
bones[bo] = False
else:
bones[bo] = True
bones = {bo: None for bo, use in bones.items() if use}
if not bones:
return
data_bones.update((bo, get_blender_bone_key(arm_obj.bdata, bo.bdata)) for bo in bones)
for ob_obj in objects:
if not ob_obj.is_deformed_by_armature(arm_obj):
continue
# Always handled by an Armature modifier...
found = False
for mod in ob_obj.bdata.modifiers:
if mod.type not in {'ARMATURE'} or not mod.object:
continue
# We only support vertex groups binding method, not bone envelopes one!
if mod.object in {arm_obj.bdata, arm_obj.bdata.proxy} and mod.use_vertex_groups:
found = True
break
if not found:
continue
# Now we have a mesh using this armature.
        # Note: bind poses have no relations at all (no connections), so no need for any preprocessing for them.
# Create skin & clusters relations (note skins are connected to geometry, *not* model!).
_key, me, _free = data_meshes[ob_obj]
clusters = {bo: get_blender_bone_cluster_key(arm_obj.bdata, me, bo.bdata) for bo in bones}
data_deformers_skin.setdefault(arm_obj, {})[me] = (get_blender_armature_skin_key(arm_obj.bdata, me),
ob_obj, clusters)
# We don't want a regular parent relationship for those in FBX...
arm_parents.add((arm_obj, ob_obj))
ob_obj.parented_to_armature = True
objects.update(bones)
def fbx_generate_leaf_bones(settings, data_bones):
child_count = {bo: 0 for bo in data_bones.keys()}
for bo in data_bones.keys():
if bo.parent and bo.parent.is_bone:
child_count[bo.parent] += 1
bone_radius_scale = settings.global_scale * 33.0
leaf_parents = [bo for bo, count in child_count.items() if count == 0]
leaf_bones = []
for parent in leaf_parents:
node_name = parent.name + "_end"
parent_uuid = parent.fbx_uuid
parent_key = parent.key
node_uuid = get_fbx_uuid_from_key(parent_key + "_end_node")
attr_uuid = get_fbx_uuid_from_key(parent_key + "_end_nodeattr")
hide = parent.hide
size = parent.bdata.head_radius * bone_radius_scale
bone_length = (parent.bdata.tail_local - parent.bdata.head_local).length
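        # The generated leaf sits at its parent's tail: a translation of bone_length along the
        # bone's +Y axis, conjugated by the bone-correction matrices below when those are set.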
matrix = Matrix.Translation((0, bone_length, 0))
if settings.bone_correction_matrix_inv:
matrix = settings.bone_correction_matrix_inv @ matrix
if settings.bone_correction_matrix:
matrix = matrix @ settings.bone_correction_matrix
leaf_bones.append((node_name, parent_uuid, node_uuid, attr_uuid, matrix, hide, size))
return leaf_bones
def fbx_animations_do(scene_data, ref_id, f_start, f_end, start_zero, objects=None, force_keep=False):
bake_step = scene_data.settings.bake_anim_step
simplify_fac = scene_data.settings.bake_anim_simplify_factor
scene = scene_data.scene
depsgraph = scene_data.depsgraph
force_keying = scene_data.settings.bake_anim_use_all_bones
force_sek = scene_data.settings.bake_anim_force_startend_keying
if objects is not None:
for ob_obj in tuple(objects):
if not ob_obj.is_object:
continue
if ob_obj.type == 'ARMATURE':
objects |= {bo_obj for bo_obj in ob_obj.bones if bo_obj in scene_data.objects}
for dp_obj in ob_obj.dupli_list_gen(depsgraph):
if dp_obj in scene_data.objects:
objects.add(dp_obj)
else:
objects = scene_data.objects
back_currframe = scene.frame_current
animdata_ob = {}
p_rots = {}
for ob_obj in objects:
if ob_obj.parented_to_armature:
continue
ACNW = AnimationCurveNodeWrapper
loc, rot, scale, _m, _mr = ob_obj.fbx_object_tx(scene_data)
rot_deg = tuple(convert_rad_to_deg_iter(rot))
force_key = (simplify_fac == 0.0) or (ob_obj.is_bone and force_keying)
animdata_ob[ob_obj] = (ACNW(ob_obj.key, 'LCL_TRANSLATION', force_key, force_sek, loc),
ACNW(ob_obj.key, 'LCL_ROTATION', force_key, force_sek, rot_deg),
ACNW(ob_obj.key, 'LCL_SCALING', force_key, force_sek, scale))
p_rots[ob_obj] = rot
force_key = (simplify_fac == 0.0)
animdata_shapes = {}
for me, (me_key, _shapes_key, shapes) in scene_data.data_deformers_shape.items():
if not me.shape_keys.use_relative:
continue
for shape, (channel_key, geom_key, _shape_verts_co, _shape_verts_idx) in shapes.items():
acnode = AnimationCurveNodeWrapper(channel_key, 'SHAPE_KEY', force_key, force_sek, (0.0,))
acnode.add_group(me_key, shape.name, shape.name, (shape.name,))
animdata_shapes[channel_key] = (acnode, me, shape)
animdata_cameras = {}
for cam_obj, cam_key in scene_data.data_cameras.items():
cam = cam_obj.bdata.data
acnode = AnimationCurveNodeWrapper(cam_key, 'CAMERA_FOCAL', force_key, force_sek, (cam.lens,))
animdata_cameras[cam_key] = (acnode, cam)
currframe = f_start
while currframe <= f_end:
real_currframe = currframe - f_start if start_zero else currframe
scene.frame_set(int(currframe), subframe=currframe - int(currframe))
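        # NOTE: ob_obj below is whatever the loop above left bound; iterating the dupli
        # generator merely forces dupli matrices of ObjectWrapper instances to refresh at
        # this frame, hence the empty body.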
for dp_obj in ob_obj.dupli_list_gen(depsgraph):
pass
for ob_obj, (anim_loc, anim_rot, anim_scale) in animdata_ob.items():
p_rot = p_rots.get(ob_obj, None)
loc, rot, scale, _m, _mr = ob_obj.fbx_object_tx(scene_data, rot_euler_compat=p_rot)
p_rots[ob_obj] = rot
anim_loc.add_keyframe(real_currframe, loc)
anim_rot.add_keyframe(real_currframe, tuple(convert_rad_to_deg_iter(rot)))
anim_scale.add_keyframe(real_currframe, scale)
for anim_shape, me, shape in animdata_shapes.values():
anim_shape.add_keyframe(real_currframe, (shape.value * 100.0,))
for anim_camera, camera in animdata_cameras.values():
anim_camera.add_keyframe(real_currframe, (camera.lens,))
currframe += bake_step
scene.frame_set(back_currframe, subframe=0.0)
animations = {}
for ob_obj, anims in animdata_ob.items():
for anim in anims:
anim.simplify(simplify_fac, bake_step, force_keep)
if not anim:
continue
for obj_key, group_key, group, fbx_group, fbx_gname in anim.get_final_data(scene, ref_id, force_keep):
anim_data = animations.setdefault(obj_key, ("dummy_unused_key", {}))
anim_data[1][fbx_group] = (group_key, group, fbx_gname)
for channel_key, (anim_shape, me, shape) in animdata_shapes.items():
final_keys = {}
anim_shape.simplify(simplify_fac, bake_step, force_keep)
if not anim_shape:
continue
for elem_key, group_key, group, fbx_group, fbx_gname in anim_shape.get_final_data(scene, ref_id, force_keep):
anim_data = animations.setdefault(elem_key, ("dummy_unused_key", {}))
anim_data[1][fbx_group] = (group_key, group, fbx_gname)
# And cameras' lens keys.
for cam_key, (anim_camera, camera) in animdata_cameras.items():
final_keys = {}
anim_camera.simplify(simplify_fac, bake_step, force_keep)
if not anim_camera:
continue
for elem_key, group_key, group, fbx_group, fbx_gname in anim_camera.get_final_data(scene, ref_id, force_keep):
anim_data = animations.setdefault(elem_key, ("dummy_unused_key", {}))
anim_data[1][fbx_group] = (group_key, group, fbx_gname)
astack_key = get_blender_anim_stack_key(scene, ref_id)
alayer_key = get_blender_anim_layer_key(scene, ref_id)
name = (get_blenderID_name(ref_id) if ref_id else scene.name).encode()
if start_zero:
f_end -= f_start
f_start = 0.0
return (astack_key, animations, alayer_key, name, f_start, f_end) if animations else None
def fbx_animations(scene_data):
scene = scene_data.scene
animations = []
animated = set()
frame_start = 1e100
frame_end = -1e100
def add_anim(animations, animated, anim):
nonlocal frame_start, frame_end
if anim is not None:
animations.append(anim)
f_start, f_end = anim[4:6]
if f_start < frame_start:
frame_start = f_start
if f_end > frame_end:
frame_end = f_end
_astack_key, astack, _alayer_key, _name, _fstart, _fend = anim
for elem_key, (alayer_key, acurvenodes) in astack.items():
for fbx_prop, (acurvenode_key, acurves, acurvenode_name) in acurvenodes.items():
animated.add((elem_key, fbx_prop))
if scene_data.settings.bake_anim_use_nla_strips:
strips = []
ob_actions = []
for ob_obj in scene_data.objects:
if not ob_obj.is_object:
continue
ob = ob_obj.bdata
if not ob.animation_data:
continue
ob_actions.append((ob, ob.animation_data.action))
ob.animation_data.action = None
for track in ob.animation_data.nla_tracks:
if track.mute:
continue
for strip in track.strips:
if strip.mute:
continue
strips.append(strip)
strip.mute = True
for strip in strips:
strip.mute = False
add_anim(animations, animated,
fbx_animations_do(scene_data, strip, strip.frame_start, strip.frame_end, True, force_keep=True))
strip.mute = True
scene.frame_set(scene.frame_current, subframe=0.0)
for strip in strips:
strip.mute = False
for ob, ob_act in ob_actions:
ob.animation_data.action = ob_act
if scene_data.settings.bake_anim_use_all_actions:
def validate_actions(act, path_resolve):
for fc in act.fcurves:
data_path = fc.data_path
if fc.array_index:
data_path = data_path + "[%d]" % fc.array_index
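                # For illustration: an FCurve on 'location' with array_index 2 is checked as
                # 'location[2]'; array_index 0 keeps the bare path, which resolves the whole
                # property.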
try:
path_resolve(data_path)
except ValueError:
return False
return True
def restore_object(ob_to, ob_from):
props = (
'location', 'rotation_quaternion', 'rotation_axis_angle', 'rotation_euler', 'rotation_mode', 'scale',
'delta_location', 'delta_rotation_euler', 'delta_rotation_quaternion', 'delta_scale',
'lock_location', 'lock_rotation', 'lock_rotation_w', 'lock_rotations_4d', 'lock_scale',
'tag', 'track_axis', 'up_axis', 'active_material', 'active_material_index',
'matrix_parent_inverse', 'empty_display_type', 'empty_display_size', 'empty_image_offset', 'pass_index',
'color', 'hide_viewport', 'hide_select', 'hide_render', 'instance_type',
'use_instance_vertices_rotation', 'use_instance_faces_scale', 'instance_faces_scale',
'display_type', 'show_bounds', 'display_bounds_type', 'show_name', 'show_axis', 'show_texture_space',
'show_wire', 'show_all_edges', 'show_transparent', 'show_in_front',
'show_only_shape_key', 'use_shape_key_edit_mode', 'active_shape_key_index',
)
for p in props:
if not ob_to.is_property_readonly(p):
setattr(ob_to, p, getattr(ob_from, p))
for ob_obj in scene_data.objects:
if not ob_obj.is_object:
continue
ob = ob_obj.bdata
if not ob.animation_data:
continue
if ob.animation_data.is_property_readonly('action'):
continue
            # We cannot mess with the object's animdata/actions and reliably get back to the
            # original state afterwards, so we keep a temp copy of the object around as a backup
            # to restore from... :/
ob_copy = ob.copy()
# Great, have to handle bones as well if needed...
pbones_matrices = [pbo.matrix_basis.copy() for pbo in ob.pose.bones] if ob.type == 'ARMATURE' else ...
org_act = ob.animation_data.action
path_resolve = ob.path_resolve
for act in bpy.data.actions:
# For now, *all* paths in the action must be valid for the object, to validate the action.
# Unless that action was already assigned to the object!
if act != org_act and not validate_actions(act, path_resolve):
continue
ob.animation_data.action = act
frame_start, frame_end = act.frame_range # sic!
add_anim(animations, animated,
fbx_animations_do(scene_data, (ob, act), frame_start, frame_end, True,
objects={ob_obj}, force_keep=True))
# Ugly! :/
if pbones_matrices is not ...:
for pbo, mat in zip(ob.pose.bones, pbones_matrices):
pbo.matrix_basis = mat.copy()
ob.animation_data.action = org_act
restore_object(ob, ob_copy)
scene.frame_set(scene.frame_current, subframe=0.0)
if pbones_matrices is not ...:
for pbo, mat in zip(ob.pose.bones, pbones_matrices):
pbo.matrix_basis = mat.copy()
ob.animation_data.action = org_act
bpy.data.objects.remove(ob_copy)
scene.frame_set(scene.frame_current, subframe=0.0)
# Global (containing everything) animstack, only if not exporting NLA strips and/or all actions.
if not scene_data.settings.bake_anim_use_nla_strips and not scene_data.settings.bake_anim_use_all_actions:
add_anim(animations, animated, fbx_animations_do(scene_data, None, scene.frame_start, scene.frame_end, False))
# Be sure to update all matrices back to org state!
scene.frame_set(scene.frame_current, subframe=0.0)
return animations, animated, frame_start, frame_end
def fbx_data_from_scene(scene, depsgraph, settings):
objtypes = settings.object_types
dp_objtypes = objtypes - {'ARMATURE'} # Armatures are not supported as dupli instances currently...
perfmon = PerfMon()
perfmon.level_up()
# ##### Gathering data...
perfmon.step("FBX export prepare: Wrapping Objects...")
    # This is rather simple for now, maybe we could end up generating templates with most-used
    # values instead of default ones?
objects = {} # Because we do not have any ordered set...
for ob in settings.context_objects:
if ob.type not in objtypes:
continue
ob_obj = ObjectWrapper(ob)
objects[ob_obj] = None
# Duplis...
for dp_obj in ob_obj.dupli_list_gen(depsgraph):
if dp_obj.type not in dp_objtypes:
continue
objects[dp_obj] = None
perfmon.step("FBX export prepare: Wrapping Data (lamps, cameras, empties)...")
data_lights = {ob_obj.bdata.data: get_blenderID_key(ob_obj.bdata.data)
for ob_obj in objects if ob_obj.type == 'LIGHT'}
# Unfortunately, FBX camera data contains object-level data (like position, orientation, etc.)...
data_cameras = {ob_obj: get_blenderID_key(ob_obj.bdata.data)
for ob_obj in objects if ob_obj.type == 'CAMERA'}
# Yep! Contains nothing, but needed!
data_empties = {ob_obj: get_blender_empty_key(ob_obj.bdata)
for ob_obj in objects if ob_obj.type == 'EMPTY'}
perfmon.step("FBX export prepare: Wrapping Meshes...")
data_meshes = {}
for ob_obj in objects:
if ob_obj.type not in BLENDER_OBJECT_TYPES_MESHLIKE:
continue
ob = ob_obj.bdata
use_org_data = True
org_ob_obj = None
        # Do not want to systematically recreate a new mesh for dupliobject instances,
        # that would kind of break the purpose of those.
if ob_obj.is_dupli:
org_ob_obj = ObjectWrapper(ob) # We get the "real" object wrapper from that dupli instance.
if org_ob_obj in data_meshes:
data_meshes[ob_obj] = data_meshes[org_ob_obj]
continue
is_ob_material = any(ms.link == 'OBJECT' for ms in ob.material_slots)
if settings.use_mesh_modifiers or ob.type in BLENDER_OTHER_OBJECT_TYPES or is_ob_material:
            # We cannot use the default mesh in that case, or materials would not be the right ones...
use_org_data = not (is_ob_material or ob.type in BLENDER_OTHER_OBJECT_TYPES)
backup_pose_positions = []
tmp_mods = []
if use_org_data and ob.type == 'MESH':
# No need to create a new mesh in this case, if no modifier is active!
last_subsurf = None
for mod in ob.modifiers:
# For meshes, when armature export is enabled, disable Armature modifiers here!
# XXX Temp hacks here since currently we only have access to a viewport depsgraph...
#
# NOTE: We put armature to the rest pose instead of disabling it so we still
# have vertex groups in the evaluated mesh.
if mod.type == 'ARMATURE' and 'ARMATURE' in settings.object_types:
object = mod.object
if object and object.type == 'ARMATURE':
armature = object.data
backup_pose_positions.append((armature, armature.pose_position))
armature.pose_position = 'REST'
elif mod.show_render or mod.show_viewport:
# If exporting with subsurf collect the last Catmull-Clark subsurf modifier
# and disable it. We can use the original data as long as this is the first
# found applicable subsurf modifier.
if settings.use_subsurf and mod.type == 'SUBSURF' and mod.subdivision_type == 'CATMULL_CLARK':
if last_subsurf:
use_org_data = False
last_subsurf = mod
else:
use_org_data = False
if settings.use_subsurf and last_subsurf:
# XXX: When exporting with subsurf information temporarily disable
# the last subsurf modifier.
tmp_mods.append((last_subsurf, last_subsurf.show_render, last_subsurf.show_viewport))
last_subsurf.show_render = False
last_subsurf.show_viewport = False
if not use_org_data:
            # If modifiers have been altered, the dependency graph needs to be updated.
if backup_pose_positions or tmp_mods:
depsgraph.update()
ob_to_convert = ob.evaluated_get(depsgraph) if settings.use_mesh_modifiers else ob
            # NOTE: The dependency graph might be re-evaluated multiple times, which could
            # potentially free the mesh created earlier. So we put those meshes into bmain and
            # free them afterwards. Not ideal, but it ensures correct ownership.
tmp_me = bpy.data.meshes.new_from_object(
ob_to_convert, preserve_all_data_layers=True, depsgraph=depsgraph)
data_meshes[ob_obj] = (get_blenderID_key(tmp_me), tmp_me, True)
# Change armatures back.
for armature, pose_position in backup_pose_positions:
print((armature, pose_position))
armature.pose_position = pose_position
# Update now, so we don't leave modified state after last object was exported.
for mod, show_render, show_viewport in tmp_mods:
mod.show_render = show_render
mod.show_viewport = show_viewport
if backup_pose_positions or tmp_mods:
depsgraph.update()
if use_org_data:
data_meshes[ob_obj] = (get_blenderID_key(ob.data), ob.data, False)
if org_ob_obj is not None:
data_meshes[org_ob_obj] = data_meshes[ob_obj]
perfmon.step("FBX export prepare: Wrapping ShapeKeys...")
data_deformers_shape = {}
geom_mat_co = settings.global_matrix if settings.bake_space_transform else None
for me_key, me, _free in data_meshes.values():
if not (me.shape_keys and len(me.shape_keys.key_blocks) > 1):
continue
if me in data_deformers_shape:
continue
shapes_key = get_blender_mesh_shape_key(me)
_cos = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.vertices) * 3
me.vertices.foreach_get("co", _cos)
v_cos = tuple(vcos_transformed_gen(_cos, geom_mat_co))
sk_cos = {}
for shape in me.shape_keys.key_blocks[1:]:
shape.data.foreach_get("co", _cos)
sk_cos[shape] = tuple(vcos_transformed_gen(_cos, geom_mat_co))
sk_base = me.shape_keys.key_blocks[0]
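        # FBX stores shape keys as sparse offsets: for each shape key, only the
        # (coordinate delta, vertex index) pairs differing from its 'relative_key'
        # (usually the basis key) are collected in the loop below.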
for shape in me.shape_keys.key_blocks[1:]:
shape_verts_co = []
shape_verts_idx = []
sv_cos = sk_cos[shape]
ref_cos = v_cos if shape.relative_key == sk_base else sk_cos[shape.relative_key]
for idx, (sv_co, ref_co) in enumerate(zip(sv_cos, ref_cos)):
if similar_values_iter(sv_co, ref_co):
continue
shape_verts_co.extend(Vector(sv_co) - Vector(ref_co))
shape_verts_idx.append(idx)
if not shape_verts_co:
shape_verts_co.extend((0, 0, 0))
shape_verts_idx.append(0)
channel_key, geom_key = get_blender_mesh_shape_channel_key(me, shape)
data = (channel_key, geom_key, shape_verts_co, shape_verts_idx)
data_deformers_shape.setdefault(me, (me_key, shapes_key, {}))[2][shape] = data
perfmon.step("FBX export prepare: Wrapping Armatures...")
data_deformers_skin = {}
data_bones = {}
arm_parents = set()
for ob_obj in tuple(objects):
if not (ob_obj.is_object and ob_obj.type in {'ARMATURE'}):
continue
fbx_skeleton_from_armature(scene, settings, ob_obj, objects, data_meshes,
data_bones, data_deformers_skin, data_empties, arm_parents)
data_leaf_bones = []
if settings.add_leaf_bones:
data_leaf_bones = fbx_generate_leaf_bones(settings, data_bones)
perfmon.step("FBX export prepare: Wrapping World...")
if scene.world:
data_world = {scene.world: get_blenderID_key(scene.world)}
else:
data_world = {}
perfmon.step("FBX export prepare: Wrapping Materials...")
data_materials = {}
for ob_obj in objects:
for ma_s in ob_obj.material_slots:
ma = ma_s.material
if ma is None:
continue
ma_data = data_materials.setdefault(ma, (get_blenderID_key(ma), []))
ma_data[1].append(ob_obj)
perfmon.step("FBX export prepare: Wrapping Textures...")
# Note FBX textures also hold their mapping info.
# TODO: Support layers?
data_textures = {}
# FbxVideo also used to store static images...
data_videos = {}
# For now, do not use world textures, don't think they can be linked to anything FBX wise...
for ma in data_materials.keys():
ma_wrap = node_shader_utils.PrincipledBSDFWrapper(ma, is_readonly=True)
for sock_name, fbx_name in PRINCIPLED_TEXTURE_SOCKETS_TO_FBX:
tex = getattr(ma_wrap, sock_name)
if tex is None or tex.image is None:
continue
blender_tex_key = (ma, sock_name)
data_textures[blender_tex_key] = (get_blender_nodetexture_key(*blender_tex_key), fbx_name)
img = tex.image
vid_data = data_videos.setdefault(img, (get_blenderID_key(img), []))
vid_data[1].append(blender_tex_key)
perfmon.step("FBX export prepare: Wrapping Animations...")
animations = ()
animated = set()
frame_start = scene.frame_start
frame_end = scene.frame_end
if settings.bake_anim:
tmp_scdata = FBXExportData(
None, None, None,
settings, scene, depsgraph, objects, None, None, 0.0, 0.0,
data_empties, data_lights, data_cameras, data_meshes, None,
data_bones, data_leaf_bones, data_deformers_skin, data_deformers_shape,
data_world, data_materials, data_textures, data_videos,
)
animations, animated, frame_start, frame_end = fbx_animations(tmp_scdata)
# ##### Creation of templates...
perfmon.step("FBX export prepare: Generating templates...")
templates = {}
templates[b"GlobalSettings"] = fbx_template_def_globalsettings(scene, settings, nbr_users=1)
if data_empties:
templates[b"Null"] = fbx_template_def_null(scene, settings, nbr_users=len(data_empties))
if data_lights:
templates[b"Light"] = fbx_template_def_light(scene, settings, nbr_users=len(data_lights))
if data_cameras:
templates[b"Camera"] = fbx_template_def_camera(scene, settings, nbr_users=len(data_cameras))
if data_bones:
templates[b"Bone"] = fbx_template_def_bone(scene, settings, nbr_users=len(data_bones))
if data_meshes:
nbr = len({me_key for me_key, _me, _free in data_meshes.values()})
if data_deformers_shape:
nbr += sum(len(shapes[2]) for shapes in data_deformers_shape.values())
templates[b"Geometry"] = fbx_template_def_geometry(scene, settings, nbr_users=nbr)
if objects:
templates[b"Model"] = fbx_template_def_model(scene, settings, nbr_users=len(objects))
if arm_parents:
# Number of Pose|BindPose elements should be the same as number of meshes-parented-to-armatures
templates[b"BindPose"] = fbx_template_def_pose(scene, settings, nbr_users=len(arm_parents))
if data_deformers_skin or data_deformers_shape:
nbr = 0
if data_deformers_skin:
nbr += len(data_deformers_skin)
nbr += sum(len(clusters) for def_me in data_deformers_skin.values() for a, b, clusters in def_me.values())
if data_deformers_shape:
nbr += len(data_deformers_shape)
nbr += sum(len(shapes[2]) for shapes in data_deformers_shape.values())
assert(nbr != 0)
templates[b"Deformers"] = fbx_template_def_deformer(scene, settings, nbr_users=nbr)
# No world support in FBX...
if data_materials:
templates[b"Material"] = fbx_template_def_material(scene, settings, nbr_users=len(data_materials))
if data_textures:
templates[b"TextureFile"] = fbx_template_def_texture_file(scene, settings, nbr_users=len(data_textures))
if data_videos:
templates[b"Video"] = fbx_template_def_video(scene, settings, nbr_users=len(data_videos))
if animations:
nbr_astacks = len(animations)
nbr_acnodes = 0
nbr_acurves = 0
for _astack_key, astack, _al, _n, _fs, _fe in animations:
for _alayer_key, alayer in astack.values():
for _acnode_key, acnode, _acnode_name in alayer.values():
nbr_acnodes += 1
for _acurve_key, _dval, acurve, acurve_valid in acnode.values():
if acurve:
nbr_acurves += 1
templates[b"AnimationStack"] = fbx_template_def_animstack(scene, settings, nbr_users=nbr_astacks)
# Would be nice to have one layer per animated object, but this seems tricky and not that well supported.
# So for now, only one layer per anim stack.
templates[b"AnimationLayer"] = fbx_template_def_animlayer(scene, settings, nbr_users=nbr_astacks)
templates[b"AnimationCurveNode"] = fbx_template_def_animcurvenode(scene, settings, nbr_users=nbr_acnodes)
templates[b"AnimationCurve"] = fbx_template_def_animcurve(scene, settings, nbr_users=nbr_acurves)
templates_users = sum(tmpl.nbr_users for tmpl in templates.values())
# ##### Creation of connections...
perfmon.step("FBX export prepare: Generating Connections...")
connections = []
# Objects (with classical parenting).
for ob_obj in objects:
# Bones are handled later.
if not ob_obj.is_bone:
par_obj = ob_obj.parent
# Meshes parented to armature are handled separately, yet we want the 'no parent' connection (0).
if par_obj and ob_obj.has_valid_parent(objects) and (par_obj, ob_obj) not in arm_parents:
connections.append((b"OO", ob_obj.fbx_uuid, par_obj.fbx_uuid, None))
else:
connections.append((b"OO", ob_obj.fbx_uuid, 0, None))
# Armature & Bone chains.
for bo_obj in data_bones.keys():
par_obj = bo_obj.parent
if par_obj not in objects:
continue
connections.append((b"OO", bo_obj.fbx_uuid, par_obj.fbx_uuid, None))
# Object data.
for ob_obj in objects:
if ob_obj.is_bone:
bo_data_key = data_bones[ob_obj]
connections.append((b"OO", get_fbx_uuid_from_key(bo_data_key), ob_obj.fbx_uuid, None))
else:
if ob_obj.type == 'LIGHT':
light_key = data_lights[ob_obj.bdata.data]
connections.append((b"OO", get_fbx_uuid_from_key(light_key), ob_obj.fbx_uuid, None))
elif ob_obj.type == 'CAMERA':
cam_key = data_cameras[ob_obj]
connections.append((b"OO", get_fbx_uuid_from_key(cam_key), ob_obj.fbx_uuid, None))
elif ob_obj.type == 'EMPTY' or ob_obj.type == 'ARMATURE':
empty_key = data_empties[ob_obj]
connections.append((b"OO", get_fbx_uuid_from_key(empty_key), ob_obj.fbx_uuid, None))
elif ob_obj.type in BLENDER_OBJECT_TYPES_MESHLIKE:
mesh_key, _me, _free = data_meshes[ob_obj]
connections.append((b"OO", get_fbx_uuid_from_key(mesh_key), ob_obj.fbx_uuid, None))
# Leaf Bones
for (_node_name, par_uuid, node_uuid, attr_uuid, _matrix, _hide, _size) in data_leaf_bones:
connections.append((b"OO", node_uuid, par_uuid, None))
connections.append((b"OO", attr_uuid, node_uuid, None))
# 'Shape' deformers (shape keys, only for meshes currently)...
for me_key, shapes_key, shapes in data_deformers_shape.values():
# shape -> geometry
connections.append((b"OO", get_fbx_uuid_from_key(shapes_key), get_fbx_uuid_from_key(me_key), None))
for channel_key, geom_key, _shape_verts_co, _shape_verts_idx in shapes.values():
# shape channel -> shape
connections.append((b"OO", get_fbx_uuid_from_key(channel_key), get_fbx_uuid_from_key(shapes_key), None))
# geometry (keys) -> shape channel
connections.append((b"OO", get_fbx_uuid_from_key(geom_key), get_fbx_uuid_from_key(channel_key), None))
# 'Skin' deformers (armature-to-geometry, only for meshes currently)...
for arm, deformed_meshes in data_deformers_skin.items():
for me, (skin_key, ob_obj, clusters) in deformed_meshes.items():
# skin -> geometry
mesh_key, _me, _free = data_meshes[ob_obj]
assert(me == _me)
connections.append((b"OO", get_fbx_uuid_from_key(skin_key), get_fbx_uuid_from_key(mesh_key), None))
for bo_obj, clstr_key in clusters.items():
# cluster -> skin
connections.append((b"OO", get_fbx_uuid_from_key(clstr_key), get_fbx_uuid_from_key(skin_key), None))
# bone -> cluster
connections.append((b"OO", bo_obj.fbx_uuid, get_fbx_uuid_from_key(clstr_key), None))
# Materials
mesh_material_indices = {}
_objs_indices = {}
for ma, (ma_key, ob_objs) in data_materials.items():
for ob_obj in ob_objs:
connections.append((b"OO", get_fbx_uuid_from_key(ma_key), ob_obj.fbx_uuid, None))
# Get index of this material for this object (or dupliobject).
# Material indices for mesh faces are determined by their order in 'ma to ob' connections.
# Only materials for meshes currently...
            # Note that in case of dupliobjects, the same me/ma idx will be generated several times...
# Should not be an issue in practice, and it's needed in case we export duplis but not the original!
if ob_obj.type not in BLENDER_OBJECT_TYPES_MESHLIKE:
continue
_mesh_key, me, _free = data_meshes[ob_obj]
idx = _objs_indices[ob_obj] = _objs_indices.get(ob_obj, -1) + 1
mesh_material_indices.setdefault(me, {})[ma] = idx
del _objs_indices
for (ma, sock_name), (tex_key, fbx_prop) in data_textures.items():
ma_key, _ob_objs = data_materials[ma]
connections.append((b"OP", get_fbx_uuid_from_key(tex_key), get_fbx_uuid_from_key(ma_key), fbx_prop))
for vid, (vid_key, blender_tex_keys) in data_videos.items():
for blender_tex_key in blender_tex_keys:
tex_key, _fbx_prop = data_textures[blender_tex_key]
connections.append((b"OO", get_fbx_uuid_from_key(vid_key), get_fbx_uuid_from_key(tex_key), None))
for astack_key, astack, alayer_key, _name, _fstart, _fend in animations:
astack_id = get_fbx_uuid_from_key(astack_key)
alayer_id = get_fbx_uuid_from_key(alayer_key)
connections.append((b"OO", alayer_id, astack_id, None))
for elem_key, (alayer_key, acurvenodes) in astack.items():
elem_id = get_fbx_uuid_from_key(elem_key)
for fbx_prop, (acurvenode_key, acurves, acurvenode_name) in acurvenodes.items():
acurvenode_id = get_fbx_uuid_from_key(acurvenode_key)
connections.append((b"OO", acurvenode_id, alayer_id, None))
connections.append((b"OP", acurvenode_id, elem_id, fbx_prop.encode()))
for fbx_item, (acurve_key, default_value, acurve, acurve_valid) in acurves.items():
if acurve:
connections.append((b"OP", get_fbx_uuid_from_key(acurve_key), acurvenode_id, fbx_item.encode()))
perfmon.level_down()
    return FBXExportData(
        templates, templates_users, connections,
        settings, scene, depsgraph, objects, animations, animated, frame_start, frame_end,
data_empties, data_lights, data_cameras, data_meshes, mesh_material_indices,
data_bones, data_leaf_bones, data_deformers_skin, data_deformers_shape,
data_world, data_materials, data_textures, data_videos,
)
def fbx_scene_data_cleanup(scene_data):
done_meshes = set()
for me_key, me, free in scene_data.data_meshes.values():
if free and me_key not in done_meshes:
bpy.data.meshes.remove(me)
done_meshes.add(me_key)
def fbx_header_elements(root, scene_data, time=None):
    """
    Write the boilerplate code of the FBX root.
    time is expected to be a datetime.datetime object, or None (using now() in this case).
    """
    app_vendor = "Blender Foundation"
    app_name = "Blender (stable FBX IO)"
    app_ver = bpy.app.version_string
    import addon_utils
    import sys
    addon_ver = addon_utils.module_bl_info(sys.modules[__package__])['version']
    # ##### Start of FBXHeaderExtension element.
    header_ext = elem_empty(root, b"FBXHeaderExtension")
    elem_data_single_int32(header_ext, b"FBXHeaderVersion", FBX_HEADER_VERSION)
    elem_data_single_int32(header_ext, b"FBXVersion", FBX_VERSION)
elem_data_single_int32(header_ext, b"EncryptionType", 0)
if time is None:
time = datetime.datetime.now()
elem = elem_empty(header_ext, b"CreationTimeStamp")
elem_data_single_int32(elem, b"Version", 1000)
elem_data_single_int32(elem, b"Year", time.year)
elem_data_single_int32(elem, b"Month", time.month)
elem_data_single_int32(elem, b"Day", time.day)
elem_data_single_int32(elem, b"Hour", time.hour)
elem_data_single_int32(elem, b"Minute", time.minute)
elem_data_single_int32(elem, b"Second", time.second)
elem_data_single_int32(elem, b"Millisecond", time.microsecond // 1000)
elem_data_single_string_unicode(header_ext, b"Creator", "%s - %s - %d.%d.%d"
% (app_name, app_ver, addon_ver[0], addon_ver[1], addon_ver[2]))
scene_info = elem_data_single_string(header_ext, b"SceneInfo", fbx_name_class(b"GlobalInfo", b"SceneInfo"))
scene_info.add_string(b"UserData")
elem_data_single_string(scene_info, b"Type", b"UserData")
elem_data_single_int32(scene_info, b"Version", FBX_SCENEINFO_VERSION)
meta_data = elem_empty(scene_info, b"MetaData")
elem_data_single_int32(meta_data, b"Version", FBX_SCENEINFO_VERSION)
elem_data_single_string(meta_data, b"Title", b"")
elem_data_single_string(meta_data, b"Subject", b"")
elem_data_single_string(meta_data, b"Author", b"")
elem_data_single_string(meta_data, b"Keywords", b"")
elem_data_single_string(meta_data, b"Revision", b"")
elem_data_single_string(meta_data, b"Comment", b"")
props = elem_properties(scene_info)
elem_props_set(props, "p_string_url", b"DocumentUrl", "/foobar.fbx")
elem_props_set(props, "p_string_url", b"SrcDocumentUrl", "/foobar.fbx")
original = elem_props_compound(props, b"Original")
original("p_string", b"ApplicationVendor", app_vendor)
original("p_string", b"ApplicationName", app_name)
original("p_string", b"ApplicationVersion", app_ver)
original("p_datetime", b"DateTime_GMT", "01/01/1970 00:00:00.000")
original("p_string", b"FileName", "/foobar.fbx")
lastsaved = elem_props_compound(props, b"LastSaved")
lastsaved("p_string", b"ApplicationVendor", app_vendor)
lastsaved("p_string", b"ApplicationName", app_name)
lastsaved("p_string", b"ApplicationVersion", app_ver)
lastsaved("p_datetime", b"DateTime_GMT", "01/01/1970 00:00:00.000")
02}:{:02}:{:03}"
"".format(time.year, time.month, time.day, time.hour, time.minute, time.second,
time.microsecond * 1000))
elem_data_single_string_unicode(root, b"Creator", "%s - %s - %d.%d.%d"
% (app_name, app_ver, addon_ver[0], addon_ver[1], addon_ver[2]))
    # ##### Start of GlobalSettings element.
    global_settings = elem_empty(root, b"GlobalSettings")
    elem_data_single_int32(global_settings, b"Version", 1000)
    props = elem_properties(global_settings)
up_axis, front_axis, coord_axis = RIGHT_HAND_AXES[scene_data.settings.to_axes]
    scale_factor_org = 1.0
    scale_factor = scale_factor_org * scene_data.settings.unit_scale
elem_props_set(props, "p_integer", b"UpAxis", up_axis[0])
elem_props_set(props, "p_integer", b"UpAxisSign", up_axis[1])
elem_props_set(props, "p_integer", b"FrontAxis", front_axis[0])
elem_props_set(props, "p_integer", b"FrontAxisSign", front_axis[1])
elem_props_set(props, "p_integer", b"CoordAxis", coord_axis[0])
elem_props_set(props, "p_integer", b"CoordAxisSign", coord_axis[1])
elem_props_set(props, "p_integer", b"OriginalUpAxis", -1)
elem_props_set(props, "p_integer", b"OriginalUpAxisSign", 1)
elem_props_set(props, "p_double", b"UnitScaleFactor", scale_factor)
elem_props_set(props, "p_double", b"OriginalUnitScaleFactor", scale_factor_org)
elem_props_set(props, "p_color_rgb", b"AmbientColor", (0.0, 0.0, 0.0))
elem_props_set(props, "p_string", b"DefaultCamera", "Producer Perspective")
# Global timing data.
r = scene.render
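    # Snap the scene framerate onto one of the standard FBX rates when it matches
    # closely; otherwise keep the 'custom framerate' mode chosen as the default above.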
_, fbx_fps_mode = FBX_FRAMERATES[0] # Custom framerate.
fbx_fps = fps = r.fps / r.fps_base
for ref_fps, fps_mode in FBX_FRAMERATES:
if similar_values(fps, ref_fps):
fbx_fps = ref_fps
fbx_fps_mode = fps_mode
elem_props_set(props, "p_enum", b"TimeMode", fbx_fps_mode)
elem_props_set(props, "p_timestamp", b"TimeSpanStart", 0)
elem_props_set(props, "p_timestamp", b"TimeSpanStop", FBX_KTIME)
elem_props_set(props, "p_double", b"CustomFrameRate", fbx_fps)
# ##### End of GlobalSettings element.
def fbx_documents_elements(root, scene_data):
name = scene_data.scene.name
# ##### Start of Documents element.
docs = elem_empty(root, b"Documents")
elem_data_single_int32(docs, b"Count", 1)
doc_uid = get_fbx_uuid_from_key("__FBX_Document__" + name)
doc = elem_data_single_int64(docs, b"Document", doc_uid)
doc.add_string_unicode(name)
doc.add_string_unicode(name)
props = elem_properties(doc)
elem_props_set(props, "p_object", b"SourceObject")
elem_props_set(props, "p_string", b"ActiveAnimStackName", "")
# XXX Some kind of ID? Offset?
# Anyway, as long as we have only one doc, probably not an issue.
elem_data_single_int64(doc, b"RootNode", 0)
def fbx_references_elements(root, scene_data):
docs = elem_empty(root, b"References")
def fbx_definitions_elements(root, scene_data):
definitions = elem_empty(root, b"Definitions")
elem_data_single_int32(definitions, b"Version", FBX_TEMPLATES_VERSION)
elem_data_single_int32(definitions, b"Count", scene_data.templates_users)
fbx_templates_generate(definitions, scene_data.templates)
def fbx_objects_elements(root, scene_data):
perfmon = PerfMon()
perfmon.level_up()
objects = elem_empty(root, b"Objects")
perfmon.step("FBX export fetch empties (%d)..." % len(scene_data.data_empties))
for empty in scene_data.data_empties:
fbx_data_empty_elements(objects, empty, scene_data)
perfmon.step("FBX export fetch lamps (%d)..." % len(scene_data.data_lights))
for lamp in scene_data.data_lights:
fbx_data_light_elements(objects, lamp, scene_data)
perfmon.step("FBX export fetch cameras (%d)..." % len(scene_data.data_cameras))
for cam in scene_data.data_cameras:
fbx_data_camera_elements(objects, cam, scene_data)
perfmon.step("FBX export fetch meshes (%d)..."
% len({me_key for me_key, _me, _free in scene_data.data_meshes.values()}))
done_meshes = set()
for me_obj in scene_data.data_meshes:
fbx_data_mesh_elements(objects, me_obj, scene_data, done_meshes)
del done_meshes
perfmon.step("FBX export fetch objects (%d)..." % len(scene_data.objects))
for ob_obj in scene_data.objects:
if ob_obj.is_dupli:
continue
fbx_data_object_elements(objects, ob_obj, scene_data)
for dp_obj in ob_obj.dupli_list_gen(scene_data.depsgraph):
if dp_obj not in scene_data.objects:
continue
fbx_data_object_elements(objects, dp_obj, scene_data)
perfmon.step("FBX export fetch remaining...")
for ob_obj in scene_data.objects:
if not (ob_obj.is_object and ob_obj.type == 'ARMATURE'):
continue
fbx_data_armature_elements(objects, ob_obj, scene_data)
if scene_data.data_leaf_bones:
fbx_data_leaf_bone_elements(objects, scene_data)
for ma in scene_data.data_materials:
fbx_data_material_elements(objects, ma, scene_data)
for blender_tex_key in scene_data.data_textures:
fbx_data_texture_file_elements(objects, blender_tex_key, scene_data)
for vid in scene_data.data_videos:
fbx_data_video_elements(objects, vid, scene_data)
perfmon.step("FBX export fetch animations...")
start_time = time.process_time()
fbx_data_animation_elements(objects, scene_data)
perfmon.level_down()
def fbx_connections_elements(root, scene_data):
connections = elem_empty(root, b"Connections")
for c in scene_data.connections:
elem_connection(connections, *c)
def fbx_takes_elements(root, scene_data):
# XXX Pretty sure takes are no more needed...
takes = elem_empty(root, b"Takes")
elem_data_single_string(takes, b"Current", b"")
animations = scene_data.animations
for astack_key, animations, alayer_key, name, f_start, f_end in animations:
scene = scene_data.scene
fps = scene.render.fps / scene.render.fps_base
start_ktime = int(convert_sec_to_ktime(f_start / fps))
end_ktime = int(convert_sec_to_ktime(f_end / fps))
take = elem_data_single_string(takes, b"Take", name)
elem_data_single_string(take, b"FileName", name + b".tak")
take_loc_time = elem_data_single_int64(take, b"LocalTime", start_ktime)
take_loc_time.add_int64(end_ktime)
take_ref_time = elem_data_single_int64(take, b"ReferenceTime", start_ktime)
take_ref_time.add_int64(end_ktime)
# ##### "Main" functions. #####
# This func can be called with just the filepath
def save_single(operator, scene, depsgraph, filepath="",
global_matrix=Matrix(),
apply_unit_scale=False,
global_scale=1.0,
apply_scale_options='FBX_SCALE_NONE',
axis_up="Z",
axis_forward="Y",
context_objects=None,
object_types=None,
use_mesh_modifiers=True,
use_mesh_modifiers_render=True,
mesh_smooth_type='FACE',
use_subsurf=False,
use_armature_deform_only=False,
bake_anim=True,
bake_anim_use_all_bones=True,
bake_anim_use_nla_strips=True,
bake_anim_use_all_actions=True,
bake_anim_step=1.0,
bake_anim_simplify_factor=1.0,
bake_anim_force_startend_keying=True,
add_leaf_bones=False,
primary_bone_axis='Y',
secondary_bone_axis='X',
use_metadata=True,
path_mode='AUTO',
use_mesh_edges=True,
use_tspace=True,
embed_textures=False,
use_custom_props=False,
bake_space_transform=False,
armature_nodetype='NULL',
**kwargs
):
# Clear cached ObjectWrappers (just in case...).
ObjectWrapper.cache_clear()
if object_types is None:
object_types = {'EMPTY', 'CAMERA', 'LIGHT', 'ARMATURE', 'MESH', 'OTHER'}
if 'OTHER' in object_types:
object_types |= BLENDER_OTHER_OBJECT_TYPES
# Default Blender unit is equivalent to meter, while FBX one is centimeter...
unit_scale = units_blender_to_fbx_factor(scene) if apply_unit_scale else 100.0
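    # FBX_SCALE_NONE: bake unit scale and global scale into the matrix, FBX UnitScaleFactor stays 1.0.
    # FBX_SCALE_UNITS: bake only the global scale into the matrix, keep unit scale as FBX UnitScaleFactor.
    # FBX_SCALE_CUSTOM: bake the unit scale into the matrix, export global scale as FBX UnitScaleFactor.
    # FBX_SCALE_ALL: export the product of both factors as FBX UnitScaleFactor.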
if apply_scale_options == 'FBX_SCALE_NONE':
global_matrix = Matrix.Scale(unit_scale * global_scale, 4) @ global_matrix
unit_scale = 1.0
elif apply_scale_options == 'FBX_SCALE_UNITS':
global_matrix = Matrix.Scale(global_scale, 4) @ global_matrix
elif apply_scale_options == 'FBX_SCALE_CUSTOM':
global_matrix = Matrix.Scale(unit_scale, 4) @ global_matrix
unit_scale = global_scale
else: # if apply_scale_options == 'FBX_SCALE_ALL':
unit_scale = global_scale * unit_scale
global_scale = global_matrix.median_scale
global_matrix_inv = global_matrix.inverted()
# For transforming mesh normals.
global_matrix_inv_transposed = global_matrix_inv.transposed()
# Only embed textures in COPY mode!
if embed_textures and path_mode != 'COPY':
embed_textures = False
# Calculate bone correction matrix
bone_correction_matrix = None # Default is None = no change
bone_correction_matrix_inv = None
if (primary_bone_axis, secondary_bone_axis) != ('Y', 'X'):
from bpy_extras.io_utils import axis_conversion
bone_correction_matrix = axis_conversion(from_forward=secondary_bone_axis,
from_up=primary_bone_axis,
to_forward='X',
to_up='Y',
).to_4x4()
bone_correction_matrix_inv = bone_correction_matrix.inverted()
media_settings = FBXExportSettingsMedia(
path_mode,
os.path.dirname(bpy.data.filepath), # base_src
os.path.dirname(filepath), # base_dst
# Local dir where to put images (media), using FBX conventions.
os.path.splitext(os.path.basename(filepath))[0] + ".fbm", # subdir
embed_textures,
set(), # copy_set
set(), # embedded_set
)
settings = FBXExportSettings(
operator.report, (axis_up, axis_forward), global_matrix, global_scale, apply_unit_scale, unit_scale,
bake_space_transform, global_matrix_inv, global_matrix_inv_transposed,
context_objects, object_types, use_mesh_modifiers, use_mesh_modifiers_render,
mesh_smooth_type, use_subsurf, use_mesh_edges, use_tspace,
armature_nodetype, use_armature_deform_only,
add_leaf_bones, bone_correction_matrix, bone_correction_matrix_inv,
bake_anim, bake_anim_use_all_bones, bake_anim_use_nla_strips, bake_anim_use_all_actions,
bake_anim_step, bake_anim_simplify_factor, bake_anim_force_startend_keying,
False, media_settings, use_custom_props,
)
import bpy_extras.io_utils
print('\nFBX export starting... %r' % filepath)
start_time = time.process_time()
# Generate some data about exported scene...
scene_data = fbx_data_from_scene(scene, depsgraph, settings)
root = elem_empty(None, b"") # Root element has no id, as it is not saved per se!
# Mostly FBXHeaderExtension and GlobalSettings.
fbx_header_elements(root, scene_data)
# Documents and References are pretty much void currently.
fbx_documents_elements(root, scene_data)
fbx_references_elements(root, scene_data)
# Templates definitions.
fbx_definitions_elements(root, scene_data)
# Actual data.
fbx_objects_elements(root, scene_data)
# How data are inter-connected.
fbx_connections_elements(root, scene_data)
# Animation.
fbx_takes_elements(root, scene_data)
# Cleanup!
fbx_scene_data_cleanup(scene_data)
# And we are down, we can write the whole thing!
encode_bin.write(filepath, root, FBX_VERSION)
# Clear cached ObjectWrappers!
ObjectWrapper.cache_clear()
# copy all collected files, if we did not embed them.
if not media_settings.embed_textures:
bpy_extras.io_utils.path_reference_copy(media_settings.copy_set)
print('export finished in %.4f sec.' % (time.process_time() - start_time))
return {'FINISHED'}
# defaults for applications, currently only unity but could add others.
def defaults_unity3d():
return {
# These options seem to produce the same result as the old Ascii exporter in Unity3D:
"axis_up": 'Y',
"axis_forward": '-Z',
"global_matrix": Matrix.Rotation(-math.pi / 2.0, 4, 'X'),
# Should really be True, but it can cause problems if a model is already in a scene or prefab
# with the old transforms.
"bake_space_transform": False,
"use_selection": False,
"object_types": {'ARMATURE', 'EMPTY', 'MESH', 'OTHER'},
"use_mesh_modifiers": True,
"use_mesh_modifiers_render": True,
"use_mesh_edges": False,
"mesh_smooth_type": 'FACE',
"use_subsurf": False,
"use_tspace": False, # XXX Why? Unity is expected to support tspace import...
"use_armature_deform_only": True,
"use_custom_props": True,
"bake_anim": True,
"bake_anim_simplify_factor": 1.0,
"bake_anim_step": 1.0,
"bake_anim_use_nla_strips": True,
"bake_anim_use_all_actions": True,
"add_leaf_bones": False, # Avoid memory/performance cost for something only useful for modelling
"primary_bone_axis": 'Y', # Doesn't really matter for Unity, so leave unchanged
"secondary_bone_axis": 'X',
"path_mode": 'AUTO',
"embed_textures": False,
"batch_mode": 'OFF',
}
def save(operator, context,
filepath="",
use_selection=False,
use_active_collection=False,
batch_mode='OFF',
use_batch_own_dir=False,
**kwargs
):
ret = {'FINISHED'}
active_object = context.view_layer.objects.active
org_mode = None
if active_object and active_object.mode != 'OBJECT' and bpy.ops.object.mode_set.poll():
org_mode = active_object.mode
bpy.ops.object.mode_set(mode='OBJECT')
if batch_mode == 'OFF':
kwargs_mod = kwargs.copy()
if use_active_collection:
if use_selection:
ctx_objects = tuple(obj
for obj in context.view_layer.active_layer_collection.collection.all_objects
if obj.select_get())
else:
ctx_objects = context.view_layer.active_layer_collection.collection.all_objects
else:
if use_selection:
ctx_objects = context.selected_objects
else:
ctx_objects = context.view_layer.objects
kwargs_mod["context_objects"] = ctx_objects
depsgraph = context.evaluated_depsgraph_get()
ret = save_single(operator, context.scene, depsgraph, filepath, **kwargs_mod)
else:
fbxpath = filepath
prefix = os.path.basename(fbxpath)
if prefix:
fbxpath = os.path.dirname(fbxpath)
if batch_mode == 'COLLECTION':
data_seq = tuple((coll, coll.name, 'objects') for coll in bpy.data.collections if coll.objects)
elif batch_mode in {'SCENE_COLLECTION', 'ACTIVE_SCENE_COLLECTION'}:
scenes = [context.scene] if batch_mode == 'ACTIVE_SCENE_COLLECTION' else bpy.data.scenes
data_seq = []
for scene in scenes:
if not scene.objects:
continue
# Needed to avoid having tens of 'Master Collection' entries.
todo_collections = [(scene.collection, "_".join((scene.name, scene.collection.name)))]
while todo_collections:
coll, coll_name = todo_collections.pop()
todo_collections.extend(((c, c.name) for c in coll.children if c.all_objects))
data_seq.append((coll, coll_name, 'all_objects'))
else:
data_seq = tuple((scene, scene.name, 'objects') for scene in bpy.data.scenes if scene.objects)
# call this function within a loop with BATCH_ENABLE == False
new_fbxpath = fbxpath # own dir option modifies, we need to keep an original
for data, data_name, data_obj_propname in data_seq: # scene or collection
newname = "_".join((prefix, bpy.path.clean_name(data_name))) if prefix else bpy.path.clean_name(data_name)
if use_batch_own_dir:
new_fbxpath = os.path.join(fbxpath, newname)
# path may already exist... and be a file.
while os.path.isfile(new_fbxpath):
new_fbxpath = "_".join((new_fbxpath, "dir"))
if not os.path.exists(new_fbxpath):
os.makedirs(new_fbxpath)
filepath = os.path.join(new_fbxpath, newname + '.fbx')
print('\nBatch exporting %s as...\n\t%r' % (data, filepath))
if batch_mode in {'COLLECTION', 'SCENE_COLLECTION', 'ACTIVE_SCENE_COLLECTION'}:
# Collection, so that objects update properly, add a dummy scene.
scene = bpy.data.scenes.new(name="FBX_Temp")
                src_scenes = {}  # Count how many times each 'source' scene is used.
for obj in getattr(data, data_obj_propname):
for src_sce in obj.users_scene:
src_scenes[src_sce] = src_scenes.setdefault(src_sce, 0) + 1
scene.collection.objects.link(obj)
# Find the 'most used' source scene, and use its unit settings. This is somewhat weak, but should work
# fine in most cases, and avoids stupid issues like T41931.
best_src_scene = None
best_src_scene_users = -1
for sce, nbr_users in src_scenes.items():
if (nbr_users) > best_src_scene_users:
best_src_scene_users = nbr_users
best_src_scene = sce
scene.unit_settings.system = best_src_scene.unit_settings.system
scene.unit_settings.system_rotation = best_src_scene.unit_settings.system_rotation
scene.unit_settings.scale_length = best_src_scene.unit_settings.scale_length
# new scene [only one viewlayer to update]
scene.view_layers[0].update()
                # TODO - BUMMER! Armatures not in the group won't animate the mesh
else:
scene = data
kwargs_batch = kwargs.copy()
kwargs_batch["context_objects"] = getattr(data, data_obj_propname)
save_single(operator, scene, scene.view_layers[0].depsgraph, filepath, **kwargs_batch)
if batch_mode in {'COLLECTION', 'SCENE_COLLECTION', 'ACTIVE_SCENE_COLLECTION'}:
# Remove temp collection scene.
bpy.data.scenes.remove(scene)
if active_object and org_mode:
context.view_layer.objects.active = active_object
if bpy.ops.object.mode_set.poll():
bpy.ops.object.mode_set(mode=org_mode)
return ret
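# --- Editor's note: a minimal usage sketch (not part of the original module). ---
# Assuming 'operator' and 'context' come from a Blender export operator, a
# Unity-targeted export could look roughly like this:
#
#   kwargs = defaults_unity3d()
#   use_selection = kwargs.pop("use_selection")
#   batch_mode = kwargs.pop("batch_mode")
#   save(operator, context, filepath="//model.fbx",
#        use_selection=use_selection, batch_mode=batch_mode, **kwargs)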
| true
| true
|
1c4319290a772ff076d1c754c3360cc8808b20b6
| 3,196
|
py
|
Python
|
pysurf/spp/request.py
|
MFSJMenger/pysurf
|
99c6a94d4cb5046f16a0961b907061d989ffb6dc
|
[
"Apache-2.0"
] | 7
|
2020-10-28T13:46:08.000Z
|
2021-05-27T06:41:56.000Z
|
pysurf/spp/request.py
|
MFSJMenger/pysurf
|
99c6a94d4cb5046f16a0961b907061d989ffb6dc
|
[
"Apache-2.0"
] | 2
|
2020-10-27T19:15:12.000Z
|
2020-10-27T19:15:25.000Z
|
pysurf/spp/request.py
|
MFSJMenger/pysurf
|
99c6a94d4cb5046f16a0961b907061d989ffb6dc
|
[
"Apache-2.0"
] | 2
|
2021-04-15T05:54:30.000Z
|
2022-02-08T00:10:10.000Z
|
from collections.abc import Mapping
import numpy as np
class RequestGenerator:
"""Abstraction to generate Requests consistently"""
def __init__(self, nstates, properties=None, use_db=False):
self.nstates = nstates
# properties that are always asked for (database)
if properties is None:
properties = []
#
self._request_always = properties
#
if use_db is True:
self.request = self._request_all
else:
self.request = self._request
def _request(self, crd, properties, states=None, same_crd=False):
"""add more sanity checks!"""
properties = properties + self._request_always
if states is None:
return Request(crd, properties, list(range(self.nstates)), same_crd=same_crd)
return Request(crd, properties, states, same_crd=same_crd)
def _request_all(self, crd, properties, states=None, same_crd=False):
properties = properties + self._request_always
return Request(crd, properties, list(range(self.nstates)), same_crd=same_crd)
class StateData:
def __init__(self, states, shape):
self._states = states
sh = tuple([len(states)] + list(shape))
self.data = np.empty(sh, dtype=np.double)
def set_data(self, data):
"""try to set everything"""
data = np.asarray(data)
data = data.reshape(self.data.shape)
self.data[:] = data
def __setitem__(self, istate, value):
idx = self._states.index(istate)
self.data[idx] = value
def __getitem__(self, istate):
idx = self._states.index(istate)
return self.data[idx]
class Request(Mapping):
def __init__(self, crd, properties, states, same_crd=False):
self._properties = {prop: None for prop in properties if prop != 'crd'}
self.states = states
self.crd = np.array(crd)
self.same_crd = same_crd
#
if 'gradient' in properties:
self._properties['gradient'] = StateData(states, self.crd.shape)
def set(self, name, value):
"""Ignore properties that are not requested!"""
if name not in self._properties:
return
prop = self._properties[name]
if isinstance(prop, StateData):
self._set_state_dictionary(prop, value)
else:
self._properties[name] = value
def __getitem__(self, key):
return self._properties[key]
def __len__(self):
return len(self._properties)
def __iter__(self):
return iter(self._properties)
def iter_data(self):
"""Iterate over all data in the request dct"""
for key, value in self._properties.items():
if isinstance(value, StateData):
yield key, value.data
else:
yield key, value
def _set_state_dictionary(self, prop, dct):
"""Set stateData"""
if not isinstance(dct, Mapping):
prop.set_data(dct)
return
#
for state, value in dct.items():
try:
prop[state] = value
except ValueError:
pass
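# --- Editor's note: a minimal usage sketch (hypothetical values, not part of the
# original module). ---
#
#   gen = RequestGenerator(nstates=3, properties=['energy'])
#   req = gen.request([[0.0, 0.0, 0.0]], ['gradient'])     # all 3 states by default
#   req.set('energy', [0.1, 0.5, 0.9])                     # plain property
#   req.set('gradient', {0: [[0.0, 0.0, 0.1]]})            # per-state StateData entry
#   for name, data in req.iter_data():
#       print(name, data)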
| 30.730769
| 89
| 0.602003
|
from collections.abc import Mapping
import numpy as np
class RequestGenerator:
def __init__(self, nstates, properties=None, use_db=False):
self.nstates = nstates
if properties is None:
properties = []
self._request_always = properties
if use_db is True:
self.request = self._request_all
else:
self.request = self._request
def _request(self, crd, properties, states=None, same_crd=False):
properties = properties + self._request_always
if states is None:
return Request(crd, properties, list(range(self.nstates)), same_crd=same_crd)
return Request(crd, properties, states, same_crd=same_crd)
def _request_all(self, crd, properties, states=None, same_crd=False):
properties = properties + self._request_always
return Request(crd, properties, list(range(self.nstates)), same_crd=same_crd)
class StateData:
def __init__(self, states, shape):
self._states = states
sh = tuple([len(states)] + list(shape))
self.data = np.empty(sh, dtype=np.double)
def set_data(self, data):
data = np.asarray(data)
data = data.reshape(self.data.shape)
self.data[:] = data
def __setitem__(self, istate, value):
idx = self._states.index(istate)
self.data[idx] = value
def __getitem__(self, istate):
idx = self._states.index(istate)
return self.data[idx]
class Request(Mapping):
def __init__(self, crd, properties, states, same_crd=False):
self._properties = {prop: None for prop in properties if prop != 'crd'}
self.states = states
self.crd = np.array(crd)
self.same_crd = same_crd
if 'gradient' in properties:
self._properties['gradient'] = StateData(states, self.crd.shape)
def set(self, name, value):
if name not in self._properties:
return
prop = self._properties[name]
if isinstance(prop, StateData):
self._set_state_dictionary(prop, value)
else:
self._properties[name] = value
def __getitem__(self, key):
return self._properties[key]
def __len__(self):
return len(self._properties)
def __iter__(self):
return iter(self._properties)
def iter_data(self):
for key, value in self._properties.items():
if isinstance(value, StateData):
yield key, value.data
else:
yield key, value
def _set_state_dictionary(self, prop, dct):
if not isinstance(dct, Mapping):
prop.set_data(dct)
return
for state, value in dct.items():
try:
prop[state] = value
except ValueError:
pass
| true
| true
|
1c4319f777093247b8f9e9c7bd3a0e82affbfe84
| 4,744
|
py
|
Python
|
visigoth/utils/hue_manager/discrete_hue_manager.py
|
visigoths/visigoth
|
c5297148209d630f6668f0e5ba3039a8856d8320
|
[
"MIT"
] | null | null | null |
visigoth/utils/hue_manager/discrete_hue_manager.py
|
visigoths/visigoth
|
c5297148209d630f6668f0e5ba3039a8856d8320
|
[
"MIT"
] | 1
|
2021-01-26T16:55:48.000Z
|
2021-09-03T15:29:14.000Z
|
visigoth/utils/hue_manager/discrete_hue_manager.py
|
visigoths/visigoth
|
c5297148209d630f6668f0e5ba3039a8856d8320
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# visigoth: A lightweight Python3 library for rendering data visualizations in SVG
# Copyright (C) 2020-2021 Visigoth Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from visigoth.utils.hue_manager.hue_manager import HueManager
from visigoth.internal.utils.hue.hue import Hue
from visigoth.internal.utils.hue.colormaps import DiscreteColourMaps
class DiscreteHueManager(HueManager):
def __init__(self,hueMap="pastel",defaultHue="gray"):
"""
Create a hue_manager mapping discrete values to hues
Arguments:
hueMap(str): the name of a hueMap (see Notes) OR a list of hue names
Keyword Arguments:
defaultHue(str): the name of the default hue to use (to represent unmapped/undefined values)
Notes:
A list of the names and numbers of hues in each map is:
"deep": 10
"deep6": 6
"muted": 10
"muted6": 6
"pastel": 10
"pastel6": 6
"bright": 10
"bright6": 6
"dark": 10
"dark6": 6
"colorblind": 10
"colorblind6":6
"""
super(DiscreteHueManager,self).__init__(defaultHue)
self.built = False
self.categories = []
self.categorylist = []
self.hueMap = DiscreteColourMaps[hueMap] if hueMap else None
self.opacity = 1.0
self.value_labels = {}
self.hue_lookup = {}
@staticmethod
def listHueMaps():
return sorted(DiscreteColourMaps.keys())
def isDiscrete(self):
return True
def addHue(self,category,hue,label=None):
if label is None:
label = str(category)
self.categories.append((category,hue))
self.categorylist.append(category)
self.value_labels[category] = label
return self
def getCategories(self):
return self.categories
def allocateHue(self,value):
if value is not None:
if value not in self.categorylist:
self.categorylist.append(value)
def build(self):
# build a hue lookup table
if not self.built:
final_categories=[]
for (cat,col) in self.categories:
col = Hue.applyOpacity(col, self.opacity)
self.hue_lookup[cat] = col
final_categories.append((cat,col))
# assign hues to all unassigned categories from the hue map
cm_index = 0
for category in self.categorylist:
if category not in self.hue_lookup:
if not self.hueMap:
raise Exception("Please define a hue map")
col = self.hueMap[cm_index]
col = Hue.applyOpacity(col, self.opacity)
self.hue_lookup[category] = col
cm_index += 1
cm_index = cm_index % len(self.hueMap)
final_categories.append((category,col))
self.categories = final_categories
# apply opacity to the default hue
self.setDefaultHue(Hue.applyOpacity(self.getDefaultHue(),self.opacity))
self.built = True
def getHue(self,value):
if value is None:
return self.getDefaultHue()
else:
if value in self.hue_lookup:
return self.hue_lookup[value]
else:
return self.getDefaultHue()
def getLabel(self,value):
return self.value_labels.get(value,str(value))
def setOpacity(self,opacity):
self.opacity = opacity
def getOpacity(self):
return self.opacity
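# --- Editor's note: a minimal usage sketch (hypothetical categories, not part of
# the original module). ---
#
#   hm = DiscreteHueManager(hueMap="pastel")
#   hm.addHue("cat", "#ff0000", label="Cats")  # explicit hue for a known category
#   hm.allocateHue("dog")                      # hue assigned from the map at build()
#   hm.build()
#   hm.getHue("dog"), hm.getLabel("cat")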
| 36.775194
| 104
| 0.621206
|
from visigoth.utils.hue_manager.hue_manager import HueManager
from visigoth.internal.utils.hue.hue import Hue
from visigoth.internal.utils.hue.colormaps import DiscreteColourMaps
class DiscreteHueManager(HueManager):
def __init__(self,hueMap="pastel",defaultHue="gray"):
super(DiscreteHueManager,self).__init__(defaultHue)
self.built = False
self.categories = []
self.categorylist = []
self.hueMap = DiscreteColourMaps[hueMap] if hueMap else None
self.opacity = 1.0
self.value_labels = {}
self.hue_lookup = {}
@staticmethod
def listHueMaps():
return sorted(DiscreteColourMaps.keys())
def isDiscrete(self):
return True
def addHue(self,category,hue,label=None):
if label is None:
label = str(category)
self.categories.append((category,hue))
self.categorylist.append(category)
self.value_labels[category] = label
return self
def getCategories(self):
return self.categories
def allocateHue(self,value):
if value is not None:
if value not in self.categorylist:
self.categorylist.append(value)
def build(self):
if not self.built:
final_categories=[]
for (cat,col) in self.categories:
col = Hue.applyOpacity(col, self.opacity)
self.hue_lookup[cat] = col
final_categories.append((cat,col))
cm_index = 0
for category in self.categorylist:
if category not in self.hue_lookup:
if not self.hueMap:
raise Exception("Please define a hue map")
col = self.hueMap[cm_index]
col = Hue.applyOpacity(col, self.opacity)
self.hue_lookup[category] = col
cm_index += 1
cm_index = cm_index % len(self.hueMap)
final_categories.append((category,col))
self.categories = final_categories
self.setDefaultHue(Hue.applyOpacity(self.getDefaultHue(),self.opacity))
self.built = True
def getHue(self,value):
if value is None:
return self.getDefaultHue()
else:
if value in self.hue_lookup:
return self.hue_lookup[value]
else:
return self.getDefaultHue()
def getLabel(self,value):
return self.value_labels.get(value,str(value))
def setOpacity(self,opacity):
self.opacity = opacity
def getOpacity(self):
return self.opacity
| true
| true
|
1c431a04b0e307d352621e61ed087ea63836683d
| 183
|
py
|
Python
|
Court-APP/users/admin.py
|
mjhow4/attendance-app
|
726577ea60f53f35c522c322ca6e81c7e3e8856b
|
[
"MIT"
] | null | null | null |
Court-APP/users/admin.py
|
mjhow4/attendance-app
|
726577ea60f53f35c522c322ca6e81c7e3e8856b
|
[
"MIT"
] | null | null | null |
Court-APP/users/admin.py
|
mjhow4/attendance-app
|
726577ea60f53f35c522c322ca6e81c7e3e8856b
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import NewUser, CustomAccountManager
admin.site.register(NewUser, CustomAccountManager)
| 30.5
| 50
| 0.852459
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import NewUser, CustomAccountManager
admin.site.register(NewUser, CustomAccountManager)
| true
| true
|
1c431a439cff4e67b136f223f8bc58eae3b1f40a
| 1,117
|
py
|
Python
|
tests/conftest.py
|
gitter-badger/a2ml
|
1d9ef6657645b61c64090284ed8fadb1a68b932c
|
[
"Apache-2.0"
] | 30
|
2019-07-01T13:23:27.000Z
|
2022-03-16T21:19:33.000Z
|
tests/conftest.py
|
gitter-badger/a2ml
|
1d9ef6657645b61c64090284ed8fadb1a68b932c
|
[
"Apache-2.0"
] | 234
|
2019-07-04T13:56:15.000Z
|
2021-11-04T10:12:55.000Z
|
tests/conftest.py
|
gitter-badger/a2ml
|
1d9ef6657645b61c64090284ed8fadb1a68b932c
|
[
"Apache-2.0"
] | 13
|
2019-07-04T14:00:34.000Z
|
2020-07-13T11:18:44.000Z
|
import os
import pytest
import shutil
import logging
import json
from click.testing import CliRunner
from a2ml.api.utils.context import Context
@pytest.fixture
def ctx():
# load config(s) from the test app
return Context(debug=True)
@pytest.fixture
def runner():
return CliRunner()
@pytest.fixture(scope="function")
def isolated(runner):
with runner.isolated_filesystem():
yield runner
@pytest.fixture
def log(caplog):
caplog.set_level(logging.INFO)
return caplog
@pytest.fixture
def project(isolated):
source = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'fixtures',
'cli-integration-test')
shutil.copytree(source, './cli-integration-test')
os.chdir('cli-integration-test')
TEST_AUGER_CREDENTIALS = {
'username': 'test_user',
'organization': 'auger',
'api_url': 'https://example.com',
'token': 'fake_token',
}
@pytest.fixture
def auger_authenticated(monkeypatch, isolated):
monkeypatch.setenv("AUGER_CREDENTIALS", json.dumps(TEST_AUGER_CREDENTIALS))
#monkeypatch.setenv("AUGER_CREDENTIALS_PATH", os.getcwd())
| 22.795918
| 79
| 0.716204
|
import os
import pytest
import shutil
import logging
import json
from click.testing import CliRunner
from a2ml.api.utils.context import Context
@pytest.fixture
def ctx():
return Context(debug=True)
@pytest.fixture
def runner():
return CliRunner()
@pytest.fixture(scope="function")
def isolated(runner):
with runner.isolated_filesystem():
yield runner
@pytest.fixture
def log(caplog):
caplog.set_level(logging.INFO)
return caplog
@pytest.fixture
def project(isolated):
source = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'fixtures',
'cli-integration-test')
shutil.copytree(source, './cli-integration-test')
os.chdir('cli-integration-test')
TEST_AUGER_CREDENTIALS = {
'username': 'test_user',
'organization': 'auger',
'api_url': 'https://example.com',
'token': 'fake_token',
}
@pytest.fixture
def auger_authenticated(monkeypatch, isolated):
monkeypatch.setenv("AUGER_CREDENTIALS", json.dumps(TEST_AUGER_CREDENTIALS))
| true
| true
|
1c431a9d6a3aa2af83b23411ebb9876266948588
| 16,154
|
py
|
Python
|
sdk/python/tests/integration/registration/test_feature_store.py
|
potatochip/feast
|
bf557bcb72c7878a16dccb48443bbbe9dc3efa49
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/tests/integration/registration/test_feature_store.py
|
potatochip/feast
|
bf557bcb72c7878a16dccb48443bbbe9dc3efa49
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/tests/integration/registration/test_feature_store.py
|
potatochip/feast
|
bf557bcb72c7878a16dccb48443bbbe9dc3efa49
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from datetime import datetime, timedelta
from tempfile import mkstemp
import pytest
from pytest_lazyfixture import lazy_fixture
from feast import FileSource
from feast.data_format import ParquetFormat
from feast.entity import Entity
from feast.feature import Feature
from feast.feature_store import FeatureStore
from feast.feature_view import FeatureView
from feast.infra.offline_stores.file import FileOfflineStoreConfig
from feast.infra.online_stores.dynamodb import DynamoDBOnlineStoreConfig
from feast.infra.online_stores.sqlite import SqliteOnlineStoreConfig
from feast.protos.feast.types import Value_pb2 as ValueProto
from feast.repo_config import RepoConfig
from feast.value_type import ValueType
from tests.utils.data_source_utils import (
prep_file_source,
simple_bq_source_using_query_arg,
simple_bq_source_using_table_ref_arg,
)
@pytest.fixture
def feature_store_with_local_registry():
fd, registry_path = mkstemp()
fd, online_store_path = mkstemp()
return FeatureStore(
config=RepoConfig(
registry=registry_path,
project="default",
provider="local",
online_store=SqliteOnlineStoreConfig(path=online_store_path),
)
)
@pytest.fixture
def feature_store_with_gcs_registry():
from google.cloud import storage
storage_client = storage.Client()
bucket_name = f"feast-registry-test-{int(time.time() * 1000)}"
bucket = storage_client.bucket(bucket_name)
bucket = storage_client.create_bucket(bucket)
bucket.add_lifecycle_delete_rule(
age=14
) # delete buckets automatically after 14 days
bucket.patch()
bucket.blob("registry.db")
return FeatureStore(
config=RepoConfig(
registry=f"gs://{bucket_name}/registry.db",
project="default",
provider="gcp",
)
)
@pytest.fixture
def feature_store_with_s3_registry():
return FeatureStore(
config=RepoConfig(
registry=f"s3://feast-integration-tests/registries/{int(time.time() * 1000)}/registry.db",
project="default",
provider="aws",
online_store=DynamoDBOnlineStoreConfig(region="us-west-2"),
offline_store=FileOfflineStoreConfig(),
)
)
@pytest.mark.parametrize(
"test_feature_store", [lazy_fixture("feature_store_with_local_registry")],
)
def test_apply_entity_success(test_feature_store):
entity = Entity(
name="driver_car_id",
description="Car driver id",
value_type=ValueType.STRING,
labels={"team": "matchmaking"},
)
# Register Entity
test_feature_store.apply(entity)
entities = test_feature_store.list_entities()
entity = entities[0]
assert (
len(entities) == 1
and entity.name == "driver_car_id"
and entity.value_type == ValueType(ValueProto.ValueType.STRING)
and entity.description == "Car driver id"
and "team" in entity.labels
and entity.labels["team"] == "matchmaking"
)
test_feature_store.teardown()
@pytest.mark.integration
@pytest.mark.parametrize(
"test_feature_store",
[
lazy_fixture("feature_store_with_gcs_registry"),
lazy_fixture("feature_store_with_s3_registry"),
],
)
def test_apply_entity_integration(test_feature_store):
entity = Entity(
name="driver_car_id",
description="Car driver id",
value_type=ValueType.STRING,
labels={"team": "matchmaking"},
)
# Register Entity
test_feature_store.apply([entity])
entities = test_feature_store.list_entities()
entity = entities[0]
assert (
len(entities) == 1
and entity.name == "driver_car_id"
and entity.value_type == ValueType(ValueProto.ValueType.STRING)
and entity.description == "Car driver id"
and "team" in entity.labels
and entity.labels["team"] == "matchmaking"
)
entity = test_feature_store.get_entity("driver_car_id")
assert (
entity.name == "driver_car_id"
and entity.value_type == ValueType(ValueProto.ValueType.STRING)
and entity.description == "Car driver id"
and "team" in entity.labels
and entity.labels["team"] == "matchmaking"
)
test_feature_store.teardown()
@pytest.mark.parametrize(
"test_feature_store", [lazy_fixture("feature_store_with_local_registry")],
)
def test_apply_feature_view_success(test_feature_store):
# Create Feature Views
batch_source = FileSource(
file_format=ParquetFormat(),
path="file://feast/*",
event_timestamp_column="ts_col",
created_timestamp_column="timestamp",
date_partition_column="date_partition_col",
)
fv1 = FeatureView(
name="my_feature_view_1",
features=[
Feature(name="fs1_my_feature_1", dtype=ValueType.INT64),
Feature(name="fs1_my_feature_2", dtype=ValueType.STRING),
Feature(name="fs1_my_feature_3", dtype=ValueType.STRING_LIST),
Feature(name="fs1_my_feature_4", dtype=ValueType.BYTES_LIST),
],
entities=["fs1_my_entity_1"],
tags={"team": "matchmaking"},
input=batch_source,
ttl=timedelta(minutes=5),
)
# Register Feature View
test_feature_store.apply([fv1])
feature_views = test_feature_store.list_feature_views()
# List Feature Views
assert (
len(feature_views) == 1
and feature_views[0].name == "my_feature_view_1"
and feature_views[0].features[0].name == "fs1_my_feature_1"
and feature_views[0].features[0].dtype == ValueType.INT64
and feature_views[0].features[1].name == "fs1_my_feature_2"
and feature_views[0].features[1].dtype == ValueType.STRING
and feature_views[0].features[2].name == "fs1_my_feature_3"
and feature_views[0].features[2].dtype == ValueType.STRING_LIST
and feature_views[0].features[3].name == "fs1_my_feature_4"
and feature_views[0].features[3].dtype == ValueType.BYTES_LIST
and feature_views[0].entities[0] == "fs1_my_entity_1"
)
test_feature_store.teardown()
@pytest.mark.integration
@pytest.mark.parametrize(
"test_feature_store", [lazy_fixture("feature_store_with_local_registry")],
)
@pytest.mark.parametrize("dataframe_source", [lazy_fixture("simple_dataset_1")])
def test_feature_view_inference_success(test_feature_store, dataframe_source):
with prep_file_source(
df=dataframe_source, event_timestamp_column="ts_1"
) as file_source:
fv1 = FeatureView(
name="fv1",
entities=["id"],
ttl=timedelta(minutes=5),
online=True,
input=file_source,
tags={},
)
fv2 = FeatureView(
name="fv2",
entities=["id"],
ttl=timedelta(minutes=5),
online=True,
input=simple_bq_source_using_table_ref_arg(dataframe_source, "ts_1"),
tags={},
)
fv3 = FeatureView(
name="fv3",
entities=["id"],
ttl=timedelta(minutes=5),
online=True,
input=simple_bq_source_using_query_arg(dataframe_source, "ts_1"),
tags={},
)
test_feature_store.apply([fv1, fv2, fv3]) # Register Feature Views
feature_view_1 = test_feature_store.list_feature_views()[0]
feature_view_2 = test_feature_store.list_feature_views()[1]
feature_view_3 = test_feature_store.list_feature_views()[2]
actual_file_source = {
(feature.name, feature.dtype) for feature in feature_view_1.features
}
actual_bq_using_table_ref_arg_source = {
(feature.name, feature.dtype) for feature in feature_view_2.features
}
actual_bq_using_query_arg_source = {
(feature.name, feature.dtype) for feature in feature_view_3.features
}
expected = {
("float_col", ValueType.DOUBLE),
("int64_col", ValueType.INT64),
("string_col", ValueType.STRING),
}
assert (
expected
== actual_file_source
== actual_bq_using_table_ref_arg_source
== actual_bq_using_query_arg_source
)
test_feature_store.teardown()
@pytest.mark.integration
@pytest.mark.parametrize(
"test_feature_store",
[
lazy_fixture("feature_store_with_gcs_registry"),
lazy_fixture("feature_store_with_s3_registry"),
],
)
def test_apply_feature_view_integration(test_feature_store):
# Create Feature Views
batch_source = FileSource(
file_format=ParquetFormat(),
path="file://feast/*",
event_timestamp_column="ts_col",
created_timestamp_column="timestamp",
date_partition_column="date_partition_col",
)
fv1 = FeatureView(
name="my_feature_view_1",
features=[
Feature(name="fs1_my_feature_1", dtype=ValueType.INT64),
Feature(name="fs1_my_feature_2", dtype=ValueType.STRING),
Feature(name="fs1_my_feature_3", dtype=ValueType.STRING_LIST),
Feature(name="fs1_my_feature_4", dtype=ValueType.BYTES_LIST),
],
entities=["fs1_my_entity_1"],
tags={"team": "matchmaking"},
input=batch_source,
ttl=timedelta(minutes=5),
)
# Register Feature View
test_feature_store.apply([fv1])
feature_views = test_feature_store.list_feature_views()
# List Feature Views
assert (
len(feature_views) == 1
and feature_views[0].name == "my_feature_view_1"
and feature_views[0].features[0].name == "fs1_my_feature_1"
and feature_views[0].features[0].dtype == ValueType.INT64
and feature_views[0].features[1].name == "fs1_my_feature_2"
and feature_views[0].features[1].dtype == ValueType.STRING
and feature_views[0].features[2].name == "fs1_my_feature_3"
and feature_views[0].features[2].dtype == ValueType.STRING_LIST
and feature_views[0].features[3].name == "fs1_my_feature_4"
and feature_views[0].features[3].dtype == ValueType.BYTES_LIST
and feature_views[0].entities[0] == "fs1_my_entity_1"
)
feature_view = test_feature_store.get_feature_view("my_feature_view_1")
assert (
feature_view.name == "my_feature_view_1"
and feature_view.features[0].name == "fs1_my_feature_1"
and feature_view.features[0].dtype == ValueType.INT64
and feature_view.features[1].name == "fs1_my_feature_2"
and feature_view.features[1].dtype == ValueType.STRING
and feature_view.features[2].name == "fs1_my_feature_3"
and feature_view.features[2].dtype == ValueType.STRING_LIST
and feature_view.features[3].name == "fs1_my_feature_4"
and feature_view.features[3].dtype == ValueType.BYTES_LIST
and feature_view.entities[0] == "fs1_my_entity_1"
)
test_feature_store.delete_feature_view("my_feature_view_1")
feature_views = test_feature_store.list_feature_views()
assert len(feature_views) == 0
test_feature_store.teardown()
@pytest.mark.parametrize(
"test_feature_store", [lazy_fixture("feature_store_with_local_registry")],
)
def test_apply_object_and_read(test_feature_store):
assert isinstance(test_feature_store, FeatureStore)
# Create Feature Views
batch_source = FileSource(
file_format=ParquetFormat(),
path="file://feast/*",
event_timestamp_column="ts_col",
created_timestamp_column="timestamp",
)
e1 = Entity(
name="fs1_my_entity_1", value_type=ValueType.STRING, description="something"
)
e2 = Entity(
name="fs1_my_entity_2", value_type=ValueType.STRING, description="something"
)
fv1 = FeatureView(
name="my_feature_view_1",
features=[
Feature(name="fs1_my_feature_1", dtype=ValueType.INT64),
Feature(name="fs1_my_feature_2", dtype=ValueType.STRING),
Feature(name="fs1_my_feature_3", dtype=ValueType.STRING_LIST),
Feature(name="fs1_my_feature_4", dtype=ValueType.BYTES_LIST),
],
entities=["fs1_my_entity_1"],
tags={"team": "matchmaking"},
input=batch_source,
ttl=timedelta(minutes=5),
)
fv2 = FeatureView(
name="my_feature_view_2",
features=[
Feature(name="fs1_my_feature_1", dtype=ValueType.INT64),
Feature(name="fs1_my_feature_2", dtype=ValueType.STRING),
Feature(name="fs1_my_feature_3", dtype=ValueType.STRING_LIST),
Feature(name="fs1_my_feature_4", dtype=ValueType.BYTES_LIST),
],
entities=["fs1_my_entity_1"],
tags={"team": "matchmaking"},
input=batch_source,
ttl=timedelta(minutes=5),
)
# Register Feature View
test_feature_store.apply([fv1, e1, fv2, e2])
fv1_actual = test_feature_store.get_feature_view("my_feature_view_1")
e1_actual = test_feature_store.get_entity("fs1_my_entity_1")
assert fv1 == fv1_actual
assert e1 == e1_actual
assert fv2 != fv1_actual
assert e2 != e1_actual
test_feature_store.teardown()
def test_apply_remote_repo():
fd, registry_path = mkstemp()
fd, online_store_path = mkstemp()
return FeatureStore(
config=RepoConfig(
registry=registry_path,
project="default",
provider="local",
online_store=SqliteOnlineStoreConfig(path=online_store_path),
)
)
@pytest.mark.parametrize(
"test_feature_store", [lazy_fixture("feature_store_with_local_registry")],
)
@pytest.mark.parametrize("dataframe_source", [lazy_fixture("simple_dataset_1")])
def test_reapply_feature_view_success(test_feature_store, dataframe_source):
with prep_file_source(
df=dataframe_source, event_timestamp_column="ts_1"
) as file_source:
e = Entity(name="id", value_type=ValueType.STRING)
# Create Feature View
fv1 = FeatureView(
name="my_feature_view_1",
features=[Feature(name="string_col", dtype=ValueType.STRING)],
entities=["id"],
input=file_source,
ttl=timedelta(minutes=5),
)
# Register Feature View
test_feature_store.apply([fv1, e])
# Check Feature View
fv_stored = test_feature_store.get_feature_view(fv1.name)
assert len(fv_stored.materialization_intervals) == 0
# Run materialization
test_feature_store.materialize(datetime(2020, 1, 1), datetime(2021, 1, 1))
# Check Feature View
fv_stored = test_feature_store.get_feature_view(fv1.name)
assert len(fv_stored.materialization_intervals) == 1
# Apply again
test_feature_store.apply([fv1])
# Check Feature View
fv_stored = test_feature_store.get_feature_view(fv1.name)
assert len(fv_stored.materialization_intervals) == 1
# Change and apply Feature View
fv1 = FeatureView(
name="my_feature_view_1",
features=[Feature(name="int64_col", dtype=ValueType.INT64)],
entities=["id"],
input=file_source,
ttl=timedelta(minutes=5),
)
test_feature_store.apply([fv1])
# Check Feature View
fv_stored = test_feature_store.get_feature_view(fv1.name)
assert len(fv_stored.materialization_intervals) == 0
test_feature_store.teardown()
| 33.445135
| 102
| 0.667884
|
import time
from datetime import datetime, timedelta
from tempfile import mkstemp
import pytest
from pytest_lazyfixture import lazy_fixture
from feast import FileSource
from feast.data_format import ParquetFormat
from feast.entity import Entity
from feast.feature import Feature
from feast.feature_store import FeatureStore
from feast.feature_view import FeatureView
from feast.infra.offline_stores.file import FileOfflineStoreConfig
from feast.infra.online_stores.dynamodb import DynamoDBOnlineStoreConfig
from feast.infra.online_stores.sqlite import SqliteOnlineStoreConfig
from feast.protos.feast.types import Value_pb2 as ValueProto
from feast.repo_config import RepoConfig
from feast.value_type import ValueType
from tests.utils.data_source_utils import (
prep_file_source,
simple_bq_source_using_query_arg,
simple_bq_source_using_table_ref_arg,
)
@pytest.fixture
def feature_store_with_local_registry():
fd, registry_path = mkstemp()
fd, online_store_path = mkstemp()
return FeatureStore(
config=RepoConfig(
registry=registry_path,
project="default",
provider="local",
online_store=SqliteOnlineStoreConfig(path=online_store_path),
)
)
@pytest.fixture
def feature_store_with_gcs_registry():
from google.cloud import storage
storage_client = storage.Client()
bucket_name = f"feast-registry-test-{int(time.time() * 1000)}"
bucket = storage_client.bucket(bucket_name)
bucket = storage_client.create_bucket(bucket)
bucket.add_lifecycle_delete_rule(
age=14
)
bucket.patch()
bucket.blob("registry.db")
return FeatureStore(
config=RepoConfig(
registry=f"gs://{bucket_name}/registry.db",
project="default",
provider="gcp",
)
)
@pytest.fixture
def feature_store_with_s3_registry():
return FeatureStore(
config=RepoConfig(
registry=f"s3://feast-integration-tests/registries/{int(time.time() * 1000)}/registry.db",
project="default",
provider="aws",
online_store=DynamoDBOnlineStoreConfig(region="us-west-2"),
offline_store=FileOfflineStoreConfig(),
)
)
@pytest.mark.parametrize(
"test_feature_store", [lazy_fixture("feature_store_with_local_registry")],
)
def test_apply_entity_success(test_feature_store):
entity = Entity(
name="driver_car_id",
description="Car driver id",
value_type=ValueType.STRING,
labels={"team": "matchmaking"},
)
test_feature_store.apply(entity)
entities = test_feature_store.list_entities()
entity = entities[0]
assert (
len(entities) == 1
and entity.name == "driver_car_id"
and entity.value_type == ValueType(ValueProto.ValueType.STRING)
and entity.description == "Car driver id"
and "team" in entity.labels
and entity.labels["team"] == "matchmaking"
)
test_feature_store.teardown()
@pytest.mark.integration
@pytest.mark.parametrize(
"test_feature_store",
[
lazy_fixture("feature_store_with_gcs_registry"),
lazy_fixture("feature_store_with_s3_registry"),
],
)
def test_apply_entity_integration(test_feature_store):
entity = Entity(
name="driver_car_id",
description="Car driver id",
value_type=ValueType.STRING,
labels={"team": "matchmaking"},
)
test_feature_store.apply([entity])
entities = test_feature_store.list_entities()
entity = entities[0]
assert (
len(entities) == 1
and entity.name == "driver_car_id"
and entity.value_type == ValueType(ValueProto.ValueType.STRING)
and entity.description == "Car driver id"
and "team" in entity.labels
and entity.labels["team"] == "matchmaking"
)
entity = test_feature_store.get_entity("driver_car_id")
assert (
entity.name == "driver_car_id"
and entity.value_type == ValueType(ValueProto.ValueType.STRING)
and entity.description == "Car driver id"
and "team" in entity.labels
and entity.labels["team"] == "matchmaking"
)
test_feature_store.teardown()
@pytest.mark.parametrize(
"test_feature_store", [lazy_fixture("feature_store_with_local_registry")],
)
def test_apply_feature_view_success(test_feature_store):
batch_source = FileSource(
file_format=ParquetFormat(),
path="file://feast/*",
event_timestamp_column="ts_col",
created_timestamp_column="timestamp",
date_partition_column="date_partition_col",
)
fv1 = FeatureView(
name="my_feature_view_1",
features=[
Feature(name="fs1_my_feature_1", dtype=ValueType.INT64),
Feature(name="fs1_my_feature_2", dtype=ValueType.STRING),
Feature(name="fs1_my_feature_3", dtype=ValueType.STRING_LIST),
Feature(name="fs1_my_feature_4", dtype=ValueType.BYTES_LIST),
],
entities=["fs1_my_entity_1"],
tags={"team": "matchmaking"},
input=batch_source,
ttl=timedelta(minutes=5),
)
test_feature_store.apply([fv1])
feature_views = test_feature_store.list_feature_views()
assert (
len(feature_views) == 1
and feature_views[0].name == "my_feature_view_1"
and feature_views[0].features[0].name == "fs1_my_feature_1"
and feature_views[0].features[0].dtype == ValueType.INT64
and feature_views[0].features[1].name == "fs1_my_feature_2"
and feature_views[0].features[1].dtype == ValueType.STRING
and feature_views[0].features[2].name == "fs1_my_feature_3"
and feature_views[0].features[2].dtype == ValueType.STRING_LIST
and feature_views[0].features[3].name == "fs1_my_feature_4"
and feature_views[0].features[3].dtype == ValueType.BYTES_LIST
and feature_views[0].entities[0] == "fs1_my_entity_1"
)
test_feature_store.teardown()
@pytest.mark.integration
@pytest.mark.parametrize(
"test_feature_store", [lazy_fixture("feature_store_with_local_registry")],
)
@pytest.mark.parametrize("dataframe_source", [lazy_fixture("simple_dataset_1")])
def test_feature_view_inference_success(test_feature_store, dataframe_source):
with prep_file_source(
df=dataframe_source, event_timestamp_column="ts_1"
) as file_source:
fv1 = FeatureView(
name="fv1",
entities=["id"],
ttl=timedelta(minutes=5),
online=True,
input=file_source,
tags={},
)
fv2 = FeatureView(
name="fv2",
entities=["id"],
ttl=timedelta(minutes=5),
online=True,
input=simple_bq_source_using_table_ref_arg(dataframe_source, "ts_1"),
tags={},
)
fv3 = FeatureView(
name="fv3",
entities=["id"],
ttl=timedelta(minutes=5),
online=True,
input=simple_bq_source_using_query_arg(dataframe_source, "ts_1"),
tags={},
)
test_feature_store.apply([fv1, fv2, fv3])
feature_view_1 = test_feature_store.list_feature_views()[0]
feature_view_2 = test_feature_store.list_feature_views()[1]
feature_view_3 = test_feature_store.list_feature_views()[2]
actual_file_source = {
(feature.name, feature.dtype) for feature in feature_view_1.features
}
actual_bq_using_table_ref_arg_source = {
(feature.name, feature.dtype) for feature in feature_view_2.features
}
actual_bq_using_query_arg_source = {
(feature.name, feature.dtype) for feature in feature_view_3.features
}
expected = {
("float_col", ValueType.DOUBLE),
("int64_col", ValueType.INT64),
("string_col", ValueType.STRING),
}
assert (
expected
== actual_file_source
== actual_bq_using_table_ref_arg_source
== actual_bq_using_query_arg_source
)
test_feature_store.teardown()
@pytest.mark.integration
@pytest.mark.parametrize(
"test_feature_store",
[
lazy_fixture("feature_store_with_gcs_registry"),
lazy_fixture("feature_store_with_s3_registry"),
],
)
def test_apply_feature_view_integration(test_feature_store):
batch_source = FileSource(
file_format=ParquetFormat(),
path="file://feast/*",
event_timestamp_column="ts_col",
created_timestamp_column="timestamp",
date_partition_column="date_partition_col",
)
fv1 = FeatureView(
name="my_feature_view_1",
features=[
Feature(name="fs1_my_feature_1", dtype=ValueType.INT64),
Feature(name="fs1_my_feature_2", dtype=ValueType.STRING),
Feature(name="fs1_my_feature_3", dtype=ValueType.STRING_LIST),
Feature(name="fs1_my_feature_4", dtype=ValueType.BYTES_LIST),
],
entities=["fs1_my_entity_1"],
tags={"team": "matchmaking"},
input=batch_source,
ttl=timedelta(minutes=5),
)
test_feature_store.apply([fv1])
feature_views = test_feature_store.list_feature_views()
assert (
len(feature_views) == 1
and feature_views[0].name == "my_feature_view_1"
and feature_views[0].features[0].name == "fs1_my_feature_1"
and feature_views[0].features[0].dtype == ValueType.INT64
and feature_views[0].features[1].name == "fs1_my_feature_2"
and feature_views[0].features[1].dtype == ValueType.STRING
and feature_views[0].features[2].name == "fs1_my_feature_3"
and feature_views[0].features[2].dtype == ValueType.STRING_LIST
and feature_views[0].features[3].name == "fs1_my_feature_4"
and feature_views[0].features[3].dtype == ValueType.BYTES_LIST
and feature_views[0].entities[0] == "fs1_my_entity_1"
)
feature_view = test_feature_store.get_feature_view("my_feature_view_1")
assert (
feature_view.name == "my_feature_view_1"
and feature_view.features[0].name == "fs1_my_feature_1"
and feature_view.features[0].dtype == ValueType.INT64
and feature_view.features[1].name == "fs1_my_feature_2"
and feature_view.features[1].dtype == ValueType.STRING
and feature_view.features[2].name == "fs1_my_feature_3"
and feature_view.features[2].dtype == ValueType.STRING_LIST
and feature_view.features[3].name == "fs1_my_feature_4"
and feature_view.features[3].dtype == ValueType.BYTES_LIST
and feature_view.entities[0] == "fs1_my_entity_1"
)
test_feature_store.delete_feature_view("my_feature_view_1")
feature_views = test_feature_store.list_feature_views()
assert len(feature_views) == 0
test_feature_store.teardown()
@pytest.mark.parametrize(
"test_feature_store", [lazy_fixture("feature_store_with_local_registry")],
)
def test_apply_object_and_read(test_feature_store):
assert isinstance(test_feature_store, FeatureStore)
batch_source = FileSource(
file_format=ParquetFormat(),
path="file://feast/*",
event_timestamp_column="ts_col",
created_timestamp_column="timestamp",
)
e1 = Entity(
name="fs1_my_entity_1", value_type=ValueType.STRING, description="something"
)
e2 = Entity(
name="fs1_my_entity_2", value_type=ValueType.STRING, description="something"
)
fv1 = FeatureView(
name="my_feature_view_1",
features=[
Feature(name="fs1_my_feature_1", dtype=ValueType.INT64),
Feature(name="fs1_my_feature_2", dtype=ValueType.STRING),
Feature(name="fs1_my_feature_3", dtype=ValueType.STRING_LIST),
Feature(name="fs1_my_feature_4", dtype=ValueType.BYTES_LIST),
],
entities=["fs1_my_entity_1"],
tags={"team": "matchmaking"},
input=batch_source,
ttl=timedelta(minutes=5),
)
fv2 = FeatureView(
name="my_feature_view_2",
features=[
Feature(name="fs1_my_feature_1", dtype=ValueType.INT64),
Feature(name="fs1_my_feature_2", dtype=ValueType.STRING),
Feature(name="fs1_my_feature_3", dtype=ValueType.STRING_LIST),
Feature(name="fs1_my_feature_4", dtype=ValueType.BYTES_LIST),
],
entities=["fs1_my_entity_1"],
tags={"team": "matchmaking"},
input=batch_source,
ttl=timedelta(minutes=5),
)
test_feature_store.apply([fv1, e1, fv2, e2])
fv1_actual = test_feature_store.get_feature_view("my_feature_view_1")
e1_actual = test_feature_store.get_entity("fs1_my_entity_1")
assert fv1 == fv1_actual
assert e1 == e1_actual
assert fv2 != fv1_actual
assert e2 != e1_actual
test_feature_store.teardown()
def test_apply_remote_repo():
fd, registry_path = mkstemp()
fd, online_store_path = mkstemp()
return FeatureStore(
config=RepoConfig(
registry=registry_path,
project="default",
provider="local",
online_store=SqliteOnlineStoreConfig(path=online_store_path),
)
)
@pytest.mark.parametrize(
"test_feature_store", [lazy_fixture("feature_store_with_local_registry")],
)
@pytest.mark.parametrize("dataframe_source", [lazy_fixture("simple_dataset_1")])
def test_reapply_feature_view_success(test_feature_store, dataframe_source):
with prep_file_source(
df=dataframe_source, event_timestamp_column="ts_1"
) as file_source:
e = Entity(name="id", value_type=ValueType.STRING)
fv1 = FeatureView(
name="my_feature_view_1",
features=[Feature(name="string_col", dtype=ValueType.STRING)],
entities=["id"],
input=file_source,
ttl=timedelta(minutes=5),
)
test_feature_store.apply([fv1, e])
fv_stored = test_feature_store.get_feature_view(fv1.name)
assert len(fv_stored.materialization_intervals) == 0
test_feature_store.materialize(datetime(2020, 1, 1), datetime(2021, 1, 1))
fv_stored = test_feature_store.get_feature_view(fv1.name)
assert len(fv_stored.materialization_intervals) == 1
test_feature_store.apply([fv1])
fv_stored = test_feature_store.get_feature_view(fv1.name)
assert len(fv_stored.materialization_intervals) == 1
fv1 = FeatureView(
name="my_feature_view_1",
features=[Feature(name="int64_col", dtype=ValueType.INT64)],
entities=["id"],
input=file_source,
ttl=timedelta(minutes=5),
)
test_feature_store.apply([fv1])
fv_stored = test_feature_store.get_feature_view(fv1.name)
assert len(fv_stored.materialization_intervals) == 0
test_feature_store.teardown()
| true
| true
|
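The Feast record above round-trips FeatureView objects through a registry (apply, list, get, delete). A minimal standalone sketch of that flow, using the same (older) Feast API surface the tests import; the view, entity, and feature names here are illustrative, and the file path is a placeholder exactly as in the tests:

from datetime import timedelta
from tempfile import mkstemp

from feast import FileSource
from feast.data_format import ParquetFormat
from feast.feature import Feature
from feast.feature_store import FeatureStore
from feast.feature_view import FeatureView
from feast.infra.online_stores.sqlite import SqliteOnlineStoreConfig
from feast.repo_config import RepoConfig
from feast.value_type import ValueType

# Throwaway local registry and online store, mirroring the fixtures above.
_, registry_path = mkstemp()
_, online_store_path = mkstemp()
store = FeatureStore(
    config=RepoConfig(
        registry=registry_path,
        project="default",
        provider="local",
        online_store=SqliteOnlineStoreConfig(path=online_store_path),
    )
)

source = FileSource(
    file_format=ParquetFormat(),
    path="file://feast/*",  # placeholder path, as in the tests
    event_timestamp_column="ts_col",
)
fv = FeatureView(
    name="demo_view",                  # illustrative name
    entities=["demo_entity"],          # illustrative entity
    features=[Feature(name="demo_feature", dtype=ValueType.INT64)],
    input=source,
    ttl=timedelta(minutes=5),
)

store.apply([fv])                                  # register in the registry
assert store.get_feature_view("demo_view").name == "demo_view"
store.delete_feature_view("demo_view")             # remove it again
store.teardown()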
1c431aabedb41ed6587e6ba57f1ebca93ef9a5d7
| 270
|
py
|
Python
|
exercicio29.py
|
FelipeRossoni/infosatc-lp-avaliativo-01
|
8981927cb8fbad5cffa20533557a8402b794455c
|
[
"MIT"
] | null | null | null |
exercicio29.py
|
FelipeRossoni/infosatc-lp-avaliativo-01
|
8981927cb8fbad5cffa20533557a8402b794455c
|
[
"MIT"
] | null | null | null |
exercicio29.py
|
FelipeRossoni/infosatc-lp-avaliativo-01
|
8981927cb8fbad5cffa20533557a8402b794455c
|
[
"MIT"
] | null | null | null |
n1 = float(input("Digite a primeira nota: "))
n2 = float(input("Digite a segunda nota: "))
n3 = float(input("Digite a terceira nota: "))
n4 = float(input("Digite a quarta nota: "))
media = ((n1+n2+n3+n4)/4)
print("A média aritmética de suas notas é: {}".format(media))
| 45
| 62
| 0.666667
|
n1 = float(input("Digite a primeira nota: "))
n2 = float(input("Digite a segunda nota: "))
n3 = float(input("Digite a terceira nota: "))
n4 = float(input("Digite a quarta nota: "))
media = ((n1+n2+n3+n4)/4)
print("A média aritmética de suas notas é: {}".format(media))
| true
| true
|
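The exercise above reads four grades and prints their arithmetic mean. A quick worked check with hypothetical grades:

notas = [7.0, 8.0, 6.0, 9.0]      # hypothetical grades
media = sum(notas) / len(notas)   # (7 + 8 + 6 + 9) / 4 = 7.5
print("A média aritmética de suas notas é: {}".format(media))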
1c431b0d542110ce047d54c74904da7d90a27db8
| 755
|
py
|
Python
|
201509/3.py
|
L-LYR/csp-sol
|
6c0aec82d4704dc8b53886fe1f72e5088d6eab6d
|
[
"MIT"
] | null | null | null |
201509/3.py
|
L-LYR/csp-sol
|
6c0aec82d4704dc8b53886fe1f72e5088d6eab6d
|
[
"MIT"
] | null | null | null |
201509/3.py
|
L-LYR/csp-sol
|
6c0aec82d4704dc8b53886fe1f72e5088d6eab6d
|
[
"MIT"
] | null | null | null |
# Time: 04/02/21
# Author: HammerLi
# Tags: [Simulation]
# Title: Template Generation System
# Content:
# Substitute the given strings into the template placeholders
from collections import defaultdict
m, n = map(int, input().split(' '))
template = ""
for _ in range(m):
template += "\n" + input()
template = template[1:]
vars = defaultdict(str)
for _ in range(n):
line = input()
sep = line.find(' ')
var, tar = line[:sep], line[sep+2:-1]
vars[var] = tar
i = 0
while i < len(template):
left = template.find("{{ ", i)
if left == -1:
break
right = template.find(" }}", left)
if right == -1:
break
key = template[left+3:right]
template = template[:left] + vars[key] + template[right + 3:]
i = left + len(vars[key])
print(template)
# To prevent recursive substitution, replacement can only happen after all input is read, resolving one placeholder at a time
| 19.358974
| 65
| 0.580132
|
from collections import defaultdict
m, n = map(int, input().split(' '))
template = ""
for _ in range(m):
template += "\n" + input()
template = template[1:]
vars = defaultdict(str)
for _ in range(n):
line = input()
sep = line.find(' ')
var, tar = line[:sep], line[sep+2:-1]
vars[var] = tar
i = 0
while i < len(template):
left = template.find("{{ ", i)
if left == -1:
break
right = template.find(" }}", left)
if right == -1:
break
key = template[left+3:right]
template = template[:left] + vars[key] + template[right + 3:]
i = left + len(vars[key])
print(template)
| true
| true
|
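The solution above scans for "{{ " / " }}" delimiters with str.find and, by advancing i past each substituted value, expands every placeholder exactly once (no recursive substitution). A self-contained sketch of the same scan on hard-coded data rather than stdin (the function name is illustrative):

def render(template, bindings):
    # Missing keys expand to the empty string, like the defaultdict above.
    i = 0
    while i < len(template):
        left = template.find("{{ ", i)
        if left == -1:
            break
        right = template.find(" }}", left)
        if right == -1:
            break
        key = template[left + 3:right]
        value = bindings.get(key, "")
        template = template[:left] + value + template[right + 3:]
        i = left + len(value)  # skip past the inserted text: no re-expansion
    return template

assert render("Hello {{ name }}!", {"name": "CSP"}) == "Hello CSP!"
# A value that itself contains "{{ }}" is not expanded again:
assert render("{{ a }}", {"a": "{{ b }}", "b": "x"}) == "{{ b }}"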
1c431b21c6b302af48a3044f2455e1b97883f4e0
| 2,942
|
py
|
Python
|
malicious-payload-text-classifier/data_utils.py
|
kosletr/SessionBehaviorClassifierAPI
|
15e72da6c9c84dca20beb16469c855e11f901b82
|
[
"MIT"
] | 1
|
2020-10-22T09:35:34.000Z
|
2020-10-22T09:35:34.000Z
|
malicious-payload-text-classifier/data_utils.py
|
kosletr/SessionBehaviorClassifierAPI
|
15e72da6c9c84dca20beb16469c855e11f901b82
|
[
"MIT"
] | null | null | null |
malicious-payload-text-classifier/data_utils.py
|
kosletr/SessionBehaviorClassifierAPI
|
15e72da6c9c84dca20beb16469c855e11f901b82
|
[
"MIT"
] | null | null | null |
import numpy as np
import re
import csv
class Data(object):
"""
Class to handle loading and processing of raw datasets.
"""
def __init__(self, data_source,
alphabet="abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}",
input_size=1014, num_of_classes=8):
"""
Initialization of a Data object.
Args:
data_source (str): Raw data file path
alphabet (str): Alphabet of characters to index
input_size (int): Size of input features
num_of_classes (int): Number of classes in data
"""
self.alphabet = alphabet
self.alphabet_size = len(self.alphabet)
self.dict = {} # Maps each character to an integer
self.no_of_classes = num_of_classes
for idx, char in enumerate(self.alphabet):
self.dict[char] = idx + 1
self.length = input_size
self.data_source = data_source
def load_data(self):
"""
Load raw data from the source file into data variable.
Returns: None
"""
data = []
with open(self.data_source, 'r', encoding='utf-8') as f:
rdr = csv.reader(f, delimiter=',', quotechar='"')
for row in rdr:
txt = ""
for s in row[1:]:
txt = txt + " " + \
re.sub(r"^\s*(.*?)\s*$", r"\1", s).replace("\\n", "\n")  # strip surrounding whitespace; the original "(.-)" / "%1" is a Lua pattern that is inert in Python's re
data.append((int(row[0]), txt)) # format: (label, text)
self.data = np.array(data)
print("Data loaded from " + self.data_source)
def get_all_data(self):
"""
Return all loaded data from data variable.
Returns:
(np.ndarray) Data transformed from raw to indexed form with associated one-hot label.
"""
data_size = len(self.data)
start_index = 0
end_index = data_size
batch_texts = self.data[start_index:end_index]
batch_indices = []
one_hot = np.eye(self.no_of_classes, dtype='int64')
classes = []
for c, s in batch_texts:
batch_indices.append(self.str_to_indexes(s))
#c = int(c) - 1
c = int(c)
classes.append(one_hot[c])
return np.asarray(batch_indices, dtype='int64'), np.asarray(classes), batch_texts
def str_to_indexes(self, s):
"""
Convert a string to character indexes based on character dictionary.
Args:
s (str): String to be converted to indexes
Returns:
str2idx (np.ndarray): Indexes of characters in s
"""
s = s.lower()
max_length = min(len(s), self.length)
str2idx = np.zeros(self.length, dtype='int64')
for i in range(1, max_length + 1):
c = s[-i]
if c in self.dict:
str2idx[i - 1] = self.dict[c]
return str2idx
| 31.978261
| 100
| 0.539089
|
import numpy as np
import re
import csv
class Data(object):
def __init__(self, data_source,
alphabet="abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}",
input_size=1014, num_of_classes=8):
self.alphabet = alphabet
self.alphabet_size = len(self.alphabet)
self.dict = {} # Maps each character to an integer
self.no_of_classes = num_of_classes
for idx, char in enumerate(self.alphabet):
self.dict[char] = idx + 1
self.length = input_size
self.data_source = data_source
def load_data(self):
data = []
with open(self.data_source, 'r', encoding='utf-8') as f:
rdr = csv.reader(f, delimiter=',', quotechar='"')
for row in rdr:
txt = ""
for s in row[1:]:
txt = txt + " " + \
re.sub(r"^\s*(.*?)\s*$", r"\1", s).replace("\\n", "\n")
data.append((int(row[0]), txt)) # format: (label, text)
self.data = np.array(data)
print("Data loaded from " + self.data_source)
def get_all_data(self):
data_size = len(self.data)
start_index = 0
end_index = data_size
batch_texts = self.data[start_index:end_index]
batch_indices = []
one_hot = np.eye(self.no_of_classes, dtype='int64')
classes = []
for c, s in batch_texts:
batch_indices.append(self.str_to_indexes(s))
#c = int(c) - 1
c = int(c)
classes.append(one_hot[c])
return np.asarray(batch_indices, dtype='int64'), np.asarray(classes), batch_texts
def str_to_indexes(self, s):
s = s.lower()
max_length = min(len(s), self.length)
str2idx = np.zeros(self.length, dtype='int64')
for i in range(1, max_length + 1):
c = s[-i]
if c in self.dict:
str2idx[i - 1] = self.dict[c]
return str2idx
| true
| true
|
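str_to_indexes above encodes a string right-to-left (s[-i]), so when the input is longer than input_size only its last input_size characters survive, and the final character of the string lands at position 0. A standalone check of that indexing, with a short length for readability (letters-only alphabet is an illustrative simplification):

import numpy as np

alphabet = "abcdefghijklmnopqrstuvwxyz"
char_to_idx = {c: i + 1 for i, c in enumerate(alphabet)}  # 0 is left for padding/unknown

def str_to_indexes(s, length=8):
    s = s.lower()
    out = np.zeros(length, dtype="int64")
    for i in range(1, min(len(s), length) + 1):
        c = s[-i]                      # walk backwards from the end of the string
        if c in char_to_idx:
            out[i - 1] = char_to_idx[c]
    return out

print(str_to_indexes("abc"))  # [3 2 1 0 0 0 0 0]: 'c' at position 0, then 'b', then 'a'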
1c431b6d68ed9856dc72ebd54e4bc377a51e12c1
| 92
|
py
|
Python
|
bitmovin_api_sdk/encoding/filters/scale/customdata/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 11
|
2019-07-03T10:41:16.000Z
|
2022-02-25T21:48:06.000Z
|
bitmovin_api_sdk/encoding/filters/scale/customdata/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 8
|
2019-11-23T00:01:25.000Z
|
2021-04-29T12:30:31.000Z
|
bitmovin_api_sdk/encoding/filters/scale/customdata/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 13
|
2020-01-02T14:58:18.000Z
|
2022-03-26T12:10:30.000Z
|
from bitmovin_api_sdk.encoding.filters.scale.customdata.customdata_api import CustomdataApi
| 46
| 91
| 0.902174
|
from bitmovin_api_sdk.encoding.filters.scale.customdata.customdata_api import CustomdataApi
| true
| true
|
1c431c2081e2256040933dc48463bcb4ec3b752b
| 2,772
|
py
|
Python
|
laueagle/yamllint/rules/comments.py
|
yetship/laueagle
|
c2a1e4e56fdeaff3c7bb9b104b960db6ebac2eba
|
[
"MIT"
] | 1
|
2018-05-07T10:19:00.000Z
|
2018-05-07T10:19:00.000Z
|
laueagle/yamllint/rules/comments.py
|
yetship/laueagle
|
c2a1e4e56fdeaff3c7bb9b104b960db6ebac2eba
|
[
"MIT"
] | null | null | null |
laueagle/yamllint/rules/comments.py
|
yetship/laueagle
|
c2a1e4e56fdeaff3c7bb9b104b960db6ebac2eba
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Adrien Vergé
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Use this rule to control the position and formatting of comments.
.. rubric:: Options
* Use ``require-starting-space`` to require a space character right after the
``#``. Set to ``true`` to enable, ``false`` to disable.
* ``min-spaces-from-content`` is used to visually separate inline comments from
content. It defines the minimal required number of spaces between a comment
and its preceding content.
.. rubric:: Examples
#. With ``comments: {require-starting-space: true}``
the following code snippet would **PASS**:
::
# This sentence
# is a block comment
the following code snippet would **PASS**:
::
##############################
## This is some documentation
the following code snippet would **FAIL**:
::
#This sentence
#is a block comment
#. With ``comments: {min-spaces-from-content: 2}``
the following code snippet would **PASS**:
::
x = 2 ^ 127 - 1  # Mersenne prime number
the following code snippet would **FAIL**:
::
x = 2 ^ 127 - 1 # Mersenne prime number
"""
from ..linter import LintProblem
ID = 'comments'
TYPE = 'comment'
CONF = {'require-starting-space': bool,
'min-spaces-from-content': int}
def check(conf, comment):
if (conf['min-spaces-from-content'] != -1 and comment.is_inline() and
comment.pointer - comment.token_before.end_mark.pointer <
conf['min-spaces-from-content']):
yield LintProblem(comment.line_no, comment.column_no,
'too few spaces before comment')
if conf['require-starting-space']:
text_start = comment.pointer + 1
while (text_start < len(comment.buffer) and
comment.buffer[text_start] == '#'):
text_start += 1
if (text_start < len(comment.buffer) and
comment.buffer[text_start] not in (' ', '\n', '\0')):
yield LintProblem(comment.line_no,
comment.column_no + text_start - comment.pointer,
'missing starting space in comment')
| 30.8
| 79
| 0.643939
|
from ..linter import LintProblem
ID = 'comments'
TYPE = 'comment'
CONF = {'require-starting-space': bool,
'min-spaces-from-content': int}
def check(conf, comment):
if (conf['min-spaces-from-content'] != -1 and comment.is_inline() and
comment.pointer - comment.token_before.end_mark.pointer <
conf['min-spaces-from-content']):
yield LintProblem(comment.line_no, comment.column_no,
'too few spaces before comment')
if conf['require-starting-space']:
text_start = comment.pointer + 1
while (text_start < len(comment.buffer) and
comment.buffer[text_start] == '#'):
text_start += 1
if (text_start < len(comment.buffer) and
comment.buffer[text_start] not in (' ', '\n', '\0')):
yield LintProblem(comment.line_no,
comment.column_no + text_start - comment.pointer,
'missing starting space in comment')
| true
| true
|
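The require-starting-space branch above skips a leading run of '#' characters (so banner comments like "## doc" still pass) and then requires a space, newline, or NUL. A minimal standalone version of that scan (the helper name is illustrative, not part of yamllint):

def missing_starting_space(buffer, pointer):
    # pointer indexes the first '#' of the comment in the raw buffer;
    # bounds are checked before indexing, matching the fixed loop above.
    text_start = pointer + 1
    while text_start < len(buffer) and buffer[text_start] == '#':
        text_start += 1
    return (text_start < len(buffer)
            and buffer[text_start] not in (' ', '\n', '\0'))

assert missing_starting_space("#bad comment\n", 0)
assert not missing_starting_space("# good comment\n", 0)
assert not missing_starting_space("## doc banner\n", 0)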
1c431d3e30546faa9501c46b5a246dec7d3c2ce2
| 520
|
py
|
Python
|
Geometry/HGCalGeometry/python/hgcalTestNeighbor_cfi.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 3
|
2018-08-24T19:10:26.000Z
|
2019-02-19T11:45:32.000Z
|
Geometry/HGCalGeometry/python/hgcalTestNeighbor_cfi.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 26
|
2018-10-30T12:47:58.000Z
|
2022-03-29T08:39:00.000Z
|
Geometry/HGCalGeometry/python/hgcalTestNeighbor_cfi.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 5
|
2018-08-21T16:37:52.000Z
|
2020-01-09T13:33:17.000Z
|
import FWCore.ParameterSet.Config as cms
from Geometry.HGCalGeometry.hgcalEETestNeighbor_cfi import *
hgcalHEFTestNeighbor = hgcalEETestNeighbor.clone(
detector = cms.string("HGCalHESiliconSensitive"))
hgcalHEBTestNeighbor = hgcalEETestNeighbor.clone(
detector = cms.string("HCal"))
from Configuration.Eras.Modifier_phase2_hgcalV9_cff import phase2_hgcalV9
phase2_hgcalV9.toModify(hgcalHEBTestNeighbor,
detector = cms.string("HGCalHEScintillatorSensitive")
)
| 32.5
| 77
| 0.755769
|
import FWCore.ParameterSet.Config as cms
from Geometry.HGCalGeometry.hgcalEETestNeighbor_cfi import *
hgcalHEFTestNeighbor = hgcalEETestNeighbor.clone(
detector = cms.string("HGCalHESiliconSensitive"))
hgcalHEBTestNeighbor = hgcalEETestNeighbor.clone(
detector = cms.string("HCal"))
from Configuration.Eras.Modifier_phase2_hgcalV9_cff import phase2_hgcalV9
phase2_hgcalV9.toModify(hgcalHEBTestNeighbor,
detector = cms.string("HGCalHEScintillatorSensitive")
)
| true
| true
|
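The config above relies on two CMSSW idioms: clone(), which copies a module while overriding only the named parameters, and an era Modifier's toModify(), which patches a parameter only when that era is active. A sketch of the same pattern with a hypothetical module (not part of this file):

import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Modifier_phase2_hgcalV9_cff import phase2_hgcalV9

# Hypothetical base module, standing in for hgcalEETestNeighbor.
demoAnalyzer = cms.EDAnalyzer("DemoAnalyzer",
    detector = cms.string("HGCalEESensitive"))

# clone() keeps every other parameter and changes only 'detector'.
demoAnalyzerHEB = demoAnalyzer.clone(
    detector = cms.string("HCal"))

# Applied only in workflows where the phase2_hgcalV9 era is enabled.
phase2_hgcalV9.toModify(demoAnalyzerHEB,
    detector = cms.string("HGCalHEScintillatorSensitive"))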
1c431e26738e68771508430d0d0ebbb022437bb7
| 8,609
|
py
|
Python
|
kornia/filters/motion.py
|
Manza12/kornia
|
580bbbffc771470445de27a7957d970b5a606172
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-08-31T06:04:28.000Z
|
2021-08-31T06:04:28.000Z
|
kornia/filters/motion.py
|
Manza12/kornia
|
580bbbffc771470445de27a7957d970b5a606172
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
kornia/filters/motion.py
|
Manza12/kornia
|
580bbbffc771470445de27a7957d970b5a606172
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
from typing import Tuple, Union
import torch
import torch.nn as nn
import kornia
from kornia.filters.kernels_geometry import get_motion_kernel2d, get_motion_kernel3d
class MotionBlur(nn.Module):
r"""Blur 2D images (4D tensor) using the motion filter.
Args:
kernel_size (int): motion kernel width and height. It should be odd and positive.
angle (float): angle of the motion blur in degrees (anti-clockwise rotation).
direction (float): forward/backward direction of the motion blur.
Lower values towards -1.0 will point the motion blur towards the back (with angle provided via angle),
while higher values towards 1.0 will point the motion blur forward. A value of 0.0 leads to a
uniformly (but still angled) motion blur.
border_type (str): the padding mode to be applied before convolving. The expected modes are:
``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'constant'``.
Returns:
torch.Tensor: the blurred input tensor.
Shape:
- Input: :math:`(B, C, H, W)`
- Output: :math:`(B, C, H, W)`
Examples:
>>> input = torch.rand(2, 4, 5, 7)
>>> motion_blur = MotionBlur(3, 35., 0.5)
>>> output = motion_blur(input) # 2x4x5x7
"""
def __init__(self, kernel_size: int, angle: float, direction: float, border_type: str = 'constant') -> None:
super(MotionBlur, self).__init__()
self.kernel_size = kernel_size
self.angle: float = angle
self.direction: float = direction
self.border_type: str = border_type
def __repr__(self) -> str:
return (
f'{self.__class__.__name__} (kernel_size={self.kernel_size}, '
f'angle={self.angle}, direction={self.direction}, border_type={self.border_type})'
)
def forward(self, x: torch.Tensor):
return motion_blur(x, self.kernel_size, self.angle, self.direction, self.border_type)
class MotionBlur3D(nn.Module):
r"""Blur 3D volumes (5D tensor) using the motion filter.
Args:
kernel_size (int): motion kernel width and height. It should be odd and positive.
angle (float or tuple): Range of yaw (x-axis), pitch (y-axis), roll (z-axis) to select from.
direction (float): forward/backward direction of the motion blur.
Lower values towards -1.0 will point the motion blur towards the back (with angle provided via angle),
while higher values towards 1.0 will point the motion blur forward. A value of 0.0 leads to a
uniformly (but still angled) motion blur.
border_type (str): the padding mode to be applied before convolving. The expected modes are:
``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'constant'``.
Returns:
torch.Tensor: the blurred input tensor.
Shape:
- Input: :math:`(B, C, D, H, W)`
- Output: :math:`(B, C, D, H, W)`
Examples:
>>> input = torch.rand(2, 4, 5, 7, 9)
>>> motion_blur = MotionBlur3D(3, 35., 0.5)
>>> output = motion_blur(input) # 2x4x5x7x9
"""
def __init__(
self,
kernel_size: int,
angle: Union[float, Tuple[float, float, float]],
direction: float,
border_type: str = 'constant',
) -> None:
super(MotionBlur3D, self).__init__()
self.kernel_size = kernel_size
self.angle: Tuple[float, float, float]
if isinstance(angle, float):
self.angle = (angle, angle, angle)
elif isinstance(angle, (tuple, list)) and len(angle) == 3:
self.angle = angle
else:
raise ValueError(f"Expect angle to be either a float or a tuple of floats. Got {angle}.")
self.direction: float = direction
self.border_type: str = border_type
def __repr__(self) -> str:
return (
f'{self.__class__.__name__} (kernel_size={self.kernel_size}, '
f'angle={self.angle}, direction={self.direction}, border_type={self.border_type})'
)
def forward(self, x: torch.Tensor):
return motion_blur3d(x, self.kernel_size, self.angle, self.direction, self.border_type)
def motion_blur(
input: torch.Tensor,
kernel_size: int,
angle: Union[float, torch.Tensor],
direction: Union[float, torch.Tensor],
border_type: str = 'constant',
mode: str = 'nearest',
) -> torch.Tensor:
r"""Perform motion blur on 2D images (4D tensor).
Args:
input (torch.Tensor): the input tensor with shape :math:`(B, C, H, W)`.
kernel_size (int): motion kernel width and height. It should be odd and positive.
angle (Union[torch.Tensor, float]): angle of the motion blur in degrees (anti-clockwise rotation).
If tensor, it must be :math:`(B,)`.
direction (tensor or float): forward/backward direction of the motion blur.
Lower values towards -1.0 will point the motion blur towards the back (with angle provided via angle),
while higher values towards 1.0 will point the motion blur forward. A value of 0.0 leads to a
uniformly (but still angled) motion blur.
If tensor, it must be :math:`(B,)`.
border_type (str): the padding mode to be applied before convolving. The expected modes are:
``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'constant'``.
mode (str): interpolation mode for rotating the kernel. ``'bilinear'`` or ``'nearest'``.
Default: ``'nearest'``
Return:
torch.Tensor: the blurred image with shape :math:`(B, C, H, W)`.
Example:
>>> input = torch.randn(1, 3, 80, 90).repeat(2, 1, 1, 1)
>>> # perform exact motion blur across the batch
>>> out_1 = motion_blur(input, 5, 90., 1)
>>> torch.allclose(out_1[0], out_1[1])
True
>>> # perform element-wise motion blur across the batch
>>> out_1 = motion_blur(input, 5, torch.tensor([90., 180,]), torch.tensor([1., -1.]))
>>> torch.allclose(out_1[0], out_1[1])
False
"""
assert border_type in ["constant", "reflect", "replicate", "circular"]
kernel: torch.Tensor = get_motion_kernel2d(kernel_size, angle, direction, mode)
return kornia.filter2d(input, kernel, border_type)
def motion_blur3d(
input: torch.Tensor,
kernel_size: int,
angle: Union[Tuple[float, float, float], torch.Tensor],
direction: Union[float, torch.Tensor],
border_type: str = 'constant',
mode: str = 'nearest',
) -> torch.Tensor:
r"""Perform motion blur on 3D volumes (5D tensor).
Args:
input (torch.Tensor): the input tensor with shape :math:`(B, C, D, H, W)`.
kernel_size (int): motion kernel width, height and depth. It should be odd and positive.
angle (torch.Tensor or tuple): Range of yaw (x-axis), pitch (y-axis), roll (z-axis) to select from.
If tensor, it must be :math:`(B, 3)`.
direction (tensor or float): forward/backward direction of the motion blur.
Lower values towards -1.0 will point the motion blur towards the back (with angle provided via angle),
while higher values towards 1.0 will point the motion blur forward. A value of 0.0 leads to a
uniformly (but still angled) motion blur.
If tensor, it must be :math:`(B,)`.
border_type (str): the padding mode to be applied before convolving. The expected modes are:
``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'constant'``.
mode (str): interpolation mode for rotating the kernel. ``'bilinear'`` or ``'nearest'``.
Default: ``'nearest'``
Return:
torch.Tensor: the blurred image with shape :math:`(B, C, D, H, W)`.
Example:
>>> input = torch.randn(1, 3, 120, 80, 90).repeat(2, 1, 1, 1, 1)
>>> # perform exact motion blur across the batch
>>> out_1 = motion_blur3d(input, 5, (0., 90., 90.), 1)
>>> torch.allclose(out_1[0], out_1[1])
True
>>> # perform element-wise motion blur across the batch
>>> out_1 = motion_blur3d(input, 5, torch.tensor([[0., 90., 90.], [90., 180., 0.]]), torch.tensor([1., -1.]))
>>> torch.allclose(out_1[0], out_1[1])
False
"""
assert border_type in ["constant", "reflect", "replicate", "circular"]
kernel: torch.Tensor = get_motion_kernel3d(kernel_size, angle, direction, mode)
return kornia.filter3d(input, kernel, border_type)
| 44.376289
| 117
| 0.619352
|
from typing import Tuple, Union
import torch
import torch.nn as nn
import kornia
from kornia.filters.kernels_geometry import get_motion_kernel2d, get_motion_kernel3d
class MotionBlur(nn.Module):
def __init__(self, kernel_size: int, angle: float, direction: float, border_type: str = 'constant') -> None:
super(MotionBlur, self).__init__()
self.kernel_size = kernel_size
self.angle: float = angle
self.direction: float = direction
self.border_type: str = border_type
def __repr__(self) -> str:
return (
f'{self.__class__.__name__} (kernel_size={self.kernel_size}, '
f'angle={self.angle}, direction={self.direction}, border_type={self.border_type})'
)
def forward(self, x: torch.Tensor):
return motion_blur(x, self.kernel_size, self.angle, self.direction, self.border_type)
class MotionBlur3D(nn.Module):
def __init__(
self,
kernel_size: int,
angle: Union[float, Tuple[float, float, float]],
direction: float,
border_type: str = 'constant',
) -> None:
super(MotionBlur3D, self).__init__()
self.kernel_size = kernel_size
self.angle: Tuple[float, float, float]
if isinstance(angle, float):
self.angle = (angle, angle, angle)
elif isinstance(angle, (tuple, list)) and len(angle) == 3:
self.angle = angle
else:
raise ValueError(f"Expect angle to be either a float or a tuple of floats. Got {angle}.")
self.direction: float = direction
self.border_type: str = border_type
def __repr__(self) -> str:
return (
f'{self.__class__.__name__} (kernel_size={self.kernel_size}, '
f'angle={self.angle}, direction={self.direction}, border_type={self.border_type})'
)
def forward(self, x: torch.Tensor):
return motion_blur3d(x, self.kernel_size, self.angle, self.direction, self.border_type)
def motion_blur(
input: torch.Tensor,
kernel_size: int,
angle: Union[float, torch.Tensor],
direction: Union[float, torch.Tensor],
border_type: str = 'constant',
mode: str = 'nearest',
) -> torch.Tensor:
assert border_type in ["constant", "reflect", "replicate", "circular"]
kernel: torch.Tensor = get_motion_kernel2d(kernel_size, angle, direction, mode)
return kornia.filter2d(input, kernel, border_type)
def motion_blur3d(
input: torch.Tensor,
kernel_size: int,
angle: Union[Tuple[float, float, float], torch.Tensor],
direction: Union[float, torch.Tensor],
border_type: str = 'constant',
mode: str = 'nearest',
) -> torch.Tensor:
assert border_type in ["constant", "reflect", "replicate", "circular"]
kernel: torch.Tensor = get_motion_kernel3d(kernel_size, angle, direction, mode)
return kornia.filter3d(input, kernel, border_type)
| true
| true
|
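Both entry points above build a directional kernel (get_motion_kernel2d/3d) and delegate the convolution to kornia.filter2d/filter3d, so the output shape always matches the input shape. A short usage sketch, assuming the class is importable from kornia.filters, where this file lives:

import torch
from kornia.filters import MotionBlur

img = torch.rand(2, 3, 32, 32)                               # B, C, H, W
blur = MotionBlur(kernel_size=9, angle=45.0, direction=0.0)  # symmetric 45-degree streak
out = blur(img)
assert out.shape == img.shape                                # blurring preserves shape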
1c431e5e2201f855831bb4ec0c36521e9a0108ea
| 60,380
|
py
|
Python
|
desktop/core/ext-py/Django-1.11.20/tests/forms_tests/tests/test_formsets.py
|
maulikjs/hue
|
59ac879b55bb6fb26ecb4e85f4c70836fc21173f
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
tests/forms_tests/tests/test_formsets.py
|
287977288/test
|
142e3626ab3c676574631383ae6b5a4eced5a10e
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
tests/forms_tests/tests/test_formsets.py
|
287977288/test
|
142e3626ab3c676574631383ae6b5a4eced5a10e
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from collections import Counter
from django.forms import (
BaseForm, CharField, DateField, FileField, Form, IntegerField,
SplitDateTimeField, ValidationError, formsets,
)
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.test import SimpleTestCase, mock
from django.utils.encoding import force_text
class Choice(Form):
choice = CharField()
votes = IntegerField()
# FormSet allows us to use multiple instances of the same form on 1 page. For now,
# the best way to create a FormSet is by using the formset_factory function.
ChoiceFormSet = formset_factory(Choice)
class FavoriteDrinkForm(Form):
name = CharField()
class BaseFavoriteDrinksFormSet(BaseFormSet):
def clean(self):
seen_drinks = []
for drink in self.cleaned_data:
if drink['name'] in seen_drinks:
raise ValidationError('You may only specify a drink once.')
seen_drinks.append(drink['name'])
class EmptyFsetWontValidate(BaseFormSet):
def clean(self):
raise ValidationError("Clean method called")
# Let's define a FormSet that takes a list of favorite drinks, but raises an
# error if there are any duplicates. Used in ``test_clean_hook``,
# ``test_regression_6926`` & ``test_regression_12878``.
FavoriteDrinksFormSet = formset_factory(FavoriteDrinkForm, formset=BaseFavoriteDrinksFormSet, extra=3)
# Used in ``test_formset_splitdatetimefield``.
class SplitDateTimeForm(Form):
when = SplitDateTimeField(initial=datetime.datetime.now)
SplitDateTimeFormSet = formset_factory(SplitDateTimeForm)
class CustomKwargForm(Form):
def __init__(self, *args, **kwargs):
self.custom_kwarg = kwargs.pop('custom_kwarg')
super(CustomKwargForm, self).__init__(*args, **kwargs)
class FormsFormsetTestCase(SimpleTestCase):
def make_choiceformset(
self, formset_data=None, formset_class=ChoiceFormSet,
total_forms=None, initial_forms=0, max_num_forms=0, min_num_forms=0, **kwargs):
"""
Make a ChoiceFormset from the given formset_data.
The data should be given as a list of (choice, votes) tuples.
"""
kwargs.setdefault('prefix', 'choices')
kwargs.setdefault('auto_id', False)
if formset_data is None:
return formset_class(**kwargs)
if total_forms is None:
total_forms = len(formset_data)
def prefixed(*args):
args = (kwargs['prefix'],) + args
return '-'.join(args)
data = {
prefixed('TOTAL_FORMS'): str(total_forms),
prefixed('INITIAL_FORMS'): str(initial_forms),
prefixed('MAX_NUM_FORMS'): str(max_num_forms),
prefixed('MIN_NUM_FORMS'): str(min_num_forms),
}
for i, (choice, votes) in enumerate(formset_data):
data[prefixed(str(i), 'choice')] = choice
data[prefixed(str(i), 'votes')] = votes
return formset_class(data, **kwargs)
def test_basic_formset(self):
# A FormSet constructor takes the same arguments as Form. Let's create a FormSet
# for adding data. By default, it displays 1 blank form. It can display more,
# but we'll look at how to do so later.
formset = self.make_choiceformset()
self.assertHTMLEqual(
str(formset),
"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" />
<input type="hidden" name="choices-INITIAL_FORMS" value="0" />
<input type="hidden" name="choices-MIN_NUM_FORMS" value="0" />
<input type="hidden" name="choices-MAX_NUM_FORMS" value="1000" />
<tr><th>Choice:</th><td><input type="text" name="choices-0-choice" /></td></tr>
<tr><th>Votes:</th><td><input type="number" name="choices-0-votes" /></td></tr>"""
)
# We treat FormSet pretty much like we would treat a normal Form. FormSet has an
# is_valid method, and a cleaned_data or errors attribute depending on whether all
# the forms passed validation. However, unlike a Form instance, cleaned_data and
# errors will be a list of dicts rather than just a single dict.
formset = self.make_choiceformset([('Calexico', '100')])
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}])
# If a FormSet was not passed any data, its is_valid and has_changed
# methods should return False.
formset = self.make_choiceformset()
self.assertFalse(formset.is_valid())
self.assertFalse(formset.has_changed())
def test_form_kwargs_formset(self):
"""
Custom kwargs set on the formset instance are passed to the
underlying forms.
"""
FormSet = formset_factory(CustomKwargForm, extra=2)
formset = FormSet(form_kwargs={'custom_kwarg': 1})
for form in formset:
self.assertTrue(hasattr(form, 'custom_kwarg'))
self.assertEqual(form.custom_kwarg, 1)
def test_form_kwargs_formset_dynamic(self):
"""
Form kwargs can be passed dynamically in a formset.
"""
class DynamicBaseFormSet(BaseFormSet):
def get_form_kwargs(self, index):
return {'custom_kwarg': index}
DynamicFormSet = formset_factory(CustomKwargForm, formset=DynamicBaseFormSet, extra=2)
formset = DynamicFormSet(form_kwargs={'custom_kwarg': 'ignored'})
for i, form in enumerate(formset):
self.assertTrue(hasattr(form, 'custom_kwarg'))
self.assertEqual(form.custom_kwarg, i)
def test_form_kwargs_empty_form(self):
FormSet = formset_factory(CustomKwargForm)
formset = FormSet(form_kwargs={'custom_kwarg': 1})
self.assertTrue(hasattr(formset.empty_form, 'custom_kwarg'))
self.assertEqual(formset.empty_form.custom_kwarg, 1)
def test_formset_validation(self):
# FormSet instances can also have an error attribute if validation failed for
# any of the forms.
formset = self.make_choiceformset([('Calexico', '')])
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'votes': ['This field is required.']}])
def test_formset_validation_count(self):
"""
A formset's ManagementForm is validated once per FormSet.is_valid()
call and each form of the formset is cleaned once.
"""
def make_method_counter(func):
"""Add a counter to func for the number of times it's called."""
counter = Counter()
counter.call_count = 0
def mocked_func(*args, **kwargs):
counter.call_count += 1
return func(*args, **kwargs)
return mocked_func, counter
mocked_is_valid, is_valid_counter = make_method_counter(formsets.ManagementForm.is_valid)
mocked_full_clean, full_clean_counter = make_method_counter(BaseForm.full_clean)
formset = self.make_choiceformset([('Calexico', '100'), ('Any1', '42'), ('Any2', '101')])
with mock.patch('django.forms.formsets.ManagementForm.is_valid', mocked_is_valid), \
mock.patch('django.forms.forms.BaseForm.full_clean', mocked_full_clean):
self.assertTrue(formset.is_valid())
self.assertEqual(is_valid_counter.call_count, 1)
self.assertEqual(full_clean_counter.call_count, 4)
def test_formset_has_changed(self):
# A FormSet instance's has_changed() method returns True if any data was
# passed to its forms, even if the formset didn't validate
blank_formset = self.make_choiceformset([('', '')])
self.assertFalse(blank_formset.has_changed())
# invalid formset test
invalid_formset = self.make_choiceformset([('Calexico', '')])
self.assertFalse(invalid_formset.is_valid())
self.assertTrue(invalid_formset.has_changed())
# valid formset test
valid_formset = self.make_choiceformset([('Calexico', '100')])
self.assertTrue(valid_formset.is_valid())
self.assertTrue(valid_formset.has_changed())
def test_formset_initial_data(self):
# We can also prefill a FormSet with existing data by providing an ``initial``
# argument to the constructor. ``initial`` should be a list of dicts. By default,
# an extra blank form is included.
initial = [{'choice': 'Calexico', 'votes': 100}]
formset = self.make_choiceformset(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>"""
)
# Let's simulate what would happen if we submitted this form.
formset = self.make_choiceformset([('Calexico', '100'), ('', '')], initial_forms=1)
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}, {}])
def test_second_form_partially_filled(self):
# But the second form was blank! Shouldn't we get some errors? No. If we display
# a form as blank, it's ok for it to be submitted as blank. If we fill out even
# one of the fields of a blank form though, it will be validated. We may want to
# require that at least x forms are completed, but we'll show how to
# handle that later.
formset = self.make_choiceformset([('Calexico', '100'), ('The Decemberists', '')], initial_forms=1)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{}, {'votes': ['This field is required.']}])
def test_delete_prefilled_data(self):
# If we delete data that was pre-filled, we should get an error. Simply removing
# data from form fields isn't the proper way to delete it. We'll see how to
# handle that case later.
formset = self.make_choiceformset([('', ''), ('', '')], initial_forms=1)
self.assertFalse(formset.is_valid())
self.assertEqual(
formset.errors,
[{'votes': ['This field is required.'], 'choice': ['This field is required.']}, {}]
)
def test_displaying_more_than_one_blank_form(self):
# Displaying more than 1 blank form ###########################################
# We can also display more than 1 empty form at a time. To do so, pass an
# extra argument to formset_factory.
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" /></li>
<li>Votes: <input type="number" name="choices-0-votes" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>"""
)
# Since we displayed every form as blank, we will also accept them back as blank.
# This may seem a little strange, but later we will show how to require a minimum
# number of forms to be completed.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': '',
'choices-0-votes': '',
'choices-1-choice': '',
'choices-1-votes': '',
'choices-2-choice': '',
'choices-2-votes': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{}, {}, {}])
def test_min_num_displaying_more_than_one_blank_form(self):
# We can also display more than 1 empty form by passing the min_num argument
# to formset_factory. It will (essentially) increment the extra argument
ChoiceFormSet = formset_factory(Choice, extra=1, min_num=1)
formset = ChoiceFormSet(auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
# Min_num forms are required; extra forms can be empty.
self.assertFalse(formset.forms[0].empty_permitted)
self.assertTrue(formset.forms[1].empty_permitted)
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" /></li>
<li>Votes: <input type="number" name="choices-0-votes" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>"""
)
def test_min_num_displaying_more_than_one_blank_form_with_zero_extra(self):
# We can also display more than 1 empty form by passing the min_num argument
ChoiceFormSet = formset_factory(Choice, extra=0, min_num=3)
formset = ChoiceFormSet(auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" /></li>
<li>Votes: <input type="number" name="choices-0-votes" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>"""
)
def test_single_form_completed(self):
# We can just fill out one of the forms.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': '',
'choices-1-votes': '',
'choices-2-choice': '',
'choices-2-votes': '',
}
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}, {}, {}])
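# Note (illustration, not part of Django): every formset POST above carries the
# four management-form keys for its prefix, e.g. for prefix 'choices':
#
#     {'choices-TOTAL_FORMS': '3', 'choices-INITIAL_FORMS': '0',
#      'choices-MIN_NUM_FORMS': '0', 'choices-MAX_NUM_FORMS': '0'}
#
# If they are missing, accessing the formset's management_form (as is_valid()
# does) raises a ValidationError about missing or tampered ManagementForm data.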
def test_formset_validate_max_flag(self):
# If validate_max is set and max_num is less than TOTAL_FORMS in the
# data, a non-form error is reported. MAX_NUM_FORMS in the data is
# irrelevant here (it's output as a hint for the client but its
# value in the returned data is not checked)
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '2', # max number of forms - should be ignored
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, extra=1, max_num=1, validate_max=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['Please submit 1 or fewer forms.'])
def test_formset_validate_min_flag(self):
# If validate_min is set and min_num is more than TOTAL_FORMS in the
# data, a non-form error is reported. MIN_NUM_FORMS in the data is
# irrelevant here (it's output as a hint for the client but its
# value in the returned data is not checked)
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms - should be ignored
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, extra=1, min_num=3, validate_min=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['Please submit 3 or more forms.'])
def test_formset_validate_min_unchanged_forms(self):
"""
min_num validation doesn't consider unchanged forms with initial data
as "empty".
"""
initial = [
{'choice': 'Zero', 'votes': 0},
{'choice': 'One', 'votes': 0},
]
data = {
'choices-TOTAL_FORMS': '2',
'choices-INITIAL_FORMS': '2',
'choices-MIN_NUM_FORMS': '0',
'choices-MAX_NUM_FORMS': '2',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1', # changed from initial
}
ChoiceFormSet = formset_factory(Choice, min_num=2, validate_min=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices', initial=initial)
self.assertFalse(formset.forms[0].has_changed())
self.assertTrue(formset.forms[1].has_changed())
self.assertTrue(formset.is_valid())
def test_formset_validate_min_excludes_empty_forms(self):
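        # Extra forms left completely empty (and forms marked for deletion)
        # don't count toward min_num when validate_min is set.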
data = {
'choices-TOTAL_FORMS': '2',
'choices-INITIAL_FORMS': '0',
}
ChoiceFormSet = formset_factory(Choice, extra=2, min_num=1, validate_min=True, can_delete=True)
formset = ChoiceFormSet(data, prefix='choices')
self.assertFalse(formset.has_changed())
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['Please submit 1 or more forms.'])
def test_second_form_partially_filled_2(self):
# And once again, if we try to partially complete a form, validation will fail.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': 'The Decemberists',
'choices-1-votes': '', # missing value
'choices-2-choice': '',
'choices-2-votes': '',
}
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{}, {'votes': ['This field is required.']}, {}])
def test_more_initial_data(self):
# The extra argument also works when the formset is pre-filled with initial
# data.
initial = [{'choice': 'Calexico', 'votes': 100}]
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>
<li>Choice: <input type="text" name="choices-3-choice" /></li>
<li>Votes: <input type="number" name="choices-3-votes" /></li>"""
)
        # Retrieving an empty form works; it renders with the __prefix__
        # placeholder in place of a form index.
self.assertTrue(formset.empty_form.empty_permitted)
self.assertHTMLEqual(
formset.empty_form.as_ul(),
"""<li>Choice: <input type="text" name="choices-__prefix__-choice" /></li>
<li>Votes: <input type="number" name="choices-__prefix__-votes" /></li>"""
)
def test_formset_with_deletion(self):
# FormSets with deletion ######################################################
# We can easily add deletion ability to a FormSet with an argument to
# formset_factory. This will add a boolean field to each form instance. When
# that boolean field is True, the form will be in formset.deleted_forms
ChoiceFormSet = formset_factory(Choice, can_delete=True)
initial = [{'choice': 'Calexico', 'votes': 100}, {'choice': 'Fergie', 'votes': 900}]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Delete: <input type="checkbox" name="choices-0-DELETE" /></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li>
<li>Votes: <input type="number" name="choices-1-votes" value="900" /></li>
<li>Delete: <input type="checkbox" name="choices-1-DELETE" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>
<li>Delete: <input type="checkbox" name="choices-2-DELETE" /></li>"""
)
# To delete something, we just need to set that form's special delete field to
# 'on'. Let's go ahead and delete Fergie.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '2', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-DELETE': '',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-DELETE': 'on',
'choices-2-choice': '',
'choices-2-votes': '',
'choices-2-DELETE': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual(
[form.cleaned_data for form in formset.forms],
[
{'votes': 100, 'DELETE': False, 'choice': 'Calexico'},
{'votes': 900, 'DELETE': True, 'choice': 'Fergie'},
{},
]
)
self.assertEqual(
[form.cleaned_data for form in formset.deleted_forms],
[{'votes': 900, 'DELETE': True, 'choice': 'Fergie'}]
)
# If we fill a form with something and then we check the can_delete checkbox for
# that form, that form's errors should not make the entire formset invalid since
# it's going to be deleted.
class CheckForm(Form):
field = IntegerField(min_value=100)
data = {
'check-TOTAL_FORMS': '3', # the number of forms rendered
'check-INITIAL_FORMS': '2', # the number of forms with initial data
            'check-MIN_NUM_FORMS': '0',  # min number of forms
'check-MAX_NUM_FORMS': '0', # max number of forms
'check-0-field': '200',
'check-0-DELETE': '',
'check-1-field': '50',
'check-1-DELETE': 'on',
'check-2-field': '',
'check-2-DELETE': '',
}
CheckFormSet = formset_factory(CheckForm, can_delete=True)
formset = CheckFormSet(data, prefix='check')
self.assertTrue(formset.is_valid())
# If we remove the deletion flag now we will have our validation back.
data['check-1-DELETE'] = ''
formset = CheckFormSet(data, prefix='check')
self.assertFalse(formset.is_valid())
# Should be able to get deleted_forms from a valid formset even if a
# deleted form would have been invalid.
class Person(Form):
name = CharField()
PeopleForm = formset_factory(
form=Person,
can_delete=True)
p = PeopleForm(
{'form-0-name': '', 'form-0-DELETE': 'on', # no name!
'form-TOTAL_FORMS': 1, 'form-INITIAL_FORMS': 1,
'form-MIN_NUM_FORMS': 0, 'form-MAX_NUM_FORMS': 1})
self.assertTrue(p.is_valid())
self.assertEqual(len(p.deleted_forms), 1)
def test_formsets_with_ordering(self):
# FormSets with ordering ######################################################
# We can also add ordering ability to a FormSet with an argument to
# formset_factory. This will add an integer field to each form instance. When
        # form validation succeeds, formset.ordered_forms yields the forms in the
        # order specified by the ordering fields. If a number is duplicated in the set
        # of ordering fields, for instance form 0 and form 3 are both marked as 1, then
        # the form index is used as a secondary ordering criterion. To put
        # something at the front of the list, you'd need to set its order to 0.
ChoiceFormSet = formset_factory(Choice, can_order=True)
initial = [{'choice': 'Calexico', 'votes': 100}, {'choice': 'Fergie', 'votes': 900}]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Order: <input type="number" name="choices-0-ORDER" value="1" /></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li>
<li>Votes: <input type="number" name="choices-1-votes" value="900" /></li>
<li>Order: <input type="number" name="choices-1-ORDER" value="2" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>
<li>Order: <input type="number" name="choices-2-ORDER" /></li>"""
)
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '2', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '0',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [
{'votes': 500, 'ORDER': 0, 'choice': 'The Decemberists'},
{'votes': 100, 'ORDER': 1, 'choice': 'Calexico'},
{'votes': 900, 'ORDER': 2, 'choice': 'Fergie'},
])
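        # A quick sketch of the tie-breaking rule noted above (tied_data is an
        # illustrative variant of the submission, not part of the original
        # test): with forms 0 and 1 both at ORDER 1, the lower form index wins.
        tied_data = dict(data, **{'choices-1-ORDER': '1', 'choices-2-ORDER': '2'})
        formset = ChoiceFormSet(tied_data, auto_id=False, prefix='choices')
        self.assertTrue(formset.is_valid())
        self.assertEqual(
            [form.cleaned_data['choice'] for form in formset.ordered_forms],
            ['Calexico', 'Fergie', 'The Decemberists'],
        )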
def test_empty_ordered_fields(self):
# Ordering fields are allowed to be left blank, and if they *are* left blank,
# they will be sorted below everything else.
data = {
'choices-TOTAL_FORMS': '4', # the number of forms rendered
'choices-INITIAL_FORMS': '3', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '',
'choices-3-choice': 'Basia Bulat',
'choices-3-votes': '50',
'choices-3-ORDER': '',
}
ChoiceFormSet = formset_factory(Choice, can_order=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [
{'votes': 100, 'ORDER': 1, 'choice': 'Calexico'},
{'votes': 900, 'ORDER': 2, 'choice': 'Fergie'},
{'votes': 500, 'ORDER': None, 'choice': 'The Decemberists'},
{'votes': 50, 'ORDER': None, 'choice': 'Basia Bulat'},
])
def test_ordering_blank_fieldsets(self):
# Ordering should work with blank fieldsets.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
}
ChoiceFormSet = formset_factory(Choice, can_order=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [])
def test_formset_with_ordering_and_deletion(self):
# FormSets with ordering + deletion ###########################################
# Let's try throwing ordering and deletion into the same form.
ChoiceFormSet = formset_factory(Choice, can_order=True, can_delete=True)
initial = [
{'choice': 'Calexico', 'votes': 100},
{'choice': 'Fergie', 'votes': 900},
{'choice': 'The Decemberists', 'votes': 500},
]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Order: <input type="number" name="choices-0-ORDER" value="1" /></li>
<li>Delete: <input type="checkbox" name="choices-0-DELETE" /></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li>
<li>Votes: <input type="number" name="choices-1-votes" value="900" /></li>
<li>Order: <input type="number" name="choices-1-ORDER" value="2" /></li>
<li>Delete: <input type="checkbox" name="choices-1-DELETE" /></li>
<li>Choice: <input type="text" name="choices-2-choice" value="The Decemberists" /></li>
<li>Votes: <input type="number" name="choices-2-votes" value="500" /></li>
<li>Order: <input type="number" name="choices-2-ORDER" value="3" /></li>
<li>Delete: <input type="checkbox" name="choices-2-DELETE" /></li>
<li>Choice: <input type="text" name="choices-3-choice" /></li>
<li>Votes: <input type="number" name="choices-3-votes" /></li>
<li>Order: <input type="number" name="choices-3-ORDER" /></li>
<li>Delete: <input type="checkbox" name="choices-3-DELETE" /></li>"""
)
# Let's delete Fergie, and put The Decemberists ahead of Calexico.
data = {
'choices-TOTAL_FORMS': '4', # the number of forms rendered
'choices-INITIAL_FORMS': '3', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-0-DELETE': '',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-1-DELETE': 'on',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '0',
'choices-2-DELETE': '',
'choices-3-choice': '',
'choices-3-votes': '',
'choices-3-ORDER': '',
'choices-3-DELETE': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [
{'votes': 500, 'DELETE': False, 'ORDER': 0, 'choice': 'The Decemberists'},
{'votes': 100, 'DELETE': False, 'ORDER': 1, 'choice': 'Calexico'},
])
self.assertEqual(
[form.cleaned_data for form in formset.deleted_forms],
[{'votes': 900, 'DELETE': True, 'ORDER': 2, 'choice': 'Fergie'}]
)
def test_invalid_deleted_form_with_ordering(self):
# Should be able to get ordered forms from a valid formset even if a
# deleted form would have been invalid.
class Person(Form):
name = CharField()
PeopleForm = formset_factory(form=Person, can_delete=True, can_order=True)
p = PeopleForm({
'form-0-name': '',
'form-0-DELETE': 'on', # no name!
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MIN_NUM_FORMS': 0,
'form-MAX_NUM_FORMS': 1
})
self.assertTrue(p.is_valid())
self.assertEqual(p.ordered_forms, [])
def test_clean_hook(self):
# FormSet clean hook ##########################################################
# FormSets have a hook for doing extra validation that shouldn't be tied to any
# particular form. It follows the same pattern as the clean hook on Forms.
        # We start out with some duplicate data.
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MIN_NUM_FORMS': '0', # min number of forms
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Gin and Tonic',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertFalse(formset.is_valid())
# Any errors raised by formset.clean() are available via the
# formset.non_form_errors() method.
for error in formset.non_form_errors():
self.assertEqual(str(error), 'You may only specify a drink once.')
# Make sure we didn't break the valid case.
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MIN_NUM_FORMS': '0', # min number of forms
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Bloody Mary',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertTrue(formset.is_valid())
self.assertEqual(formset.non_form_errors(), [])
def test_limiting_max_forms(self):
# Limiting the maximum number of forms ########################################
# Base case for max_num.
# When not passed, max_num will take a high default value, leaving the
# number of forms only controlled by the value of the extra parameter.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>
<tr><th><label for="id_form-2-name">Name:</label></th>
<td><input type="text" name="form-2-name" id="id_form-2-name" /></td></tr>"""
)
# If max_num is 0 then no form is rendered at all.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3, max_num=0)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), "")
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=5, max_num=2)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th><td>
<input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>"""
)
# max_num has no effect when extra is less than max_num.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>"""
)
def test_max_num_with_initial_data(self):
# max_num with initial data
# When not passed, max_num will take a high default value, leaving the
# number of forms only controlled by the value of the initial and extra
# parameters.
initial = [
{'name': 'Fernet and Coke'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input type="text" name="form-0-name" value="Fernet and Coke" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>"""
)
def test_max_num_zero(self):
# If max_num is 0 then no form is rendered at all, regardless of extra,
# unless initial data is present. (This changed in the patch for bug
# 20084 -- previously max_num=0 trumped initial data)
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=0)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), "")
# test that initial trumps max_num
initial = [
{'name': 'Fernet and Coke'},
{'name': 'Bloody Mary'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=0)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input id="id_form-0-name" name="form-0-name" type="text" value="Fernet and Coke" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input id="id_form-1-name" name="form-1-name" type="text" value="Bloody Mary" /></td></tr>"""
)
def test_more_initial_than_max_num(self):
# More initial forms than max_num now results in all initial forms
# being displayed (but no extra forms). This behavior was changed
# from max_num taking precedence in the patch for #20084
initial = [
{'name': 'Gin Tonic'},
{'name': 'Bloody Mary'},
{'name': 'Jack and Coke'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input id="id_form-0-name" name="form-0-name" type="text" value="Gin Tonic" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input id="id_form-1-name" name="form-1-name" type="text" value="Bloody Mary" /></td></tr>
<tr><th><label for="id_form-2-name">Name:</label></th>
<td><input id="id_form-2-name" name="form-2-name" type="text" value="Jack and Coke" /></td></tr>"""
)
# One form from initial and extra=3 with max_num=2 should result in the one
# initial form and one extra.
initial = [
{'name': 'Gin Tonic'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3, max_num=2)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input type="text" name="form-0-name" value="Gin Tonic" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>"""
)
def test_regression_6926(self):
# Regression test for #6926 ##################################################
# Make sure the management form has the correct prefix.
formset = FavoriteDrinksFormSet()
self.assertEqual(formset.management_form.prefix, 'form')
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MIN_NUM_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
}
formset = FavoriteDrinksFormSet(data=data)
self.assertEqual(formset.management_form.prefix, 'form')
formset = FavoriteDrinksFormSet(initial={})
self.assertEqual(formset.management_form.prefix, 'form')
def test_regression_12878(self):
# Regression test for #12878 #################################################
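        # Errors raised by the formset-level clean() hook surface through
        # non_form_errors(), not through any individual form's errors.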
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MIN_NUM_FORMS': '0', # min number of forms
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Gin and Tonic',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['You may only specify a drink once.'])
def test_formset_iteration(self):
# Regression tests for #16455 -- formset instances are iterable
ChoiceFormset = formset_factory(Choice, extra=3)
formset = ChoiceFormset()
# confirm iterated formset yields formset.forms
forms = list(formset)
self.assertEqual(forms, formset.forms)
self.assertEqual(len(formset), len(forms))
# confirm indexing of formset
self.assertEqual(formset[0], forms[0])
with self.assertRaises(IndexError):
formset[3]
# Formsets can override the default iteration order
class BaseReverseFormSet(BaseFormSet):
def __iter__(self):
return reversed(self.forms)
def __getitem__(self, idx):
return super(BaseReverseFormSet, self).__getitem__(len(self) - idx - 1)
ReverseChoiceFormset = formset_factory(Choice, BaseReverseFormSet, extra=3)
reverse_formset = ReverseChoiceFormset()
# confirm that __iter__ modifies rendering order
# compare forms from "reverse" formset with forms from original formset
self.assertEqual(str(reverse_formset[0]), str(forms[-1]))
self.assertEqual(str(reverse_formset[1]), str(forms[-2]))
self.assertEqual(len(reverse_formset), len(forms))
def test_formset_nonzero(self):
"""
Formsets with no forms should still evaluate as true.
Regression test for #15722
"""
ChoiceFormset = formset_factory(Choice, extra=0)
formset = ChoiceFormset()
self.assertEqual(len(formset.forms), 0)
self.assertTrue(formset)
def test_formset_splitdatetimefield(self):
"""
Formset should also work with SplitDateTimeField(initial=datetime.datetime.now).
Regression test for #18709.
"""
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-0-when_0': '1904-06-16',
'form-0-when_1': '15:51:33',
}
formset = SplitDateTimeFormSet(data)
self.assertTrue(formset.is_valid())
def test_formset_error_class(self):
        # Regression test for #16479 -- a formset's forms used ErrorList instead of the supplied error_class
class CustomErrorList(ErrorList):
pass
formset = FavoriteDrinksFormSet(error_class=CustomErrorList)
self.assertEqual(formset.forms[0].error_class, CustomErrorList)
def test_formset_calls_forms_is_valid(self):
# Regression tests for #18574 -- make sure formsets call
# is_valid() on each form.
class AnotherChoice(Choice):
def is_valid(self):
self.is_valid_called = True
return super(AnotherChoice, self).is_valid()
AnotherChoiceFormSet = formset_factory(AnotherChoice)
data = {
'choices-TOTAL_FORMS': '1', # number of forms rendered
'choices-INITIAL_FORMS': '0', # number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
}
formset = AnotherChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertTrue(all(form.is_valid_called for form in formset.forms))
def test_hard_limit_on_instantiated_forms(self):
"""A formset has a hard limit on the number of forms instantiated."""
# reduce the default limit of 1000 temporarily for testing
_old_DEFAULT_MAX_NUM = formsets.DEFAULT_MAX_NUM
try:
formsets.DEFAULT_MAX_NUM = 2
ChoiceFormSet = formset_factory(Choice, max_num=1)
# someone fiddles with the mgmt form data...
formset = ChoiceFormSet(
{
'choices-TOTAL_FORMS': '4',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '4',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
'choices-2-choice': 'Two',
'choices-2-votes': '2',
'choices-3-choice': 'Three',
'choices-3-votes': '3',
},
prefix='choices',
)
# But we still only instantiate 3 forms
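            # (the hard cap is max_num + DEFAULT_MAX_NUM, i.e. 1 + 2 = 3 here)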
self.assertEqual(len(formset.forms), 3)
# and the formset isn't valid
self.assertFalse(formset.is_valid())
finally:
formsets.DEFAULT_MAX_NUM = _old_DEFAULT_MAX_NUM
def test_increase_hard_limit(self):
"""Can increase the built-in forms limit via a higher max_num."""
# reduce the default limit of 1000 temporarily for testing
_old_DEFAULT_MAX_NUM = formsets.DEFAULT_MAX_NUM
try:
formsets.DEFAULT_MAX_NUM = 3
# for this form, we want a limit of 4
ChoiceFormSet = formset_factory(Choice, max_num=4)
formset = ChoiceFormSet(
{
'choices-TOTAL_FORMS': '4',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '4',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
'choices-2-choice': 'Two',
'choices-2-votes': '2',
'choices-3-choice': 'Three',
'choices-3-votes': '3',
},
prefix='choices',
)
# Four forms are instantiated and no exception is raised
self.assertEqual(len(formset.forms), 4)
finally:
formsets.DEFAULT_MAX_NUM = _old_DEFAULT_MAX_NUM
def test_non_form_errors_run_full_clean(self):
# Regression test for #11160
# If non_form_errors() is called without calling is_valid() first,
# it should ensure that full_clean() is called.
class BaseCustomFormSet(BaseFormSet):
def clean(self):
raise ValidationError("This is a non-form error")
ChoiceFormSet = formset_factory(Choice, formset=BaseCustomFormSet)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertIsInstance(formset.non_form_errors(), ErrorList)
self.assertEqual(list(formset.non_form_errors()), ['This is a non-form error'])
def test_validate_max_ignores_forms_marked_for_deletion(self):
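        # A form flagged for deletion doesn't count toward max_num, so this
        # two-form submission still passes validate_max with max_num=1.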
class CheckForm(Form):
field = IntegerField()
data = {
'check-TOTAL_FORMS': '2',
'check-INITIAL_FORMS': '0',
'check-MAX_NUM_FORMS': '1',
'check-0-field': '200',
'check-0-DELETE': '',
'check-1-field': '50',
'check-1-DELETE': 'on',
}
CheckFormSet = formset_factory(CheckForm, max_num=1, validate_max=True,
can_delete=True)
formset = CheckFormSet(data, prefix='check')
self.assertTrue(formset.is_valid())
def test_formset_total_error_count(self):
"""A valid formset should have 0 total errors."""
data = [ # formset_data, expected error count
([('Calexico', '100')], 0),
([('Calexico', '')], 1),
([('', 'invalid')], 2),
([('Calexico', '100'), ('Calexico', '')], 1),
([('Calexico', ''), ('Calexico', '')], 2),
]
for formset_data, expected_error_count in data:
formset = self.make_choiceformset(formset_data)
self.assertEqual(formset.total_error_count(), expected_error_count)
def test_formset_total_error_count_with_non_form_errors(self):
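        # Non-form errors (here, the validate_max message) count toward
        # total_error_count() alongside per-form field errors.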
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '2', # max number of forms - should be ignored
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, extra=1, max_num=1, validate_max=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.total_error_count(), 1)
data['choices-1-votes'] = ''
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.total_error_count(), 2)
def test_html_safe(self):
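        # Formsets implement __html__() (matching their string rendering), so
        # template engines can treat the rendered formset as safe HTML.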
formset = self.make_choiceformset()
self.assertTrue(hasattr(formset, '__html__'))
self.assertEqual(force_text(formset), formset.__html__())
data = {
'choices-TOTAL_FORMS': '1', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
}
class FormsetAsFooTests(SimpleTestCase):
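    # The as_table/as_p/as_ul shortcuts render the management form's hidden
    # inputs followed by each member form's output.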
def test_as_table(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertHTMLEqual(
formset.as_table(),
"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" />
<input type="hidden" name="choices-INITIAL_FORMS" value="0" />
<input type="hidden" name="choices-MIN_NUM_FORMS" value="0" />
<input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<tr><th>Choice:</th><td><input type="text" name="choices-0-choice" value="Calexico" /></td></tr>
<tr><th>Votes:</th><td><input type="number" name="choices-0-votes" value="100" /></td></tr>"""
)
def test_as_p(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertHTMLEqual(
formset.as_p(),
"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" />
<input type="hidden" name="choices-INITIAL_FORMS" value="0" />
<input type="hidden" name="choices-MIN_NUM_FORMS" value="0" />
<input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<p>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></p>
<p>Votes: <input type="number" name="choices-0-votes" value="100" /></p>"""
)
def test_as_ul(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertHTMLEqual(
formset.as_ul(),
"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" />
<input type="hidden" name="choices-INITIAL_FORMS" value="0" />
<input type="hidden" name="choices-MIN_NUM_FORMS" value="0" />
<input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>"""
)
# Regression test for #11418 #################################################
class ArticleForm(Form):
title = CharField()
pub_date = DateField()
ArticleFormSet = formset_factory(ArticleForm)
class TestIsBoundBehavior(SimpleTestCase):
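    # Binding a formset to data that lacks ManagementForm fields should fail
    # loudly (see the #11418 note above) rather than misbehave silently.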
def test_no_data_raises_validation_error(self):
with self.assertRaises(ValidationError):
ArticleFormSet({}).is_valid()
def test_with_management_data_attrs_work_fine(self):
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
}
formset = ArticleFormSet(data)
self.assertEqual(0, formset.initial_form_count())
self.assertEqual(1, formset.total_form_count())
self.assertTrue(formset.is_bound)
self.assertTrue(formset.forms[0].is_bound)
self.assertTrue(formset.is_valid())
self.assertTrue(formset.forms[0].is_valid())
self.assertEqual([{}], formset.cleaned_data)
def test_form_errors_are_caught_by_formset(self):
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-0-title': 'Test',
'form-0-pub_date': '1904-06-16',
'form-1-title': 'Test',
'form-1-pub_date': '', # <-- this date is missing but required
}
formset = ArticleFormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual([{}, {'pub_date': ['This field is required.']}], formset.errors)
def test_empty_forms_are_unbound(self):
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-0-title': 'Test',
'form-0-pub_date': '1904-06-16',
}
unbound_formset = ArticleFormSet()
bound_formset = ArticleFormSet(data)
empty_forms = [
unbound_formset.empty_form,
bound_formset.empty_form
]
# Empty forms should be unbound
self.assertFalse(empty_forms[0].is_bound)
self.assertFalse(empty_forms[1].is_bound)
# The empty forms should be equal.
self.assertHTMLEqual(empty_forms[0].as_p(), empty_forms[1].as_p())
class TestEmptyFormSet(SimpleTestCase):
def test_empty_formset_is_valid(self):
"""An empty formset still calls clean()"""
EmptyFsetWontValidateFormset = formset_factory(FavoriteDrinkForm, extra=0, formset=EmptyFsetWontValidate)
formset = EmptyFsetWontValidateFormset(
data={'form-INITIAL_FORMS': '0', 'form-TOTAL_FORMS': '0'},
prefix="form",
)
formset2 = EmptyFsetWontValidateFormset(
data={'form-INITIAL_FORMS': '0', 'form-TOTAL_FORMS': '1', 'form-0-name': 'bah'},
prefix="form",
)
self.assertFalse(formset.is_valid())
self.assertFalse(formset2.is_valid())
def test_empty_formset_media(self):
"""Make sure media is available on empty formset, refs #19545"""
class MediaForm(Form):
class Media:
js = ('some-file.js',)
self.assertIn('some-file.js', str(formset_factory(MediaForm, extra=0)().media))
def test_empty_formset_is_multipart(self):
"""Make sure `is_multipart()` works with empty formset, refs #19545"""
class FileForm(Form):
file = FileField()
self.assertTrue(formset_factory(FileForm, extra=0)().is_multipart())
| 42.431483
| 119
| 0.603163
|
from __future__ import unicode_literals
import datetime
from collections import Counter
from django.forms import (
BaseForm, CharField, DateField, FileField, Form, IntegerField,
SplitDateTimeField, ValidationError, formsets,
)
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.test import SimpleTestCase, mock
from django.utils.encoding import force_text
class Choice(Form):
choice = CharField()
votes = IntegerField()
ChoiceFormSet = formset_factory(Choice)
class FavoriteDrinkForm(Form):
name = CharField()
class BaseFavoriteDrinksFormSet(BaseFormSet):
def clean(self):
seen_drinks = []
for drink in self.cleaned_data:
if drink['name'] in seen_drinks:
raise ValidationError('You may only specify a drink once.')
seen_drinks.append(drink['name'])
class EmptyFsetWontValidate(BaseFormSet):
def clean(self):
raise ValidationError("Clean method called")
# error if there are any duplicates. Used in ``test_clean_hook``,
# ``test_regression_6926`` & ``test_regression_12878``.
FavoriteDrinksFormSet = formset_factory(FavoriteDrinkForm, formset=BaseFavoriteDrinksFormSet, extra=3)
# Used in ``test_formset_splitdatetimefield``.
class SplitDateTimeForm(Form):
when = SplitDateTimeField(initial=datetime.datetime.now)
SplitDateTimeFormSet = formset_factory(SplitDateTimeForm)
class CustomKwargForm(Form):
def __init__(self, *args, **kwargs):
self.custom_kwarg = kwargs.pop('custom_kwarg')
super(CustomKwargForm, self).__init__(*args, **kwargs)
class FormsFormsetTestCase(SimpleTestCase):
def make_choiceformset(
self, formset_data=None, formset_class=ChoiceFormSet,
total_forms=None, initial_forms=0, max_num_forms=0, min_num_forms=0, **kwargs):
kwargs.setdefault('prefix', 'choices')
kwargs.setdefault('auto_id', False)
if formset_data is None:
return formset_class(**kwargs)
if total_forms is None:
total_forms = len(formset_data)
def prefixed(*args):
args = (kwargs['prefix'],) + args
return '-'.join(args)
data = {
prefixed('TOTAL_FORMS'): str(total_forms),
prefixed('INITIAL_FORMS'): str(initial_forms),
prefixed('MAX_NUM_FORMS'): str(max_num_forms),
prefixed('MIN_NUM_FORMS'): str(min_num_forms),
}
for i, (choice, votes) in enumerate(formset_data):
data[prefixed(str(i), 'choice')] = choice
data[prefixed(str(i), 'votes')] = votes
return formset_class(data, **kwargs)
def test_basic_formset(self):
# A FormSet constructor takes the same arguments as Form. Let's create a FormSet
formset = self.make_choiceformset()
self.assertHTMLEqual(
str(formset),
"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" />
<input type="hidden" name="choices-INITIAL_FORMS" value="0" />
<input type="hidden" name="choices-MIN_NUM_FORMS" value="0" />
<input type="hidden" name="choices-MAX_NUM_FORMS" value="1000" />
<tr><th>Choice:</th><td><input type="text" name="choices-0-choice" /></td></tr>
<tr><th>Votes:</th><td><input type="number" name="choices-0-votes" /></td></tr>"""
)
# We treat FormSet pretty much like we would treat a normal Form. FormSet has an
# is_valid method, and a cleaned_data or errors attribute depending on whether all
# the forms passed validation. However, unlike a Form instance, cleaned_data and
# errors will be a list of dicts rather than just a single dict.
formset = self.make_choiceformset([('Calexico', '100')])
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}])
# If a FormSet was not passed any data, its is_valid and has_changed
# methods should return False.
formset = self.make_choiceformset()
self.assertFalse(formset.is_valid())
self.assertFalse(formset.has_changed())
def test_form_kwargs_formset(self):
FormSet = formset_factory(CustomKwargForm, extra=2)
formset = FormSet(form_kwargs={'custom_kwarg': 1})
for form in formset:
self.assertTrue(hasattr(form, 'custom_kwarg'))
self.assertEqual(form.custom_kwarg, 1)
def test_form_kwargs_formset_dynamic(self):
class DynamicBaseFormSet(BaseFormSet):
def get_form_kwargs(self, index):
return {'custom_kwarg': index}
DynamicFormSet = formset_factory(CustomKwargForm, formset=DynamicBaseFormSet, extra=2)
formset = DynamicFormSet(form_kwargs={'custom_kwarg': 'ignored'})
for i, form in enumerate(formset):
self.assertTrue(hasattr(form, 'custom_kwarg'))
self.assertEqual(form.custom_kwarg, i)
def test_form_kwargs_empty_form(self):
FormSet = formset_factory(CustomKwargForm)
formset = FormSet(form_kwargs={'custom_kwarg': 1})
self.assertTrue(hasattr(formset.empty_form, 'custom_kwarg'))
self.assertEqual(formset.empty_form.custom_kwarg, 1)
def test_formset_validation(self):
# FormSet instances can also have an error attribute if validation failed for
# any of the forms.
formset = self.make_choiceformset([('Calexico', '')])
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'votes': ['This field is required.']}])
def test_formset_validation_count(self):
def make_method_counter(func):
counter = Counter()
counter.call_count = 0
def mocked_func(*args, **kwargs):
counter.call_count += 1
return func(*args, **kwargs)
return mocked_func, counter
mocked_is_valid, is_valid_counter = make_method_counter(formsets.ManagementForm.is_valid)
mocked_full_clean, full_clean_counter = make_method_counter(BaseForm.full_clean)
formset = self.make_choiceformset([('Calexico', '100'), ('Any1', '42'), ('Any2', '101')])
with mock.patch('django.forms.formsets.ManagementForm.is_valid', mocked_is_valid), \
mock.patch('django.forms.forms.BaseForm.full_clean', mocked_full_clean):
self.assertTrue(formset.is_valid())
self.assertEqual(is_valid_counter.call_count, 1)
self.assertEqual(full_clean_counter.call_count, 4)
def test_formset_has_changed(self):
# FormSet instances has_changed method will be True if any data is
# passed to his forms, even if the formset didn't validate
blank_formset = self.make_choiceformset([('', '')])
self.assertFalse(blank_formset.has_changed())
invalid_formset = self.make_choiceformset([('Calexico', '')])
self.assertFalse(invalid_formset.is_valid())
self.assertTrue(invalid_formset.has_changed())
valid_formset = self.make_choiceformset([('Calexico', '100')])
self.assertTrue(valid_formset.is_valid())
self.assertTrue(valid_formset.has_changed())
def test_formset_initial_data(self):
initial = [{'choice': 'Calexico', 'votes': 100}]
formset = self.make_choiceformset(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>"""
)
formset = self.make_choiceformset([('Calexico', '100'), ('', '')], initial_forms=1)
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}, {}])
def test_second_form_partially_filled(self):
# But the second form was blank! Shouldn't we get some errors? No. If we display
# one of the fields of a blank form though, it will be validated. We may want to
# required that at least x number of forms are completed, but we'll show how to
formset = self.make_choiceformset([('Calexico', '100'), ('The Decemberists', '')], initial_forms=1)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{}, {'votes': ['This field is required.']}])
def test_delete_prefilled_data(self):
formset = self.make_choiceformset([('', ''), ('', '')], initial_forms=1)
self.assertFalse(formset.is_valid())
self.assertEqual(
formset.errors,
[{'votes': ['This field is required.'], 'choice': ['This field is required.']}, {}]
)
def test_displaying_more_than_one_blank_form(self):
'choices-0-choice': '',
'choices-0-votes': '',
'choices-1-choice': '',
'choices-1-votes': '',
'choices-2-choice': '',
'choices-2-votes': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{}, {}, {}])
def test_min_num_displaying_more_than_one_blank_form(self):
ChoiceFormSet = formset_factory(Choice, extra=1, min_num=1)
formset = ChoiceFormSet(auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertFalse(formset.forms[0].empty_permitted)
self.assertTrue(formset.forms[1].empty_permitted)
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" /></li>
<li>Votes: <input type="number" name="choices-0-votes" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>"""
)
def test_min_num_displaying_more_than_one_blank_form_with_zero_extra(self):
ChoiceFormSet = formset_factory(Choice, extra=0, min_num=3)
formset = ChoiceFormSet(auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" /></li>
<li>Votes: <input type="number" name="choices-0-votes" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>"""
)
def test_single_form_completed(self):
data = {
'choices-TOTAL_FORMS': '3',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0',
'choices-MAX_NUM_FORMS': '0',
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': '',
'choices-1-votes': '',
'choices-2-choice': '',
'choices-2-votes': '',
}
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}, {}, {}])
def test_formset_validate_max_flag(self):
# value in the returned data is not checked)
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '2', # max number of forms - should be ignored
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, extra=1, max_num=1, validate_max=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['Please submit 1 or fewer forms.'])
def test_formset_validate_min_flag(self):
# If validate_min is set and min_num is more than TOTAL_FORMS in the
# data, then throw an exception. MIN_NUM_FORMS in the data is
# irrelevant here (it's output as a hint for the client but its
data = {
'choices-TOTAL_FORMS': '2',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0',
'choices-MAX_NUM_FORMS': '0',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, extra=1, min_num=3, validate_min=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['Please submit 3 or more forms.'])
def test_formset_validate_min_unchanged_forms(self):
initial = [
{'choice': 'Zero', 'votes': 0},
{'choice': 'One', 'votes': 0},
]
data = {
'choices-TOTAL_FORMS': '2',
'choices-INITIAL_FORMS': '2',
'choices-MIN_NUM_FORMS': '0',
'choices-MAX_NUM_FORMS': '2',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, min_num=2, validate_min=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices', initial=initial)
self.assertFalse(formset.forms[0].has_changed())
self.assertTrue(formset.forms[1].has_changed())
self.assertTrue(formset.is_valid())
def test_formset_validate_min_excludes_empty_forms(self):
data = {
'choices-TOTAL_FORMS': '2',
'choices-INITIAL_FORMS': '0',
}
ChoiceFormSet = formset_factory(Choice, extra=2, min_num=1, validate_min=True, can_delete=True)
formset = ChoiceFormSet(data, prefix='choices')
self.assertFalse(formset.has_changed())
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['Please submit 1 or more forms.'])
def test_second_form_partially_filled_2(self):
data = {
'choices-TOTAL_FORMS': '3',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0',
'choices-MAX_NUM_FORMS': '0',
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': 'The Decemberists',
'choices-1-votes': '',
'choices-2-choice': '',
'choices-2-votes': '',
}
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{}, {'votes': ['This field is required.']}, {}])
def test_more_initial_data(self):
initial = [{'choice': 'Calexico', 'votes': 100}]
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>
<li>Choice: <input type="text" name="choices-3-choice" /></li>
<li>Votes: <input type="number" name="choices-3-votes" /></li>"""
)
self.assertTrue(formset.empty_form.empty_permitted)
self.assertHTMLEqual(
formset.empty_form.as_ul(),
"""<li>Choice: <input type="text" name="choices-__prefix__-choice" /></li>
<li>Votes: <input type="number" name="choices-__prefix__-votes" /></li>"""
)
def test_formset_with_deletion(self):
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-DELETE': 'on',
'choices-2-choice': '',
'choices-2-votes': '',
'choices-2-DELETE': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual(
[form.cleaned_data for form in formset.forms],
[
{'votes': 100, 'DELETE': False, 'choice': 'Calexico'},
{'votes': 900, 'DELETE': True, 'choice': 'Fergie'},
{},
]
)
self.assertEqual(
[form.cleaned_data for form in formset.deleted_forms],
[{'votes': 900, 'DELETE': True, 'choice': 'Fergie'}]
)
# it's going to be deleted.
class CheckForm(Form):
field = IntegerField(min_value=100)
data = {
'check-TOTAL_FORMS': '3',
'check-INITIAL_FORMS': '2',
'choices-MIN_NUM_FORMS': '0',
'check-MAX_NUM_FORMS': '0',
'check-0-field': '200',
'check-0-DELETE': '',
'check-1-field': '50',
'check-1-DELETE': 'on',
'check-2-field': '',
'check-2-DELETE': '',
}
CheckFormSet = formset_factory(CheckForm, can_delete=True)
formset = CheckFormSet(data, prefix='check')
self.assertTrue(formset.is_valid())
data['check-1-DELETE'] = ''
formset = CheckFormSet(data, prefix='check')
self.assertFalse(formset.is_valid())
class Person(Form):
name = CharField()
PeopleForm = formset_factory(
form=Person,
can_delete=True)
p = PeopleForm(
{'form-0-name': '', 'form-0-DELETE': 'on',
'form-TOTAL_FORMS': 1, 'form-INITIAL_FORMS': 1,
'form-MIN_NUM_FORMS': 0, 'form-MAX_NUM_FORMS': 1})
self.assertTrue(p.is_valid())
self.assertEqual(len(p.deleted_forms), 1)
def test_formsets_with_ordering(self):
ice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '0',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [
{'votes': 500, 'ORDER': 0, 'choice': 'The Decemberists'},
{'votes': 100, 'ORDER': 1, 'choice': 'Calexico'},
{'votes': 900, 'ORDER': 2, 'choice': 'Fergie'},
])
def test_empty_ordered_fields(self):
data = {
'choices-TOTAL_FORMS': '4',
'choices-INITIAL_FORMS': '3',
'choices-MIN_NUM_FORMS': '0',
'choices-MAX_NUM_FORMS': '0',
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '',
'choices-3-choice': 'Basia Bulat',
'choices-3-votes': '50',
'choices-3-ORDER': '',
}
ChoiceFormSet = formset_factory(Choice, can_order=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [
{'votes': 100, 'ORDER': 1, 'choice': 'Calexico'},
{'votes': 900, 'ORDER': 2, 'choice': 'Fergie'},
{'votes': 500, 'ORDER': None, 'choice': 'The Decemberists'},
{'votes': 50, 'ORDER': None, 'choice': 'Basia Bulat'},
])
def test_ordering_blank_fieldsets(self):
data = {
'choices-TOTAL_FORMS': '3',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0',
'choices-MAX_NUM_FORMS': '0',
}
ChoiceFormSet = formset_factory(Choice, can_order=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [])
def test_formset_with_ordering_and_deletion(self):
number" name="choices-1-votes" value="900" /></li>
<li>Order: <input type="number" name="choices-1-ORDER" value="2" /></li>
<li>Delete: <input type="checkbox" name="choices-1-DELETE" /></li>
<li>Choice: <input type="text" name="choices-2-choice" value="The Decemberists" /></li>
<li>Votes: <input type="number" name="choices-2-votes" value="500" /></li>
<li>Order: <input type="number" name="choices-2-ORDER" value="3" /></li>
<li>Delete: <input type="checkbox" name="choices-2-DELETE" /></li>
<li>Choice: <input type="text" name="choices-3-choice" /></li>
<li>Votes: <input type="number" name="choices-3-votes" /></li>
<li>Order: <input type="number" name="choices-3-ORDER" /></li>
<li>Delete: <input type="checkbox" name="choices-3-DELETE" /></li>"""
)
# Let's delete Fergie, and put The Decemberists ahead of Calexico.
data = {
'choices-TOTAL_FORMS': '4',
'choices-INITIAL_FORMS': '3',
'choices-MIN_NUM_FORMS': '0',
'choices-MAX_NUM_FORMS': '0',
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-0-DELETE': '',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-1-DELETE': 'on',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '0',
'choices-2-DELETE': '',
'choices-3-choice': '',
'choices-3-votes': '',
'choices-3-ORDER': '',
'choices-3-DELETE': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [
{'votes': 500, 'DELETE': False, 'ORDER': 0, 'choice': 'The Decemberists'},
{'votes': 100, 'DELETE': False, 'ORDER': 1, 'choice': 'Calexico'},
])
self.assertEqual(
[form.cleaned_data for form in formset.deleted_forms],
[{'votes': 900, 'DELETE': True, 'ORDER': 2, 'choice': 'Fergie'}]
)
def test_invalid_deleted_form_with_ordering(self):
class Person(Form):
name = CharField()
PeopleForm = formset_factory(form=Person, can_delete=True, can_order=True)
p = PeopleForm({
'form-0-name': '',
'form-0-DELETE': 'on',
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MIN_NUM_FORMS': 0,
'form-MAX_NUM_FORMS': 1
})
self.assertTrue(p.is_valid())
self.assertEqual(p.ordered_forms, [])
def test_clean_hook(self):
rinkFormSet = formset_factory(FavoriteDrinkForm, extra=5, max_num=2)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th><td>
<input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>"""
)
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>"""
)
def test_max_num_with_initial_data(self):
initial = [
{'name': 'Fernet and Coke'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input type="text" name="form-0-name" value="Fernet and Coke" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>"""
)
def test_max_num_zero(self):
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=0)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), "")
initial = [
{'name': 'Fernet and Coke'},
{'name': 'Bloody Mary'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=0)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input id="id_form-0-name" name="form-0-name" type="text" value="Fernet and Coke" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input id="id_form-1-name" name="form-1-name" type="text" value="Bloody Mary" /></td></tr>"""
)
def test_more_initial_than_max_num(self):
initial = [
{'name': 'Gin Tonic'},
{'name': 'Bloody Mary'},
{'name': 'Jack and Coke'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input id="id_form-0-name" name="form-0-name" type="text" value="Gin Tonic" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input id="id_form-1-name" name="form-1-name" type="text" value="Bloody Mary" /></td></tr>
<tr><th><label for="id_form-2-name">Name:</label></th>
<td><input id="id_form-2-name" name="form-2-name" type="text" value="Jack and Coke" /></td></tr>"""
)
initial = [
{'name': 'Gin Tonic'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3, max_num=2)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input type="text" name="form-0-name" value="Gin Tonic" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>"""
)
def test_regression_6926(self):
formset = FavoriteDrinksFormSet(error_class=CustomErrorList)
self.assertEqual(formset.forms[0].error_class, CustomErrorList)
def test_formset_calls_forms_is_valid(self):
        class AnotherChoice(Choice):
def is_valid(self):
self.is_valid_called = True
return super(AnotherChoice, self).is_valid()
AnotherChoiceFormSet = formset_factory(AnotherChoice)
data = {
'choices-TOTAL_FORMS': '1',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0',
'choices-MAX_NUM_FORMS': '0',
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
}
formset = AnotherChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertTrue(all(form.is_valid_called for form in formset.forms))
def test_hard_limit_on_instantiated_forms(self):
_old_DEFAULT_MAX_NUM = formsets.DEFAULT_MAX_NUM
try:
formsets.DEFAULT_MAX_NUM = 2
ChoiceFormSet = formset_factory(Choice, max_num=1)
formset = ChoiceFormSet(
{
'choices-TOTAL_FORMS': '4',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0',
'choices-MAX_NUM_FORMS': '4',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
'choices-2-choice': 'Two',
'choices-2-votes': '2',
'choices-3-choice': 'Three',
'choices-3-votes': '3',
},
prefix='choices',
)
self.assertEqual(len(formset.forms), 3)
self.assertFalse(formset.is_valid())
finally:
formsets.DEFAULT_MAX_NUM = _old_DEFAULT_MAX_NUM
def test_increase_hard_limit(self):
# reduce the default limit of 1000 temporarily for testing
_old_DEFAULT_MAX_NUM = formsets.DEFAULT_MAX_NUM
try:
formsets.DEFAULT_MAX_NUM = 3
# for this form, we want a limit of 4
ChoiceFormSet = formset_factory(Choice, max_num=4)
formset = ChoiceFormSet(
{
'choices-TOTAL_FORMS': '4',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '4',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
'choices-2-choice': 'Two',
'choices-2-votes': '2',
'choices-3-choice': 'Three',
'choices-3-votes': '3',
},
prefix='choices',
)
# Four forms are instantiated and no exception is raised
self.assertEqual(len(formset.forms), 4)
finally:
formsets.DEFAULT_MAX_NUM = _old_DEFAULT_MAX_NUM
def test_non_form_errors_run_full_clean(self):
# Regression test for #11160
# If non_form_errors() is called without calling is_valid() first,
# it should ensure that full_clean() is called.
class BaseCustomFormSet(BaseFormSet):
def clean(self):
raise ValidationError("This is a non-form error")
ChoiceFormSet = formset_factory(Choice, formset=BaseCustomFormSet)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertIsInstance(formset.non_form_errors(), ErrorList)
self.assertEqual(list(formset.non_form_errors()), ['This is a non-form error'])
def test_validate_max_ignores_forms_marked_for_deletion(self):
class CheckForm(Form):
field = IntegerField()
data = {
'check-TOTAL_FORMS': '2',
'check-INITIAL_FORMS': '0',
'check-MAX_NUM_FORMS': '1',
'check-0-field': '200',
'check-0-DELETE': '',
'check-1-field': '50',
'check-1-DELETE': 'on',
}
CheckFormSet = formset_factory(CheckForm, max_num=1, validate_max=True,
can_delete=True)
formset = CheckFormSet(data, prefix='check')
self.assertTrue(formset.is_valid())
def test_formset_total_error_count(self):
data = [ # formset_data, expected error count
([('Calexico', '100')], 0),
([('Calexico', '')], 1),
([('', 'invalid')], 2),
([('Calexico', '100'), ('Calexico', '')], 1),
([('Calexico', ''), ('Calexico', '')], 2),
]
for formset_data, expected_error_count in data:
formset = self.make_choiceformset(formset_data)
self.assertEqual(formset.total_error_count(), expected_error_count)
def test_formset_total_error_count_with_non_form_errors(self):
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '2', # max number of forms - should be ignored
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, extra=1, max_num=1, validate_max=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.total_error_count(), 1)
data['choices-1-votes'] = ''
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.total_error_count(), 2)
def test_html_safe(self):
formset = self.make_choiceformset()
self.assertTrue(hasattr(formset, '__html__'))
self.assertEqual(force_text(formset), formset.__html__())
data = {
'choices-TOTAL_FORMS': '1', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
}
class FormsetAsFooTests(SimpleTestCase):
def test_as_table(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertHTMLEqual(
formset.as_table(),
"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" />
<input type="hidden" name="choices-INITIAL_FORMS" value="0" />
<input type="hidden" name="choices-MIN_NUM_FORMS" value="0" />
<input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<tr><th>Choice:</th><td><input type="text" name="choices-0-choice" value="Calexico" /></td></tr>
<tr><th>Votes:</th><td><input type="number" name="choices-0-votes" value="100" /></td></tr>"""
)
def test_as_p(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertHTMLEqual(
formset.as_p(),
"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" />
<input type="hidden" name="choices-INITIAL_FORMS" value="0" />
<input type="hidden" name="choices-MIN_NUM_FORMS" value="0" />
<input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<p>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></p>
<p>Votes: <input type="number" name="choices-0-votes" value="100" /></p>"""
)
def test_as_ul(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertHTMLEqual(
formset.as_ul(),
"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" />
<input type="hidden" name="choices-INITIAL_FORMS" value="0" />
<input type="hidden" name="choices-MIN_NUM_FORMS" value="0" />
<input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>"""
)
# Regression test for #11418 #################################################
class ArticleForm(Form):
title = CharField()
pub_date = DateField()
ArticleFormSet = formset_factory(ArticleForm)
class TestIsBoundBehavior(SimpleTestCase):
def test_no_data_raises_validation_error(self):
with self.assertRaises(ValidationError):
ArticleFormSet({}).is_valid()
def test_with_management_data_attrs_work_fine(self):
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
}
formset = ArticleFormSet(data)
self.assertEqual(0, formset.initial_form_count())
self.assertEqual(1, formset.total_form_count())
self.assertTrue(formset.is_bound)
self.assertTrue(formset.forms[0].is_bound)
self.assertTrue(formset.is_valid())
self.assertTrue(formset.forms[0].is_valid())
self.assertEqual([{}], formset.cleaned_data)
def test_form_errors_are_caught_by_formset(self):
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-0-title': 'Test',
'form-0-pub_date': '1904-06-16',
'form-1-title': 'Test',
'form-1-pub_date': '', # <-- this date is missing but required
}
formset = ArticleFormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual([{}, {'pub_date': ['This field is required.']}], formset.errors)
def test_empty_forms_are_unbound(self):
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-0-title': 'Test',
'form-0-pub_date': '1904-06-16',
}
unbound_formset = ArticleFormSet()
bound_formset = ArticleFormSet(data)
empty_forms = [
unbound_formset.empty_form,
bound_formset.empty_form
]
# Empty forms should be unbound
self.assertFalse(empty_forms[0].is_bound)
self.assertFalse(empty_forms[1].is_bound)
# The empty forms should be equal.
self.assertHTMLEqual(empty_forms[0].as_p(), empty_forms[1].as_p())
class TestEmptyFormSet(SimpleTestCase):
def test_empty_formset_is_valid(self):
EmptyFsetWontValidateFormset = formset_factory(FavoriteDrinkForm, extra=0, formset=EmptyFsetWontValidate)
formset = EmptyFsetWontValidateFormset(
data={'form-INITIAL_FORMS': '0', 'form-TOTAL_FORMS': '0'},
prefix="form",
)
formset2 = EmptyFsetWontValidateFormset(
data={'form-INITIAL_FORMS': '0', 'form-TOTAL_FORMS': '1', 'form-0-name': 'bah'},
prefix="form",
)
self.assertFalse(formset.is_valid())
self.assertFalse(formset2.is_valid())
def test_empty_formset_media(self):
class MediaForm(Form):
class Media:
js = ('some-file.js',)
self.assertIn('some-file.js', str(formset_factory(MediaForm, extra=0)().media))
def test_empty_formset_is_multipart(self):
class FileForm(Form):
file = FileField()
self.assertTrue(formset_factory(FileForm, extra=0)().is_multipart())
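The recurring pattern in the tests above is the management form: a bound formset validates only if the POST data carries the bookkeeping keys. A minimal, self-contained sketch follows; the settings.configure() call is only there so the snippet runs outside a Django project.
import django
from django.conf import settings

settings.configure()  # minimal settings so forms work outside a project
django.setup()

from django.forms import CharField, Form, formset_factory

class NameForm(Form):
    name = CharField()

NameFormSet = formset_factory(NameForm)
formset = NameFormSet({
    'form-TOTAL_FORMS': '1',    # management form: number of forms rendered
    'form-INITIAL_FORMS': '0',  # management form: forms with initial data
    'form-0-name': 'Calexico',
})
assert formset.is_valid()
print(formset.cleaned_data)  # [{'name': 'Calexico'}]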
| true
| true
|
1c431e9494b623ae6f2fdbb6c1b0e694576fc004
| 2,641
|
py
|
Python
|
module4-acid-and-database-scalability-tradeoffs/Assignment/assignment_mongo.py
|
singparvi/DS-Unit-3-Sprint-2-SQL-and-Databases
|
7d61f09a410ea91731caddb4fcc96b84cb9b0221
|
[
"MIT"
] | null | null | null |
module4-acid-and-database-scalability-tradeoffs/Assignment/assignment_mongo.py
|
singparvi/DS-Unit-3-Sprint-2-SQL-and-Databases
|
7d61f09a410ea91731caddb4fcc96b84cb9b0221
|
[
"MIT"
] | null | null | null |
module4-acid-and-database-scalability-tradeoffs/Assignment/assignment_mongo.py
|
singparvi/DS-Unit-3-Sprint-2-SQL-and-Databases
|
7d61f09a410ea91731caddb4fcc96b84cb9b0221
|
[
"MIT"
] | null | null | null |
import pymongo
# now make a connection with mongo db and test connection
mongo_client = pymongo.MongoClient(
'mongodb+srv://singparvi:qwerty12345@cluster0.l0ldo.mongodb.net/myFirstDatabase?retryWrites=true&w=majority')
rpg_collections = mongo_client.myFirstDatabase.rpg_collections
# How many total Characters are there?
print('How many total Characters are there?: ', rpg_collections.count())
print("\n")
# How many total Items?
items_list = []
for document in rpg_collections.find():
# pprint.pprint(document)
items_list.append(document['items'])
# flatten list
flat_list = [item for sublist in items_list for item in sublist]
# get the number of unique items
print('How many total Items?: ', len(set(flat_list)))
print("\n")
# How many of the Items are weapons? How many are not?
items_list = []
for document in rpg_collections.find():
# pprint.pprint(document)
items_list.append(document['weapons'])
# flatten list
flat_list = [item for sublist in items_list for item in sublist]
# get the number of unique weapons
print('How many of the Items are weapons? ', len(set(flat_list)))
print("\n")
# How many Items does each character have? (Return first 20 rows)
print('How many Items does each character have? (Return first 20 rows)')
items_list = []
i = 0
for document in rpg_collections.find():
# pprint.pprint(document)
if i < 20:
i = i + 1
        print('Character', i, document['name'], 'has', len(document['items']), 'items')
print()
# How many Weapons does each character have? (Return first 20 rows)
print('How many Weapons does each character have? (Return first 20 rows)')
items_list = []
i = 0
for document in rpg_collections.find():
# pprint.pprint(document)
if i < 20:
i = i + 1
        print('Character', i, document['name'], 'has', len(document['weapons']), 'weapons')
print()
# On average, how many Items does each Character have?
items_list = []
i = 0
for document in rpg_collections.find():
# pprint.pprint(document)
items_list.append(document['items'])
# flatten the list
flat_list = [item for sublist in items_list for item in sublist]
print('On average, how many Items does each Character have? ', len(flat_list) / rpg_collections.count())
# On average, how many Weapons does each character have?
items_list = []
i = 0
for document in rpg_collections.find():
# pprint.pprint(document)
items_list.append(document['weapons'])
# flatten the list
flat_list = [item for sublist in items_list for item in sublist]
print('On average, how many Weapons does each character have? ', len(flat_list) / rpg_collections.count())
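As a hedged aside, the same counts can be pushed server-side; this sketch reuses the rpg_collections handle from above and swaps the deprecated count() for count_documents({}), its pymongo 4.x replacement.
# Let MongoDB size the arrays instead of looping client-side.
pipeline = [
    {'$project': {'name': 1, 'num_items': {'$size': '$items'}}},
    {'$limit': 20},
]
for doc in rpg_collections.aggregate(pipeline):
    print(doc['name'], 'has', doc['num_items'], 'items')

# count_documents({}) replaces the deprecated count() in newer pymongo.
print('Total characters:', rpg_collections.count_documents({}))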
| 33.43038
| 113
| 0.711094
|
import pymongo
mongo_client = pymongo.MongoClient(
'mongodb+srv://singparvi:qwerty12345@cluster0.l0ldo.mongodb.net/myFirstDatabase?retryWrites=true&w=majority')
rpg_collections = mongo_client.myFirstDatabase.rpg_collections
print('How many total Characters are there?: ', rpg_collections.count())
print("\n")
items_list = []
for document in rpg_collections.find():
items_list.append(document['items'])
flat_list = [item for sublist in items_list for item in sublist]
print('How many total Items?: ', len(set(flat_list)))
print("\n")
items_list = []
for document in rpg_collections.find():
items_list.append(document['weapons'])
flat_list = [item for sublist in items_list for item in sublist]
print('How many of the Items are weapons? ', len(set(flat_list)))
print("\n")
print('How many Items does each character have? (Return first 20 rows)')
items_list = []
i = 0
for document in rpg_collections.find():
if i < 20:
i = i + 1
        print('Character', i, document['name'], 'has', len(document['items']), 'items')
print()
print('How many Weapons does each character have? (Return first 20 rows)')
items_list = []
i = 0
for document in rpg_collections.find():
if i < 20:
i = i + 1
        print('Character', i, document['name'], 'has', len(document['weapons']), 'weapons')
print()
items_list = []
i = 0
for document in rpg_collections.find():
items_list.append(document['items'])
flat_list = [item for sublist in items_list for item in sublist]
print('On average, how many Items does each Character have? ', len(flat_list) / rpg_collections.count())
items_list = []
i = 0
for document in rpg_collections.find():
items_list.append(document['weapons'])
flat_list = [item for sublist in items_list for item in sublist]
print('On average, how many Weapons does each character have? ', len(flat_list) / rpg_collections.count())
| true
| true
|
1c431e9d9998ea751e1c2815fa1a80b524b7e566
| 2,388
|
py
|
Python
|
var/spack/repos/builtin/packages/expect/package.py
|
williamfgc/spack
|
c8c795e7dbde22dc47c9ae285a4dd59004b115b1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/expect/package.py
|
williamfgc/spack
|
c8c795e7dbde22dc47c9ae285a4dd59004b115b1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/expect/package.py
|
williamfgc/spack
|
c8c795e7dbde22dc47c9ae285a4dd59004b115b1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import glob
import os
class Expect(AutotoolsPackage):
"""Expect is a tool for automating interactive applications such as
telnet, ftp, passwd, fsck, rlogin, tip, etc."""
homepage = "http://expect.sourceforge.net/"
url = "https://sourceforge.net/projects/expect/files/Expect/5.45/expect5.45.tar.gz/download"
version('5.45', '44e1a4f4c877e9ddc5a542dfa7ecc92b')
depends_on('tcl')
depends_on('automake', type='build')
depends_on('autoconf', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
force_autoreconf = True
patch('expect_detect_tcl_private_header_os_x_mountain_lion.patch', when='@5.45')
def configure_args(self):
spec = self.spec
args = [
# Without this, expect binary and library are not installed
'--exec-prefix={0}'.format(self.prefix),
'--enable-threads',
'--enable-shared',
'--enable-64bit',
'--with-tcl={0}'.format(spec['tcl'].prefix.lib),
'--with-tclinclude={0}'.format(spec['tcl'].prefix.include),
]
return args
@run_after('install')
def symlink_library(self):
"""Expect installs libraries into:
lib/expect5.45/libexpect5.45.so
Create a symlink so that the library can be found in lib."""
target = join_path(self.prefix.lib, 'expect*', 'libexpect*')
target = glob.glob(target)[0]
link_name = os.path.basename(target)
link_name = join_path(self.prefix.lib, link_name)
symlink(target, link_name)
@run_after('install')
def darwin_fix(self):
# The shared library is not installed correctly on Darwin; fix this
if self.spec.satisfies('platform=darwin'):
fix_darwin_install_name(
join_path(self.prefix.lib, 'expect{0}'.format(self.version)))
old = 'libexpect{0}.dylib'.format(self.version)
new = glob.glob(join_path(self.prefix.lib, 'expect*', 'libexpect*'))[0]
install_name_tool = Executable('install_name_tool')
install_name_tool('-change', old, new, self.prefix.bin.expect)
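For illustration only, the symlink_library step above boils down to the following plain glob/os calls once Spack's join_path/symlink helpers are stripped away; prefix_lib stands in for the package's installed lib directory.
import glob
import os

def link_expect_library(prefix_lib):
    # Find lib/expect<version>/libexpect<version>.so under the install prefix.
    target = glob.glob(os.path.join(prefix_lib, 'expect*', 'libexpect*'))[0]
    # Mirror it as lib/libexpect<version>.so so linkers can find it.
    os.symlink(target, os.path.join(prefix_lib, os.path.basename(target)))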
| 32.712329
| 101
| 0.64196
|
from spack import *
import glob
import os
class Expect(AutotoolsPackage):
homepage = "http://expect.sourceforge.net/"
url = "https://sourceforge.net/projects/expect/files/Expect/5.45/expect5.45.tar.gz/download"
version('5.45', '44e1a4f4c877e9ddc5a542dfa7ecc92b')
depends_on('tcl')
depends_on('automake', type='build')
depends_on('autoconf', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
force_autoreconf = True
patch('expect_detect_tcl_private_header_os_x_mountain_lion.patch', when='@5.45')
def configure_args(self):
spec = self.spec
args = [
'--exec-prefix={0}'.format(self.prefix),
'--enable-threads',
'--enable-shared',
'--enable-64bit',
'--with-tcl={0}'.format(spec['tcl'].prefix.lib),
'--with-tclinclude={0}'.format(spec['tcl'].prefix.include),
]
return args
@run_after('install')
def symlink_library(self):
target = join_path(self.prefix.lib, 'expect*', 'libexpect*')
target = glob.glob(target)[0]
link_name = os.path.basename(target)
link_name = join_path(self.prefix.lib, link_name)
symlink(target, link_name)
@run_after('install')
def darwin_fix(self):
if self.spec.satisfies('platform=darwin'):
fix_darwin_install_name(
join_path(self.prefix.lib, 'expect{0}'.format(self.version)))
old = 'libexpect{0}.dylib'.format(self.version)
new = glob.glob(join_path(self.prefix.lib, 'expect*', 'libexpect*'))[0]
install_name_tool = Executable('install_name_tool')
install_name_tool('-change', old, new, self.prefix.bin.expect)
| true
| true
|
1c431ed63ca861ca01aa3f842b457ff21544124b
| 4,462
|
py
|
Python
|
gepify/services/mobile_api/views.py
|
nvlbg/gepify
|
2e937535e2835f6bd47cd8a6026dc7fe2c6c58ca
|
[
"MIT"
] | 7
|
2016-07-01T00:27:02.000Z
|
2019-07-27T18:07:22.000Z
|
gepify/services/mobile_api/views.py
|
nvlbg/gepify
|
2e937535e2835f6bd47cd8a6026dc7fe2c6c58ca
|
[
"MIT"
] | 5
|
2016-08-13T10:40:43.000Z
|
2021-04-30T20:44:54.000Z
|
gepify/services/mobile_api/views.py
|
nvlbg/gepify
|
2e937535e2835f6bd47cd8a6026dc7fe2c6c58ca
|
[
"MIT"
] | null | null | null |
from . import mobile_api_service
from flask import request, current_app, jsonify
from .view_decorators import access_key_required
from gepify.providers import (
songs, SUPPORTED_FORMATS, SUPPORTED_PROVIDERS, MIMETYPES
)
from gepify.services.spotify.models import (
SPOTIFY_AUTHORIZATION_DATA
)
import requests
import json
from gepify.influxdb import influxdb
from ..util import send_file
SPOTIFY_REDIRECT_URI = 'spotify-auth://callback'
@mobile_api_service.route('/get_access_token/<code>')
def get_access_token(code):
influxdb.count('mobile_api.access_token_requests')
payload = {
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': SPOTIFY_REDIRECT_URI
}
headers = {
'Authorization': 'Basic {}'.format(SPOTIFY_AUTHORIZATION_DATA)
}
try:
post_request = requests.post('https://accounts.spotify.com/api/token',
data=payload, headers=headers)
if post_request.status_code == 200:
response_data = json.loads(post_request.text)
access_token = response_data['access_token']
refresh_token = response_data.get('refresh_token', None)
expires_in = response_data['expires_in']
return jsonify(
access_token=access_token,
refresh_token=refresh_token,
expires_in=expires_in)
else:
raise RuntimeError('Could not get authentication token')
except Exception as e:
current_app.logger.error(
'Could not authenticate spotify user: {}'.format(e))
return jsonify(
            error='There was an error while trying to authenticate you. '
            'Please try again.'), 503
@mobile_api_service.route('/refresh_access_token/<refresh_token>')
def refresh_access_token(refresh_token):
influxdb.count('mobile_api.refresh_access_token_requests')
payload = {
'refresh_token': refresh_token,
'grant_type': 'refresh_token'
}
headers = {
'Authorization': 'Basic {}'.format(SPOTIFY_AUTHORIZATION_DATA)
}
try:
post_request = requests.post('https://accounts.spotify.com/api/token',
data=payload, headers=headers)
if post_request.status_code == 200:
response_data = json.loads(post_request.text)
access_token = response_data['access_token']
expires_in = response_data['expires_in']
return jsonify(
access_token=access_token,
expires_in=expires_in)
else:
raise RuntimeError('Could not get authentication token')
except Exception as e:
current_app.logger.error(
'Could not authenticate spotify user: {}'.format(e))
return jsonify(
            error='There was an error while trying to authenticate you. '
            'Please try again.'), 503
@mobile_api_service.route('/download_song/<path:song_name>/<format>')
@access_key_required
def download_song(song_name, format):
influxdb.count('mobile_api.download_song_requests')
if format not in SUPPORTED_FORMATS:
current_app.logger.warning(
'User tried to download a song in unsupported format.\n' +
'Song: {}\n'.format(song_name) +
'Format: {}\n'.format(format)
)
return jsonify(reason='Unsupported format'), 400
if not songs.has_song_format(song_name, format):
provider = request.args.get('provider', SUPPORTED_PROVIDERS[0])
if provider not in SUPPORTED_PROVIDERS:
current_app.logger.warning(
'User tried to download a song with unsupported provider.\n' +
'Song: {}\n'.format(song_name) +
'Format: {}\n'.format(format) +
'Provider: {}\n'.format(provider)
)
return jsonify(reason='Unsupported provider'), 400
song = {'name': song_name}
songs.download_song.delay(song, format=format, provider=provider)
return jsonify(
refresh_after=30,
message='Your song has started downloading.')
influxdb.count('mobile_api.downloaded_songs')
song = songs.get_song(song_name)
return send_file(
song['files'][format],
as_attachment=True,
attachment_filename='{}.{}'.format(song['name'], format),
mimetype=MIMETYPES[format]
)
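A hedged client-side sketch of how download_song behaves from the outside: a JSON body means the song was only queued, anything else is the audio payload. The base URL and the way the access key travels are assumptions, since access_key_required is defined elsewhere.
import requests

BASE_URL = 'https://gepify.example.com/mobile_api'  # hypothetical deployment

def fetch_song(song_name, fmt, access_key):
    # Passing the key as a query parameter is an assumption; adjust to
    # whatever access_key_required actually expects.
    resp = requests.get(
        '{}/download_song/{}/{}'.format(BASE_URL, song_name, fmt),
        params={'access_key': access_key})
    resp.raise_for_status()
    if resp.headers.get('Content-Type', '').startswith('application/json'):
        return resp.json()   # e.g. {'refresh_after': 30, 'message': ...}
    return resp.content      # the audio bytes served via send_file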
| 34.859375
| 78
| 0.636486
|
from . import mobile_api_service
from flask import request, current_app, jsonify
from .view_decorators import access_key_required
from gepify.providers import (
songs, SUPPORTED_FORMATS, SUPPORTED_PROVIDERS, MIMETYPES
)
from gepify.services.spotify.models import (
SPOTIFY_AUTHORIZATION_DATA
)
import requests
import json
from gepify.influxdb import influxdb
from ..util import send_file
SPOTIFY_REDIRECT_URI = 'spotify-auth://callback'
@mobile_api_service.route('/get_access_token/<code>')
def get_access_token(code):
influxdb.count('mobile_api.access_token_requests')
payload = {
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': SPOTIFY_REDIRECT_URI
}
headers = {
'Authorization': 'Basic {}'.format(SPOTIFY_AUTHORIZATION_DATA)
}
try:
post_request = requests.post('https://accounts.spotify.com/api/token',
data=payload, headers=headers)
if post_request.status_code == 200:
response_data = json.loads(post_request.text)
access_token = response_data['access_token']
refresh_token = response_data.get('refresh_token', None)
expires_in = response_data['expires_in']
return jsonify(
access_token=access_token,
refresh_token=refresh_token,
expires_in=expires_in)
else:
raise RuntimeError('Could not get authentication token')
except Exception as e:
current_app.logger.error(
'Could not authenticate spotify user: {}'.format(e))
return jsonify(
            error='There was an error while trying to authenticate you. '
            'Please try again.'), 503
@mobile_api_service.route('/refresh_access_token/<refresh_token>')
def refresh_access_token(refresh_token):
influxdb.count('mobile_api.refresh_access_token_requests')
payload = {
'refresh_token': refresh_token,
'grant_type': 'refresh_token'
}
headers = {
'Authorization': 'Basic {}'.format(SPOTIFY_AUTHORIZATION_DATA)
}
try:
post_request = requests.post('https://accounts.spotify.com/api/token',
data=payload, headers=headers)
if post_request.status_code == 200:
response_data = json.loads(post_request.text)
access_token = response_data['access_token']
expires_in = response_data['expires_in']
return jsonify(
access_token=access_token,
expires_in=expires_in)
else:
raise RuntimeError('Could not get authentication token')
except Exception as e:
current_app.logger.error(
'Could not authenticate spotify user: {}'.format(e))
return jsonify(
            error='There was an error while trying to authenticate you. '
            'Please try again.'), 503
@mobile_api_service.route('/download_song/<path:song_name>/<format>')
@access_key_required
def download_song(song_name, format):
influxdb.count('mobile_api.download_song_requests')
if format not in SUPPORTED_FORMATS:
current_app.logger.warning(
'User tried to download a song in unsupported format.\n' +
'Song: {}\n'.format(song_name) +
'Format: {}\n'.format(format)
)
return jsonify(reason='Unsupported format'), 400
if not songs.has_song_format(song_name, format):
provider = request.args.get('provider', SUPPORTED_PROVIDERS[0])
if provider not in SUPPORTED_PROVIDERS:
current_app.logger.warning(
'User tried to download a song with unsupported provider.\n' +
'Song: {}\n'.format(song_name) +
'Format: {}\n'.format(format) +
'Provider: {}\n'.format(provider)
)
return jsonify(reason='Unsupported provider'), 400
song = {'name': song_name}
songs.download_song.delay(song, format=format, provider=provider)
return jsonify(
refresh_after=30,
message='Your song has started downloading.')
influxdb.count('mobile_api.downloaded_songs')
song = songs.get_song(song_name)
return send_file(
song['files'][format],
as_attachment=True,
attachment_filename='{}.{}'.format(song['name'], format),
mimetype=MIMETYPES[format]
)
| true
| true
|
1c431f161f6d7ee8e4a8f7bd87cf3977a6535807
| 179
|
py
|
Python
|
npy2h5py.py
|
rahmanabir/BanglaNLP
|
6a2c03fd30ce277c344093b54674c00774f0bc44
|
[
"MIT"
] | 1
|
2021-12-25T18:23:26.000Z
|
2021-12-25T18:23:26.000Z
|
npy2h5py.py
|
rahmanabir/BanglaNLP
|
6a2c03fd30ce277c344093b54674c00774f0bc44
|
[
"MIT"
] | null | null | null |
npy2h5py.py
|
rahmanabir/BanglaNLP
|
6a2c03fd30ce277c344093b54674c00774f0bc44
|
[
"MIT"
] | 1
|
2019-12-25T12:05:40.000Z
|
2019-12-25T12:05:40.000Z
|
############
# @desc:
# Codebase that converts .npy files into h5py files for a more efficient dataloader.
import pandas as pd
import numpy as np
import h5py
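The header above states the intent but the module body stops at the imports, so here is a hedged sketch of the conversion it describes; the paths and dataset name are placeholders.
def npy_to_h5(npy_path='data.npy', h5_path='data.h5', dataset_name='data'):
    # Load the NumPy array and mirror it into a compressed HDF5 dataset.
    arr = np.load(npy_path)
    with h5py.File(h5_path, 'w') as f:
        f.create_dataset(dataset_name, data=arr, compression='gzip')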
| 17.9
| 101
| 0.687151
| true
| true
|
|
1c4320f44c460f4434ca770e8eff950080373dd1
| 1,221
|
py
|
Python
|
src/Route.py
|
ganemone/sublime-bart
|
1fcd72062914891cffac840d814eb129ebd43edf
|
[
"MIT"
] | 6
|
2015-02-22T17:40:33.000Z
|
2016-07-11T19:18:37.000Z
|
src/Route.py
|
ganemone/sublime-bart
|
1fcd72062914891cffac840d814eb129ebd43edf
|
[
"MIT"
] | null | null | null |
src/Route.py
|
ganemone/sublime-bart
|
1fcd72062914891cffac840d814eb129ebd43edf
|
[
"MIT"
] | 1
|
2017-07-06T15:27:20.000Z
|
2017-07-06T15:27:20.000Z
|
from .stations import station_map
class Route:
def __init__(self, trip_attrs, legs):
self.origin = trip_attrs['origin']
self.destination = trip_attrs['destination']
self.fare = trip_attrs['fare']
self.departs = trip_attrs['origTimeMin']
self.arrives = trip_attrs['destTimeMin']
self.legs = legs
def has_transfer(self):
return len(self.legs) > 1
def num_transfers(self):
return len(self.legs) - 1
def short_description(self):
return [
'Departs: ' + self.departs,
'Arrives: ' + self.arrives,
'Transfers: {0}'.format(self.num_transfers())
]
def long_description(self):
s = []
for leg in self.legs:
attrs = leg.attrib
s.append('{departs}: {origin} to {dest} \n'.format(
departs=attrs['origTimeMin'],
origin=station_map[attrs['origin'].lower()],
dest=station_map[attrs['destination'].lower()]
))
s.append('{arrives}: Arrive at {destination}'.format(
destination=station_map[self.destination.lower()],
arrives=self.arrives
))
return s
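A hedged usage sketch: Route reads leg.attrib, which matches XML elements such as those returned by BART's trip-planning API; the XML below is a made-up, minimal stand-in for a real response.
import xml.etree.ElementTree as ET

xml = ('<trip origin="EMBR" destination="MCAR" fare="3.70" '
       'origTimeMin="8:05 AM" destTimeMin="8:27 AM">'
       '<leg origin="EMBR" destination="MCAR" origTimeMin="8:05 AM"/></trip>')
trip = ET.fromstring(xml)
legs = trip.findall('leg')
print(legs[0].attrib['origin'])  # -> 'EMBR', the shape Route expects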
| 29.071429
| 63
| 0.562654
|
from .stations import station_map
class Route:
def __init__(self, trip_attrs, legs):
self.origin = trip_attrs['origin']
self.destination = trip_attrs['destination']
self.fare = trip_attrs['fare']
self.departs = trip_attrs['origTimeMin']
self.arrives = trip_attrs['destTimeMin']
self.legs = legs
def has_transfer(self):
return len(self.legs) > 1
def num_transfers(self):
return len(self.legs) - 1
def short_description(self):
return [
'Departs: ' + self.departs,
'Arrives: ' + self.arrives,
'Transfers: {0}'.format(self.num_transfers())
]
def long_description(self):
s = []
for leg in self.legs:
attrs = leg.attrib
s.append('{departs}: {origin} to {dest} \n'.format(
departs=attrs['origTimeMin'],
origin=station_map[attrs['origin'].lower()],
dest=station_map[attrs['destination'].lower()]
))
s.append('{arrives}: Arrive at {destination}'.format(
destination=station_map[self.destination.lower()],
arrives=self.arrives
))
return s
| true
| true
|
1c4321b177e5119519cffe391b90b022962114aa
| 2,203
|
py
|
Python
|
torchfly_dev/common/logging_util.py
|
ECS-251-W2020/final-project-TorchFly
|
69f60b337c5dec0b1cd8315c194bc7891ba98d3a
|
[
"MIT"
] | null | null | null |
torchfly_dev/common/logging_util.py
|
ECS-251-W2020/final-project-TorchFly
|
69f60b337c5dec0b1cd8315c194bc7891ba98d3a
|
[
"MIT"
] | 3
|
2021-06-08T21:07:12.000Z
|
2021-12-13T20:41:53.000Z
|
torchfly_dev/common/logging_util.py
|
ECS-251-W2020/final-project-TorchFly
|
69f60b337c5dec0b1cd8315c194bc7891ba98d3a
|
[
"MIT"
] | 1
|
2020-02-19T00:53:21.000Z
|
2020-02-19T00:53:21.000Z
|
import os
import sys
import hydra
import hydra.utils
import logging
import colorlog
from omegaconf import DictConfig
logger = logging.getLogger(__name__)
def configure_logging(config: DictConfig = None) -> None:
"""
This function initializes the logging. It is recommended to use Hydra to
configure the training and pass the config to this function.
Args:
config: A DictConfig from hydra.main
"""
if config is None:
config = DictConfig(
{
"logging": {
"log_dir": "logs",
"level": "INFO",
"color": True,
},
"training": {
"rank": 0,
"num_gpus_per_node": 1,
},
}
)
    # use `if`, not `elif`, so log_dir is also set when the default config
    # was just built above; otherwise os.makedirs below raises a NameError
    if config.logging.log_dir is None:
log_dir = "logs"
else:
log_dir = config.logging.log_dir
os.makedirs(log_dir, exist_ok=True)
# Only setup training for node 0
if not hasattr(config.training, "rank") or config.training.rank == 0 or config.training.rank is None:
root = logging.getLogger()
root.setLevel(getattr(logging, config.logging.level))
        # setup formatters
file_formatter = logging.Formatter("[%(asctime)s][%(name)s][%(levelname)s] - %(message)s")
if config.logging.color:
stream_formater = colorlog.ColoredFormatter(
"[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - %(message)s"
)
else:
stream_formater = file_formatter
# setup handlers
if config.training.num_gpus_per_node > 1:
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(stream_formater)
root.addHandler(stream_handler)
# append the log
file_handler = logging.FileHandler(os.path.join(log_dir, f"experiment.log"), mode='a')
file_handler.setFormatter(file_formatter)
root.addHandler(file_handler)
# def get_original_cwd(config, resume_mode) -> str:
# if resume_mode:
# os.getcwd()
# else:
# return os.getcwd()
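A minimal standalone sketch of the formatter and handler wiring that configure_logging performs, with the Hydra config and rank handling stripped out; it assumes colorlog is installed.
import logging
import sys
import colorlog

root = logging.getLogger()
root.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(colorlog.ColoredFormatter(
    "[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s]"
    "[%(log_color)s%(levelname)s%(reset)s] - %(message)s"))
root.addHandler(handler)
logging.getLogger("demo").info("colored logging configured")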
| 31.927536
| 126
| 0.588743
|
import os
import sys
import hydra
import hydra.utils
import logging
import colorlog
from omegaconf import DictConfig
logger = logging.getLogger(__name__)
def configure_logging(config: DictConfig = None) -> None:
if config is None:
config = DictConfig(
{
"logging": {
"log_dir": "logs",
"level": "INFO",
"color": True,
},
"training": {
"rank": 0,
"num_gpus_per_node": 1,
},
}
)
    if config.logging.log_dir is None:
log_dir = "logs"
else:
log_dir = config.logging.log_dir
os.makedirs(log_dir, exist_ok=True)
if not hasattr(config.training, "rank") or config.training.rank == 0 or config.training.rank is None:
root = logging.getLogger()
root.setLevel(getattr(logging, config.logging.level))
file_formatter = logging.Formatter("[%(asctime)s][%(name)s][%(levelname)s] - %(message)s")
if config.logging.color:
stream_formater = colorlog.ColoredFormatter(
"[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - %(message)s"
)
else:
stream_formater = file_formatter
if config.training.num_gpus_per_node > 1:
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(stream_formater)
root.addHandler(stream_handler)
file_handler = logging.FileHandler(os.path.join(log_dir, f"experiment.log"), mode='a')
file_handler.setFormatter(file_formatter)
root.addHandler(file_handler)
| true
| true
|
1c4321b5f6b36299cb5ed89abcc3d03be5c90012
| 1,496
|
py
|
Python
|
nova/api/openstack/compute/floating_ip_dns.py
|
zjzh/nova
|
7bb21723171c59b93e28f5d508c2b6df39220f13
|
[
"Apache-2.0"
] | 1,874
|
2015-01-04T05:18:34.000Z
|
2022-03-31T03:30:28.000Z
|
nova/api/openstack/compute/floating_ip_dns.py
|
woraser/nova
|
fc3890667e4971e3f0f35ac921c2a6c25f72adec
|
[
"Apache-2.0"
] | 132
|
2017-03-27T11:31:52.000Z
|
2022-03-30T08:45:02.000Z
|
nova/api/openstack/compute/floating_ip_dns.py
|
woraser/nova
|
fc3890667e4971e3f0f35ac921c2a6c25f72adec
|
[
"Apache-2.0"
] | 1,996
|
2015-01-04T15:11:51.000Z
|
2022-03-31T11:03:13.000Z
|
# Copyright 2011 Andrew Bogott for the Wikimedia Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import wsgi
class FloatingIPDNSDomainController(wsgi.Controller):
"""DNS domain controller for OpenStack API."""
@wsgi.expected_errors(410)
def index(self, req):
raise exc.HTTPGone()
@wsgi.expected_errors(410)
def update(self, req, id, body):
raise exc.HTTPGone()
@wsgi.expected_errors(410)
def delete(self, req, id):
raise exc.HTTPGone()
class FloatingIPDNSEntryController(wsgi.Controller):
"""DNS Entry controller for OpenStack API."""
@wsgi.expected_errors(410)
def show(self, req, domain_id, id):
raise exc.HTTPGone()
@wsgi.expected_errors(410)
def update(self, req, domain_id, id, body):
raise exc.HTTPGone()
@wsgi.expected_errors(410)
def delete(self, req, domain_id, id):
raise exc.HTTPGone()
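As a small aside, webob's HTTPGone is both an exception and a WSGI response, which is why the controllers above can simply raise it; a toy check:
from webob import exc

try:
    raise exc.HTTPGone()
except exc.HTTPGone as gone:
    print(gone.status)  # -> '410 Gone'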
| 29.92
| 78
| 0.696524
|
from webob import exc
from nova.api.openstack import wsgi
class FloatingIPDNSDomainController(wsgi.Controller):
@wsgi.expected_errors(410)
def index(self, req):
raise exc.HTTPGone()
@wsgi.expected_errors(410)
def update(self, req, id, body):
raise exc.HTTPGone()
@wsgi.expected_errors(410)
def delete(self, req, id):
raise exc.HTTPGone()
class FloatingIPDNSEntryController(wsgi.Controller):
@wsgi.expected_errors(410)
def show(self, req, domain_id, id):
raise exc.HTTPGone()
@wsgi.expected_errors(410)
def update(self, req, domain_id, id, body):
raise exc.HTTPGone()
@wsgi.expected_errors(410)
def delete(self, req, domain_id, id):
raise exc.HTTPGone()
| true
| true
|
1c432237b4b147a44df931ffd24b32203bcf57f0
| 2,493
|
py
|
Python
|
meggie/actions/tfr_save_tse/__init__.py
|
Teekuningas/meggie
|
0790559febb990a5487d4f0c92987066632e1d99
|
[
"BSD-2-Clause-FreeBSD"
] | 4
|
2020-04-29T08:57:11.000Z
|
2021-01-15T21:21:51.000Z
|
meggie/actions/tfr_save_tse/__init__.py
|
Teekuningas/meggie
|
0790559febb990a5487d4f0c92987066632e1d99
|
[
"BSD-2-Clause-FreeBSD"
] | 16
|
2019-05-03T10:31:16.000Z
|
2021-05-06T14:59:55.000Z
|
meggie/actions/tfr_save_tse/__init__.py
|
cibr-jyu/meggie
|
0790559febb990a5487d4f0c92987066632e1d99
|
[
"BSD-2-Clause-FreeBSD"
] | 3
|
2020-12-12T09:57:00.000Z
|
2020-12-20T17:12:05.000Z
|
""" Contains save tse action handling.
"""
from meggie.utilities.messaging import exc_messagebox
from meggie.utilities.messaging import messagebox
from meggie.utilities.names import next_available_name
from meggie.utilities.channels import get_channels_by_type
from meggie.utilities.validators import assert_arrays_same
from meggie.mainwindow.dynamic import Action
from meggie.mainwindow.dynamic import subject_action
from meggie.utilities.dialogs.TFROutputOptionsMain import TFROutputOptions
from meggie.actions.tfr_save_tse.controller.tfr import save_tse_channel_averages
from meggie.actions.tfr_save_tse.controller.tfr import save_tse_all_channels
class SaveTSE(Action):
""" Saves TSE's to csv files """
def run(self):
try:
selected_name = self.data['outputs']['tfr'][0]
except IndexError as exc:
return
time_arrays = []
freq_arrays = []
for subject in self.experiment.subjects.values():
tfr = subject.tfr.get(selected_name)
if not tfr:
continue
time_arrays.append(tfr.times)
freq_arrays.append(tfr.freqs)
assert_arrays_same(time_arrays)
        assert_arrays_same(freq_arrays, 'Freqs do not match')
def option_handler(params):
params['channel_groups'] = self.experiment.channel_groups
params['name'] = selected_name
try:
self.handler(self.experiment.active_subject, params)
except Exception as exc:
exc_messagebox(self.window, exc)
dialog = TFROutputOptions(self.window, self.experiment,
selected_name, handler=option_handler)
dialog.show()
@subject_action
def handler(self, subject, params):
"""
"""
if params['output_option'] == 'all_channels':
save_tse_all_channels(
self.experiment, params['name'],
params['blmode'], params['blstart'], params['blend'],
params['tmin'], params['tmax'], params['fmin'], params['fmax'],
do_meanwhile=self.window.update_ui)
else:
save_tse_channel_averages(
self.experiment, params['name'],
params['blmode'], params['blstart'], params['blend'],
params['tmin'], params['tmax'], params['fmin'], params['fmax'],
params['channel_groups'], do_meanwhile=self.window.update_ui)
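For orientation, a hedged illustration of the params dict the handler above consumes; only the keys are taken from the code, every value here is made up.
example_params = {
    'output_option': 'channel_averages',  # or 'all_channels'
    'name': 'my_tfr',                     # the selected TFR name
    'blmode': None, 'blstart': None, 'blend': None,  # baseline settings
    'tmin': 0.0, 'tmax': 1.0,             # time window (made-up values)
    'fmin': 8.0, 'fmax': 13.0,            # frequency window (made-up values)
    'channel_groups': {},                 # filled in from the experiment
}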
| 36.130435
| 80
| 0.639791
|
from meggie.utilities.messaging import exc_messagebox
from meggie.utilities.messaging import messagebox
from meggie.utilities.names import next_available_name
from meggie.utilities.channels import get_channels_by_type
from meggie.utilities.validators import assert_arrays_same
from meggie.mainwindow.dynamic import Action
from meggie.mainwindow.dynamic import subject_action
from meggie.utilities.dialogs.TFROutputOptionsMain import TFROutputOptions
from meggie.actions.tfr_save_tse.controller.tfr import save_tse_channel_averages
from meggie.actions.tfr_save_tse.controller.tfr import save_tse_all_channels
class SaveTSE(Action):
def run(self):
try:
selected_name = self.data['outputs']['tfr'][0]
except IndexError as exc:
return
time_arrays = []
freq_arrays = []
for subject in self.experiment.subjects.values():
tfr = subject.tfr.get(selected_name)
if not tfr:
continue
time_arrays.append(tfr.times)
freq_arrays.append(tfr.freqs)
assert_arrays_same(time_arrays)
        assert_arrays_same(freq_arrays, 'Freqs do not match')
def option_handler(params):
params['channel_groups'] = self.experiment.channel_groups
params['name'] = selected_name
try:
self.handler(self.experiment.active_subject, params)
except Exception as exc:
exc_messagebox(self.window, exc)
dialog = TFROutputOptions(self.window, self.experiment,
selected_name, handler=option_handler)
dialog.show()
@subject_action
def handler(self, subject, params):
if params['output_option'] == 'all_channels':
save_tse_all_channels(
self.experiment, params['name'],
params['blmode'], params['blstart'], params['blend'],
params['tmin'], params['tmax'], params['fmin'], params['fmax'],
do_meanwhile=self.window.update_ui)
else:
save_tse_channel_averages(
self.experiment, params['name'],
params['blmode'], params['blstart'], params['blend'],
params['tmin'], params['tmax'], params['fmin'], params['fmax'],
params['channel_groups'], do_meanwhile=self.window.update_ui)
| true
| true
|
1c432292140d1410b9ad6d30040f346e295f781e
| 13,290
|
py
|
Python
|
numpyro/infer/kernels.py
|
ahmadsalim/numpyro
|
015c80ddd24cf6bc89006fc3a70b424fecd09331
|
[
"Apache-2.0"
] | 3
|
2020-08-25T14:31:08.000Z
|
2020-08-26T02:23:08.000Z
|
numpyro/infer/kernels.py
|
ahmadsalim/numpyro
|
015c80ddd24cf6bc89006fc3a70b424fecd09331
|
[
"Apache-2.0"
] | null | null | null |
numpyro/infer/kernels.py
|
ahmadsalim/numpyro
|
015c80ddd24cf6bc89006fc3a70b424fecd09331
|
[
"Apache-2.0"
] | 1
|
2020-09-11T10:08:27.000Z
|
2020-09-11T10:08:27.000Z
|
from abc import ABC, abstractmethod
from typing import Callable, List, Dict, Tuple
import numpy as np
import numpy.random as npr
import jax.numpy as jnp
import jax.scipy.stats
import jax.scipy.linalg
import numpyro.distributions as dist
from numpyro.util import sqrth, posdef, safe_norm
class PrecondMatrix(ABC):
@abstractmethod
def compute(self, particles: jnp.ndarray, loss_fn: Callable[[jnp.ndarray], float]):
"""
Computes a preconditioning matrix for a given set of particles and a loss function
:param particles: The Stein particles to compute the preconditioning matrix from
:param loss_fn: Loss function given particles
"""
raise NotImplementedError
class SteinKernel(ABC):
@property
@abstractmethod
def mode(self):
"""
Returns the type of kernel, either 'norm' or 'vector' or 'matrix'.
"""
raise NotImplementedError
@abstractmethod
def compute(self, particles: jnp.ndarray, particle_info: Dict[str, Tuple[int, int]],
loss_fn: Callable[[jnp.ndarray], float]):
"""
Computes the kernel function given the input Stein particles
:param particles: The Stein particles to compute the kernel from
:param particle_info: A mapping from parameter names to the position in the particle matrix
:param loss_fn: Loss function given particles
"""
raise NotImplementedError
class RBFKernel(SteinKernel):
"""
Calculates the Gaussian RBF kernel function with median bandwidth.
This is the kernel used in the original "Stein Variational Gradient Descent" paper by Liu and Wang
:param mode: Either 'norm' (default) specifying to take the norm of each particle, 'vector' to return a component-wise kernel
or 'matrix' to return a matrix-valued kernel
:param matrix_mode: Either 'norm_diag' (default) for diagonal filled with the norm kernel or 'vector_diag' for diagonal of vector-valued kernel
:param bandwidth_factor: A multiplier to the bandwidth based on data size n (default 1/log(n))
"""
def __init__(self, mode='norm', matrix_mode='norm_diag',
bandwidth_factor: Callable[[float], float] = lambda n: 1 / jnp.log(n)):
assert mode == 'norm' or mode == 'vector' or mode == 'matrix'
assert matrix_mode == 'norm_diag' or matrix_mode == 'vector_diag'
self._mode = mode
self.matrix_mode = matrix_mode
self.bandwidth_factor = bandwidth_factor
def _normed(self):
return self._mode == 'norm' or (self.mode == 'matrix' and self.matrix_mode == 'norm_diag')
def compute(self, particles, particle_info, loss_fn):
diffs = jnp.expand_dims(particles, axis=0) - jnp.expand_dims(particles, axis=1) # N x N (x D)
if self._normed() and particles.ndim >= 2:
diffs = safe_norm(diffs, ord=2, axis=-1) # N x D -> N
diffs = jnp.reshape(diffs, (diffs.shape[0] * diffs.shape[1], -1)) # N * N (x D)
factor = self.bandwidth_factor(particles.shape[0])
if diffs.ndim >= 2:
diff_norms = safe_norm(diffs, ord=2, axis=-1)
else:
diff_norms = diffs
median = jnp.argsort(diff_norms)[int(diffs.shape[0] / 2)]
bandwidth = jnp.abs(diffs)[median] ** 2 * factor + 1e-5
if self._normed():
bandwidth = bandwidth[0]
def kernel(x, y):
diff = safe_norm(x - y, ord=2) if self._normed() and x.ndim >= 1 else x - y
kernel_res = jnp.exp(- diff ** 2 / bandwidth)
if self._mode == 'matrix':
if self.matrix_mode == 'norm_diag':
return kernel_res * jnp.identity(x.shape[0])
else:
return jnp.diag(kernel_res)
else:
return kernel_res
return kernel
@property
def mode(self):
return self._mode
class IMQKernel(SteinKernel):
"""
Calculates the IMQ kernel, from "Measuring Sample Quality with Kernels" by Gorham and Mackey
:param mode: Either 'norm' (default) specifying to take the norm of each particle or 'vector' to return a component-wise kernel
:param const: Positive multi-quadratic constant (c)
:param exponent: Inverse exponent (beta) between (-1, 0)
"""
# Based on
def __init__(self, mode='norm', const=1.0, expon=-0.5):
assert mode == 'norm' or mode == 'vector'
assert 0.0 < const
assert -1.0 < expon < 0.0
self._mode = mode
self.const = const
self.expon = expon
@property
def mode(self):
return self._mode
def compute(self, particles, particle_info, loss_fn):
def kernel(x, y):
diff = safe_norm(x - y, ord=2, axis=-1) if self._mode == 'norm' else x - y
return (jnp.array(self.const) ** 2 + diff ** 2) ** self.expon
return kernel
class LinearKernel(SteinKernel):
"""
Calculates the linear kernel, from "Stein Variational Gradient Descent as Moment Matching" by Liu and Wang
"""
def __init__(self):
self._mode = 'norm'
@property
def mode(self):
return self._mode
def compute(self, particles: jnp.ndarray, particle_info, loss_fn):
def kernel(x, y):
if x.ndim >= 1:
return x @ y + 1
else:
return x * y + 1
return kernel
class RandomFeatureKernel(SteinKernel):
"""
Calculates the random kernel, from "Stein Variational Gradient Descent as Moment Matching" by Liu and Wang
:param bandwidth_subset: How many particles should be used to calculate the bandwidth? (default None, meaning all particles)
:param random_indices: The set of indices which to do random feature expansion on. (default None, meaning all indices)
:param bandwidth_factor: A multiplier to the bandwidth based on data size n (default 1/log(n))
"""
def __init__(self, bandwidth_subset=None, random_indices=None,
bandwidth_factor: Callable[[float], float] = lambda n: 1 / jnp.log(n)):
assert bandwidth_subset is None or bandwidth_subset > 0
self._mode = 'norm'
self.bandwidth_subset = bandwidth_subset
        self.random_indices = random_indices
self.bandwidth_factor = bandwidth_factor
self._random_weights = None
self._random_biases = None
@property
def mode(self):
return self._mode
def compute(self, particles, particle_info, loss_fn):
if self._random_weights is None:
self._random_weights = jnp.array(npr.randn(*particles.shape))
self._random_biases = jnp.array(npr.rand(*particles.shape) * 2 * np.pi)
factor = self.bandwidth_factor(particles.shape[0])
if self.bandwidth_subset is not None:
particles = particles[npr.choice(particles.shape[0], self.bandwidth_subset)]
diffs = jnp.expand_dims(particles, axis=0) - jnp.expand_dims(particles, axis=1) # N x N x D
if particles.ndim >= 2:
diffs = safe_norm(diffs, ord=2, axis=-1) # N x N x D -> N x N
diffs = jnp.reshape(diffs, (diffs.shape[0] * diffs.shape[1], -1)) # N * N x 1
if diffs.ndim >= 2:
diff_norms = safe_norm(diffs, ord=2, axis=-1)
else:
diff_norms = diffs
median = jnp.argsort(diff_norms)[int(diffs.shape[0] / 2)]
bandwidth = jnp.abs(diffs)[median] ** 2 * factor + 1e-5
def feature(x, w, b):
return jnp.sqrt(2) * jnp.cos((x @ w + b) / bandwidth)
def kernel(x, y):
ws = self._random_weights if self.random_indices is None else self._random_weights[self.random_indices]
bs = self._random_biases if self.random_indices is None else self._random_biases[self.random_indices]
return jnp.sum(jax.vmap(lambda w, b: feature(x, w, b) * feature(y, w, b))(ws, bs))
return kernel
class MixtureKernel(SteinKernel):
"""
Implements a mixture of multiple kernels from "Stein Variational Gradient Descent as Moment Matching" by Liu and Wang
:param ws: Weight of each kernel in the mixture
:param kernel_fns: Different kernel functions to mix together
"""
def __init__(self, ws: List[float], kernel_fns: List[SteinKernel]):
assert len(ws) == len(kernel_fns)
assert len(kernel_fns) > 1
assert all(kf.mode == kernel_fns[0].mode for kf in kernel_fns)
self.ws = ws
self.kernel_fns = kernel_fns
@property
def mode(self):
return self.kernel_fns[0].mode
def compute(self, particles, particle_info, loss_fn):
kernels = [kf.compute(particles, particle_info, loss_fn) for kf in self.kernel_fns]
def kernel(x, y):
res = self.ws[0] * kernels[0](x, y)
for w, k in zip(self.ws[1:], kernels[1:]):
res = res + w * k(x, y)
return res
return kernel
class HessianPrecondMatrix(PrecondMatrix):
"""
Calculates the constant precondition matrix based on the negative Hessian of the loss
from "Stein Variational Gradient Descent with Matrix-Valued Kernels" by Wang, Tang, Bajaj and Liu
"""
def compute(self, particles, loss_fn):
hessian = -jax.vmap(jax.hessian(loss_fn))(particles)
return hessian
class PrecondMatrixKernel(SteinKernel):
"""
Calculates the preconditioned kernel from "Stein Variational Gradient Descent with Matrix-Valued Kernels" by Wang, Tang, Bajaj and Liu
:param precond_matrix_fn: The constant preconditioning matrix
:param inner_kernel_fn: The inner kernel function
:param precond_mode: How to use the precondition matrix, either constant ('const')
or as mixture with anchor points ('anchor_points')
"""
def __init__(self, precond_matrix_fn: PrecondMatrix, inner_kernel_fn: SteinKernel,
precond_mode='anchor_points'):
assert inner_kernel_fn.mode == 'matrix'
assert precond_mode == 'const' or precond_mode == 'anchor_points'
self.precond_matrix_fn = precond_matrix_fn
self.inner_kernel_fn = inner_kernel_fn
self.precond_mode = precond_mode
@property
def mode(self):
return 'matrix'
def compute(self, particles, particle_info, loss_fn):
qs = self.precond_matrix_fn.compute(particles, loss_fn)
if self.precond_mode == 'const':
qs = jnp.expand_dims(jnp.mean(qs, axis=0), axis=0)
qs_inv = jnp.linalg.inv(qs)
qs_sqrt = sqrth(qs)
qs_inv_sqrt = sqrth(qs_inv)
inner_kernel = self.inner_kernel_fn.compute(particles, particle_info, loss_fn)
def kernel(x, y):
if self.precond_mode == 'const':
wxs = jnp.array([1.])
wys = jnp.array([1.])
else:
wxs = jax.nn.softmax(
jax.vmap(lambda z, q_inv: dist.MultivariateNormal(z, posdef(q_inv)).log_prob(x))(particles, qs_inv))
wys = jax.nn.softmax(
jax.vmap(lambda z, q_inv: dist.MultivariateNormal(z, posdef(q_inv)).log_prob(y))(particles, qs_inv))
return jnp.sum(
jax.vmap(lambda qs, qis, wx, wy: wx * wy * (qis @ inner_kernel(qs @ x, qs @ y) @ qis.transpose()))(
qs_sqrt, qs_inv_sqrt, wxs, wys), axis=0)
return kernel
class GraphicalKernel(SteinKernel):
"""
Calculates graphical kernel used in "Stein Variational Message Passing for Continuous Graphical Models" by Wang, Zheng and Liu
:param local_kernel_fns: A mapping between parameters and a choice of kernel function for that parameter (default to default_kernel_fn for each parameter)
:param default_kernel_fn: The default choice of kernel function when none is specified for a particular parameter
"""
def __init__(self, local_kernel_fns: Dict[str, SteinKernel] = None, default_kernel_fn: SteinKernel = RBFKernel()):
self.local_kernel_fns = local_kernel_fns if local_kernel_fns is not None else {}
self.default_kernel_fn = default_kernel_fn
@property
def mode(self):
return 'matrix'
def compute(self, particles, particle_info, loss_fn):
local_kernels = []
for pk, (start_idx, end_idx) in particle_info.items():
pk_kernel_fn = self.local_kernel_fns.get(pk, self.default_kernel_fn)
            # bind the loop variables as defaults so each closure keeps its own slice
            pk_loss_fn = lambda ps, start_idx=start_idx, end_idx=end_idx: loss_fn(
                jnp.concatenate([particles[:, :start_idx], ps, particles[:, end_idx:]], axis=-1))
pk_kernel = pk_kernel_fn.compute(particles[:, start_idx:end_idx], {pk: (0, end_idx - start_idx)},
pk_loss_fn)
local_kernels.append((pk_kernel, pk_kernel_fn.mode, start_idx, end_idx))
def kernel(x, y):
kernel_res = []
for kernel, mode, start_idx, end_idx in local_kernels:
v = kernel(x[start_idx:end_idx], y[start_idx:end_idx])
if mode == 'norm':
v = v * jnp.identity(end_idx - start_idx)
elif mode == 'vector':
v = jnp.diag(v)
kernel_res.append(v)
return jax.scipy.linalg.block_diag(*kernel_res)
return kernel
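A compact standalone sketch of the median-bandwidth heuristic that RBFKernel.compute implements, run on toy particles with plain jax.numpy:
import jax.numpy as jnp

particles = jnp.array([[0.0, 0.0], [1.0, 0.0], [0.0, 2.0]])  # N x D toy data
diffs = particles[None, :, :] - particles[:, None, :]         # N x N x D
dists = jnp.linalg.norm(diffs, axis=-1).reshape(-1)           # N*N distances
factor = 1 / jnp.log(particles.shape[0])                      # 1/log(n)
bandwidth = jnp.sort(dists)[dists.shape[0] // 2] ** 2 * factor + 1e-5
kernel_matrix = jnp.exp(-(dists ** 2) / bandwidth).reshape(3, 3)
print(kernel_matrix)  # symmetric, with ones on the diagonal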
| 41.401869
| 158
| 0.633785
|
from abc import ABC, abstractmethod
from typing import Callable, List, Dict, Tuple
import numpy as np
import numpy.random as npr
import jax.numpy as jnp
import jax.scipy.stats
import jax.scipy.linalg
import numpyro.distributions as dist
from numpyro.util import sqrth, posdef, safe_norm
class PrecondMatrix(ABC):
@abstractmethod
def compute(self, particles: jnp.ndarray, loss_fn: Callable[[jnp.ndarray], float]):
raise NotImplementedError
class SteinKernel(ABC):
@property
@abstractmethod
def mode(self):
raise NotImplementedError
@abstractmethod
def compute(self, particles: jnp.ndarray, particle_info: Dict[str, Tuple[int, int]],
loss_fn: Callable[[jnp.ndarray], float]):
raise NotImplementedError
class RBFKernel(SteinKernel):
def __init__(self, mode='norm', matrix_mode='norm_diag',
bandwidth_factor: Callable[[float], float] = lambda n: 1 / jnp.log(n)):
assert mode == 'norm' or mode == 'vector' or mode == 'matrix'
assert matrix_mode == 'norm_diag' or matrix_mode == 'vector_diag'
self._mode = mode
self.matrix_mode = matrix_mode
self.bandwidth_factor = bandwidth_factor
def _normed(self):
return self._mode == 'norm' or (self.mode == 'matrix' and self.matrix_mode == 'norm_diag')
def compute(self, particles, particle_info, loss_fn):
diffs = jnp.expand_dims(particles, axis=0) - jnp.expand_dims(particles, axis=1)
if self._normed() and particles.ndim >= 2:
diffs = safe_norm(diffs, ord=2, axis=-1)
diffs = jnp.reshape(diffs, (diffs.shape[0] * diffs.shape[1], -1))
factor = self.bandwidth_factor(particles.shape[0])
if diffs.ndim >= 2:
diff_norms = safe_norm(diffs, ord=2, axis=-1)
else:
diff_norms = diffs
median = jnp.argsort(diff_norms)[int(diffs.shape[0] / 2)]
bandwidth = jnp.abs(diffs)[median] ** 2 * factor + 1e-5
if self._normed():
bandwidth = bandwidth[0]
def kernel(x, y):
diff = safe_norm(x - y, ord=2) if self._normed() and x.ndim >= 1 else x - y
kernel_res = jnp.exp(- diff ** 2 / bandwidth)
if self._mode == 'matrix':
if self.matrix_mode == 'norm_diag':
return kernel_res * jnp.identity(x.shape[0])
else:
return jnp.diag(kernel_res)
else:
return kernel_res
return kernel
@property
def mode(self):
return self._mode
class IMQKernel(SteinKernel):
def __init__(self, mode='norm', const=1.0, expon=-0.5):
assert mode == 'norm' or mode == 'vector'
assert 0.0 < const
assert -1.0 < expon < 0.0
self._mode = mode
self.const = const
self.expon = expon
@property
def mode(self):
return self._mode
def compute(self, particles, particle_info, loss_fn):
def kernel(x, y):
diff = safe_norm(x - y, ord=2, axis=-1) if self._mode == 'norm' else x - y
return (jnp.array(self.const) ** 2 + diff ** 2) ** self.expon
return kernel
class LinearKernel(SteinKernel):
def __init__(self):
self._mode = 'norm'
@property
def mode(self):
return self._mode
def compute(self, particles: jnp.ndarray, particle_info, loss_fn):
def kernel(x, y):
if x.ndim >= 1:
return x @ y + 1
else:
return x * y + 1
return kernel
class RandomFeatureKernel(SteinKernel):
def __init__(self, bandwidth_subset=None, random_indices=None,
bandwidth_factor: Callable[[float], float] = lambda n: 1 / jnp.log(n)):
assert bandwidth_subset is None or bandwidth_subset > 0
self._mode = 'norm'
self.bandwidth_subset = bandwidth_subset
        self.random_indices = random_indices
self.bandwidth_factor = bandwidth_factor
self._random_weights = None
self._random_biases = None
@property
def mode(self):
return self._mode
def compute(self, particles, particle_info, loss_fn):
if self._random_weights is None:
self._random_weights = jnp.array(npr.randn(*particles.shape))
self._random_biases = jnp.array(npr.rand(*particles.shape) * 2 * np.pi)
factor = self.bandwidth_factor(particles.shape[0])
if self.bandwidth_subset is not None:
particles = particles[npr.choice(particles.shape[0], self.bandwidth_subset)]
diffs = jnp.expand_dims(particles, axis=0) - jnp.expand_dims(particles, axis=1)
if particles.ndim >= 2:
diffs = safe_norm(diffs, ord=2, axis=-1)
diffs = jnp.reshape(diffs, (diffs.shape[0] * diffs.shape[1], -1))
if diffs.ndim >= 2:
diff_norms = safe_norm(diffs, ord=2, axis=-1)
else:
diff_norms = diffs
median = jnp.argsort(diff_norms)[int(diffs.shape[0] / 2)]
bandwidth = jnp.abs(diffs)[median] ** 2 * factor + 1e-5
def feature(x, w, b):
return jnp.sqrt(2) * jnp.cos((x @ w + b) / bandwidth)
def kernel(x, y):
ws = self._random_weights if self.random_indices is None else self._random_weights[self.random_indices]
bs = self._random_biases if self.random_indices is None else self._random_biases[self.random_indices]
return jnp.sum(jax.vmap(lambda w, b: feature(x, w, b) * feature(y, w, b))(ws, bs))
return kernel
class MixtureKernel(SteinKernel):
def __init__(self, ws: List[float], kernel_fns: List[SteinKernel]):
assert len(ws) == len(kernel_fns)
assert len(kernel_fns) > 1
assert all(kf.mode == kernel_fns[0].mode for kf in kernel_fns)
self.ws = ws
self.kernel_fns = kernel_fns
@property
def mode(self):
return self.kernel_fns[0].mode
def compute(self, particles, particle_info, loss_fn):
kernels = [kf.compute(particles, particle_info, loss_fn) for kf in self.kernel_fns]
def kernel(x, y):
res = self.ws[0] * kernels[0](x, y)
for w, k in zip(self.ws[1:], kernels[1:]):
res = res + w * k(x, y)
return res
return kernel
class HessianPrecondMatrix(PrecondMatrix):
def compute(self, particles, loss_fn):
hessian = -jax.vmap(jax.hessian(loss_fn))(particles)
return hessian
class PrecondMatrixKernel(SteinKernel):
def __init__(self, precond_matrix_fn: PrecondMatrix, inner_kernel_fn: SteinKernel,
precond_mode='anchor_points'):
assert inner_kernel_fn.mode == 'matrix'
assert precond_mode == 'const' or precond_mode == 'anchor_points'
self.precond_matrix_fn = precond_matrix_fn
self.inner_kernel_fn = inner_kernel_fn
self.precond_mode = precond_mode
@property
def mode(self):
return 'matrix'
def compute(self, particles, particle_info, loss_fn):
qs = self.precond_matrix_fn.compute(particles, loss_fn)
if self.precond_mode == 'const':
qs = jnp.expand_dims(jnp.mean(qs, axis=0), axis=0)
qs_inv = jnp.linalg.inv(qs)
qs_sqrt = sqrth(qs)
qs_inv_sqrt = sqrth(qs_inv)
inner_kernel = self.inner_kernel_fn.compute(particles, particle_info, loss_fn)
def kernel(x, y):
if self.precond_mode == 'const':
wxs = jnp.array([1.])
wys = jnp.array([1.])
else:
wxs = jax.nn.softmax(
jax.vmap(lambda z, q_inv: dist.MultivariateNormal(z, posdef(q_inv)).log_prob(x))(particles, qs_inv))
wys = jax.nn.softmax(
jax.vmap(lambda z, q_inv: dist.MultivariateNormal(z, posdef(q_inv)).log_prob(y))(particles, qs_inv))
return jnp.sum(
jax.vmap(lambda qs, qis, wx, wy: wx * wy * (qis @ inner_kernel(qs @ x, qs @ y) @ qis.transpose()))(
qs_sqrt, qs_inv_sqrt, wxs, wys), axis=0)
return kernel
class GraphicalKernel(SteinKernel):
def __init__(self, local_kernel_fns: Dict[str, SteinKernel] = None, default_kernel_fn: SteinKernel = RBFKernel()):
self.local_kernel_fns = local_kernel_fns if local_kernel_fns is not None else {}
self.default_kernel_fn = default_kernel_fn
@property
def mode(self):
return 'matrix'
def compute(self, particles, particle_info, loss_fn):
local_kernels = []
for pk, (start_idx, end_idx) in particle_info.items():
pk_kernel_fn = self.local_kernel_fns.get(pk, self.default_kernel_fn)
pk_loss_fn = lambda ps: loss_fn(
jnp.concatenate([particles[:, :start_idx], ps, particles[:, end_idx:]], axis=-1))
pk_kernel = pk_kernel_fn.compute(particles[:, start_idx:end_idx], {pk: (0, end_idx - start_idx)},
pk_loss_fn)
local_kernels.append((pk_kernel, pk_kernel_fn.mode, start_idx, end_idx))
def kernel(x, y):
kernel_res = []
for kernel, mode, start_idx, end_idx in local_kernels:
v = kernel(x[start_idx:end_idx], y[start_idx:end_idx])
if mode == 'norm':
v = v * jnp.identity(end_idx - start_idx)
elif mode == 'vector':
v = jnp.diag(v)
kernel_res.append(v)
return jax.scipy.linalg.block_diag(*kernel_res)
return kernel
| true
| true
|
1c43236975e8ef43c2b62f02abe76fd6e5c37eed
| 1,369
|
py
|
Python
|
platforms/m3/pre_v21e/software/mbc_code/triggers/auto_time_gen.py
|
lab11/M-ulator
|
95b49c6194678c74accca4a20af71380efbcac5f
|
[
"Apache-2.0",
"MIT"
] | 19
|
2015-01-26T10:47:23.000Z
|
2021-08-13T11:07:54.000Z
|
platforms/m3/pre_v21e/software/mbc_code_v6_3/triggers/auto_time_gen.py
|
lab11/M-ulator
|
95b49c6194678c74accca4a20af71380efbcac5f
|
[
"Apache-2.0",
"MIT"
] | 14
|
2015-08-24T02:35:46.000Z
|
2021-05-05T03:53:44.000Z
|
platforms/m3/pre_v21e/software/mbc_code/triggers/auto_time_gen.py
|
lab11/M-ulator
|
95b49c6194678c74accca4a20af71380efbcac5f
|
[
"Apache-2.0",
"MIT"
] | 9
|
2015-05-27T23:27:35.000Z
|
2020-10-05T22:02:43.000Z
|
import time
from datetime import datetime
import os
import sys
from file_gen import set_trigger
import yaml
trigger_dir = sys.argv[1]
out_dir = os.path.dirname(os.path.abspath(__file__)) + '/' + trigger_dir + '/'
config_file = out_dir + 'trigger_configs.yaml'
with open(config_file, 'r') as file:
l = yaml.load(file, Loader=yaml.FullLoader)
while True:
s = datetime.now().time()
if(s.hour >= 23 and s.minute >= 58):
print('Current time is: {}. Sleeping for 2 minutes'.format(s))
time.sleep(120)
else:
break
if(s.second >= 54):
print('Waiting for next minute')
time.sleep(10)
s = datetime.now().time()
time.sleep(55 - (s.second + s.microsecond / 1000000))
# def set_trigger(filename, val):
# print(out_dir + filename + '.bat')
# with open(out_dir + filename + '.bat', 'w') as f:
# f.write('call SET_GOC_SPEED.bat\ncall SET_COM.bat\nm3_ice -y -s %COM% goc -d %GOC_DELAY% -V3 -g %GOC_SPEED_PR% message 0000008C {}\n'.format(format(val, 'x').zfill(8)))
###################### 0x0B ##########################
op_name = 'xo_day_time_in_sec'
num = 0x0B
filename = 'GOC-0x{}-{}'.format(format(num, 'x').zfill(2).upper(), op_name)
val = (num << 24)
filename1 = 'write-auto-time'
H = s.hour
M = s.minute + 1
val1 = val | (1 << 23)
val1 |= (H << 6)
val1 |= M
set_trigger(trigger_dir, filename1, val1, l)
| 26.326923
| 178
| 0.622352
|
import time
from datetime import datetime
import os
import sys
from file_gen import set_trigger
import yaml
trigger_dir = sys.argv[1]
out_dir = os.path.dirname(os.path.abspath(__file__)) + '/' + trigger_dir + '/'
config_file = out_dir + 'trigger_configs.yaml'
with open(config_file, 'r') as file:
l = yaml.load(file, Loader=yaml.FullLoader)
while True:
s = datetime.now().time()
if(s.hour >= 23 and s.minute >= 58):
print('Current time is: {}. Sleeping for 2 minutes'.format(s))
time.sleep(120)
else:
break
if(s.second >= 54):
print('Waiting for next minute')
time.sleep(10)
s = datetime.now().time()
time.sleep(55 - (s.second + s.microsecond / 1000000))
| true
| true
|
1c432438bcef24f17a6ee9fccf6bda104862d862
| 17,213
|
py
|
Python
|
tensorflow_graphics/geometry/convolution/utils.py
|
prafael18/graphics
|
2f250a53431697cfb43fd1edf61a2d965b20c596
|
[
"Apache-2.0"
] | 2
|
2021-01-06T03:24:47.000Z
|
2021-01-07T06:39:54.000Z
|
tensorflow_graphics/geometry/convolution/utils.py
|
prafael18/graphics
|
2f250a53431697cfb43fd1edf61a2d965b20c596
|
[
"Apache-2.0"
] | 1
|
2021-02-24T10:36:11.000Z
|
2021-02-24T10:36:11.000Z
|
tensorflow_graphics/geometry/convolution/utils.py
|
isabella232/graphics-1
|
d5c26cf05125e5c096f5b2cde6c85f88c7df2d59
|
[
"Apache-2.0"
] | 1
|
2021-10-11T09:10:56.000Z
|
2021-10-11T09:10:56.000Z
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements various sparse data utilities for graphs and meshes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_graphics.util import shape
def _is_dynamic_shape(tensors):
"""Helper function to test if any tensor in a list has a dynamic shape.
Args:
tensors: A list or tuple of tensors with shapes to test.
Returns:
True if any tensor in the list has a dynamic shape, False otherwise.
"""
if not isinstance(tensors, (list, tuple)):
raise ValueError("'tensors' must be list of tuple.")
return not all([shape.is_static(tensor.shape) for tensor in tensors])
def check_valid_graph_convolution_input(data, neighbors, sizes):
"""Checks that the inputs are valid for graph convolution ops.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
data: A `float` tensor with shape `[A1, ..., An, V1, V2]`.
neighbors: A SparseTensor with the same type as `data` and with shape `[A1,
..., An, V1, V1]`.
sizes: An `int` tensor of shape `[A1, ..., An]`. Optional, can be `None`.
Raises:
TypeError: if the input types are invalid.
ValueError: if the input dimensions are invalid.
"""
if not data.dtype.is_floating:
raise TypeError("'data' must have a float type.")
if neighbors.dtype != data.dtype:
raise TypeError("'neighbors' and 'data' must have the same type.")
if sizes is not None and not sizes.dtype.is_integer:
raise TypeError("'sizes' must have an integer type.")
if not isinstance(neighbors, tf.sparse.SparseTensor):
raise ValueError("'neighbors' must be a SparseTensor.")
data_ndims = data.shape.ndims
shape.check_static(tensor=data, tensor_name="data", has_rank_greater_than=1)
shape.check_static(
tensor=neighbors, tensor_name="neighbors", has_rank=data_ndims)
if not _is_dynamic_shape(tensors=(data, neighbors)):
shape.compare_dimensions(
tensors=(data, neighbors, neighbors),
tensor_names=("data", "neighbors", "neighbors"),
axes=(-2, -2, -1))
if sizes is None:
shape.compare_batch_dimensions(
tensors=(data, neighbors),
tensor_names=("data", "neighbors"),
last_axes=-3,
broadcast_compatible=False)
else:
shape.check_static(
tensor=sizes, tensor_name="sizes", has_rank=data_ndims - 2)
shape.compare_batch_dimensions(
tensors=(data, neighbors, sizes),
tensor_names=("data", "neighbors", "sizes"),
last_axes=(-3, -3, -1),
broadcast_compatible=False)
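# Editor's hedged sketch (not part of the original module): inputs that satisfy
# the check above -- a batch of 2 graphs with V1 = 4 vertices and 3 channels,
# using a sparse identity matrix as each graph's neighborhood.
def _example_check_graph_convolution_input():
  data = tf.zeros((2, 4, 3))
  indices = [[b, v, v] for b in range(2) for v in range(4)]
  neighbors = tf.SparseTensor(indices, tf.ones(8), dense_shape=(2, 4, 4))
  check_valid_graph_convolution_input(data, neighbors, sizes=None)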
def check_valid_graph_pooling_input(data, pool_map, sizes):
"""Checks that the inputs are valid for graph pooling.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
data: A `float` tensor with shape `[A1, ..., An, V1, C]`.
pool_map: A SparseTensor with the same type as `data` and with shape `[A1,
..., An, V2, V1]`.
sizes: An `int` tensor of shape `[A1, ..., An, 2]`. Can be `None`.
Raises:
TypeError: if the input types are invalid.
ValueError: if the input dimensions are invalid.
"""
if not data.dtype.is_floating:
raise TypeError("'data' must have a float type.")
if pool_map.dtype != data.dtype:
raise TypeError("'pool_map' and 'data' must have the same type.")
if sizes is not None and not sizes.dtype.is_integer:
raise TypeError("'sizes' must have an integer type.")
if not isinstance(pool_map, tf.sparse.SparseTensor):
raise ValueError("'pool_map' must be a SparseTensor.")
data_ndims = data.shape.ndims
shape.check_static(tensor=data, tensor_name="data", has_rank_greater_than=1)
shape.check_static(
tensor=pool_map, tensor_name="pool_map", has_rank=data_ndims)
if not _is_dynamic_shape(tensors=(data, pool_map)):
shape.compare_dimensions(
tensors=(data, pool_map),
tensor_names=("data", "pool_map"),
axes=(-2, -1))
if sizes is None:
shape.compare_batch_dimensions(
tensors=(data, pool_map),
tensor_names=("data", "pool_map"),
last_axes=-3,
broadcast_compatible=False)
else:
shape.check_static(
tensor=sizes, tensor_name="sizes", has_rank=data_ndims - 1)
shape.compare_batch_dimensions(
tensors=(data, pool_map, sizes),
tensor_names=("data", "pool_map", "sizes"),
last_axes=(-3, -3, -2),
broadcast_compatible=False)
def check_valid_graph_unpooling_input(data, pool_map, sizes):
"""Checks that the inputs are valid for graph unpooling.
Note:
In the following, A1 to A3 are optional batch dimensions.
Args:
data: A `float` tensor with shape `[A1, ..., A3, V1, C]`.
pool_map: A `SparseTensor` with the same type as `data` and with shape `[A1,
..., A3, V1, V2]`.
sizes: An `int` tensor of shape `[A1, ..., A3, 2]`. Can be `None`.
Raises:
TypeError: if the input types are invalid.
ValueError: if the input dimensions are invalid.
"""
if not data.dtype.is_floating:
raise TypeError("'data' must have a float type.")
if pool_map.dtype != data.dtype:
raise TypeError("'pool_map' and 'data' must have the same type.")
if sizes is not None and not sizes.dtype.is_integer:
raise TypeError("'sizes' must have an integer type.")
if not isinstance(pool_map, tf.sparse.SparseTensor):
raise ValueError("'pool_map' must be a SparseTensor.")
data_ndims = data.shape.ndims
shape.check_static(tensor=data, tensor_name="data", has_rank_greater_than=1)
shape.check_static(tensor=data, tensor_name="data", has_rank_less_than=6)
shape.check_static(
tensor=pool_map, tensor_name="pool_map", has_rank=data_ndims)
if not _is_dynamic_shape(tensors=(data, pool_map)):
shape.compare_dimensions(
tensors=(data, pool_map),
tensor_names=("data", "pool_map"),
axes=(-2, -2))
if sizes is None:
shape.compare_batch_dimensions(
tensors=(data, pool_map),
tensor_names=("data", "pool_map"),
last_axes=-3,
broadcast_compatible=False)
else:
shape.check_static(
tensor=sizes, tensor_name="sizes", has_rank=data_ndims - 1)
shape.compare_batch_dimensions(
tensors=(data, pool_map, sizes),
tensor_names=("data", "pool_map", "sizes"),
last_axes=(-3, -3, -2),
broadcast_compatible=False)
def flatten_batch_to_2d(data, sizes=None, name=None):
"""Reshapes a batch of 2d Tensors by flattening across the batch dimensions.
Note:
In the following, A1 to An are optional batch dimensions.
A tensor with shape `[A1, ..., An, D1, D2]` will be reshaped to one
with shape `[A1*...*An*D1, D2]`. This function also returns an inverse
function that returns any tensor with shape `[A1*...*An*D1, D3]` to one
with shape `[A1, ..., An, D1, D3]`.
Padded inputs in dimension D1 are allowed. `sizes` determines the first
elements from D1 to select from each batch dimension.
Examples:
```python
data = [[[1., 2.], [3., 4.]],
[[5., 6.], [7., 8.]],
[[9., 10.], [11., 12.]]]
sizes = None
    output = flatten_batch_to_2d(data, sizes)
print(output)
>>> [[1., 2.], [3., 4.], [5., 6.], [7., 8.], [9., 10.], [11., 12.]]
data = [[[1., 2.], [0., 0.]],
[[5., 6.], [7., 8.]],
[[9., 10.], [0., 0.]]]
sizes = [1, 2, 1]
    output = flatten_batch_to_2d(data, sizes)
print(output)
>>> [[1., 2.], [5., 6.], [7., 8.], [9., 10.]]
```
Args:
data: A tensor with shape `[A1, ..., An, D1, D2]`.
sizes: An `int` tensor with shape `[A1, ..., An]`. Can be `None`. `sizes[i]
<= D1`.
name: A name for this op. Defaults to 'utils_flatten_batch_to_2d'.
Returns:
A tensor with shape `[A1*...*An*D1, D2]` if `sizes == None`, otherwise a
tensor with shape `[sum(sizes), D2]`.
A function that reshapes a tensor with shape `[A1*...*An*D1, D3]` to a
tensor with shape `[A1, ..., An, D1, D3]` if `sizes == None`, otherwise
it reshapes a tensor with shape `[sum(sizes), D3]` to one with shape
    `[A1, ..., An, D1, D3]`.
Raises:
ValueError: if the input tensor dimensions are invalid.
"""
with tf.compat.v1.name_scope(name, "utils_flatten_batch_to_2d",
[data, sizes]):
data = tf.convert_to_tensor(value=data)
if sizes is not None:
sizes = tf.convert_to_tensor(value=sizes)
if sizes is not None and not sizes.dtype.is_integer:
raise TypeError("'sizes' must have an integer type.")
shape.check_static(tensor=data, tensor_name="data", has_rank_greater_than=2)
if sizes is not None:
shape.check_static(
tensor=sizes, tensor_name="sizes", has_rank=data.shape.ndims - 2)
shape.compare_batch_dimensions(
tensors=(data, sizes),
tensor_names=("data", "sizes"),
last_axes=(-3, -1),
broadcast_compatible=False)
data_shape = tf.shape(input=data)
if sizes is None:
flat = tf.reshape(data, shape=(-1, data_shape[-1]))
def unflatten(flat, name=None):
"""Invert flatten_batch_to_2d."""
with tf.compat.v1.name_scope(name, "utils_unflatten", [flat]):
flat = tf.convert_to_tensor(value=flat)
output_shape = tf.concat((data_shape[:-1], tf.shape(input=flat)[-1:]),
axis=0)
return tf.reshape(flat, output_shape)
else:
# Create a mask for the desired rows in `data` to select for flattening:
# `mask` has shape `[A1, ..., An, D1]` and
# `mask[a1, ..., an, :] = [True, ..., True, False, ..., False]` where
# the number of True elements is `sizes[a1, ..., an]`.
mask = tf.sequence_mask(sizes, data_shape[-2])
mask_indices = tf.cast(tf.compat.v1.where(mask), tf.int32)
flat = tf.gather_nd(params=data, indices=mask_indices)
def unflatten(flat, name=None):
"""Invert flatten_batch_to_2d."""
with tf.compat.v1.name_scope(name, "utils_unflatten", [flat]):
flat = tf.convert_to_tensor(value=flat)
output_shape = tf.concat((data_shape[:-1], tf.shape(input=flat)[-1:]),
axis=0)
return tf.scatter_nd(
indices=mask_indices, updates=flat, shape=output_shape)
return flat, unflatten
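# Editor's hedged usage sketch: flatten a padded batch, transform the flat rows,
# then invert back to the padded batch layout (padded rows return as zeros).
def _example_flatten_roundtrip():
  data = tf.constant([[[1., 2.], [0., 0.]],
                      [[5., 6.], [7., 8.]]])
  sizes = tf.constant([1, 2])
  flat, unflatten = flatten_batch_to_2d(data, sizes)  # flat has shape [3, 2]
  return unflatten(2.0 * flat)  # shape [2, 2, 2], with zeros in the padded rows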
def unflatten_2d_to_batch(data, sizes, max_rows=None, name=None):
r"""Reshapes a 2d Tensor into a batch of 2d Tensors.
The `data` tensor with shape `[D1, D2]` will be mapped to a tensor with shape
`[A1, ..., An, max_rows, D2]` where `max_rows` defaults to `max(sizes)`.
`sizes` determines the segment of rows in the input that get mapped to a
particular batch dimension (`sum(sizes) == D1`).
Examples:
```python
data = [[1., 2.],
[3., 4.],
[5., 6.],
[7., 8.],
[9., 10.],
[11., 12.]]
sizes = [2, 3, 1]
output = unflatten_2d_to_batch(data, sizes, max_rows=None)
print(output.shape)
>>> [3, 3, 2]
print(output)
>>> [[[1., 2.],
[3., 4.],
[0., 0.]],
[[5., 6.],
[7., 8.],
[9., 10.]],
[[11., 12.],
[0., 0.],
[0., 0.]]]
output = unflatten_2d_to_batch(data, sizes, max_rows=4)
print(output.shape)
>>> [3, 4, 2]
print(output)
>>> [[[1., 2.],
[3., 4.],
[0., 0.],
[0., 0.]],
[[5., 6.],
[7., 8.],
[9., 10.],
[0., 0.]],
[[11., 12.],
[0., 0.],
[0., 0.],
[0., 0.]]]
```
Args:
data: A tensor with shape `[D1, D2]`.
sizes: An `int` tensor with shape `[A1, ..., An]`.
max_rows: An `int` specifying the maximum number of rows in the unflattened
output. `max_rows >= max(sizes)`.
name: A name for this op. Defaults to 'utils_unflatten_2d_to_batch'.
Returns:
A tensor with shape `[A1, A2, ..., max_rows, D2]`.
"""
with tf.compat.v1.name_scope(name, "utils_unflatten_2d_to_batch",
[data, sizes]):
data = tf.convert_to_tensor(value=data)
sizes = tf.convert_to_tensor(value=sizes)
if max_rows is None:
max_rows = tf.reduce_max(input_tensor=sizes)
else:
max_rows = tf.convert_to_tensor(value=max_rows)
shape.check_static(tensor=data, tensor_name="data", has_rank=2)
if not sizes.dtype.is_integer:
raise TypeError("'sizes' must have an integer type.")
mask = tf.sequence_mask(sizes, max_rows)
mask_indices = tf.cast(tf.compat.v1.where(mask), tf.int32)
output_shape = tf.concat(
(tf.shape(input=sizes), (max_rows,), tf.shape(input=data)[-1:]), axis=0)
return tf.scatter_nd(indices=mask_indices, updates=data, shape=output_shape)
def convert_to_block_diag_2d(data,
sizes=None,
validate_indices=False,
name=None):
"""Convert a batch of 2d SparseTensors to a 2d block diagonal SparseTensor.
Note:
In the following, A1 to An are optional batch dimensions.
A `SparseTensor` with dense shape `[A1, ..., An, D1, D2]` will be reshaped
to one with shape `[A1*...*An*D1, A1*...*An*D2]`.
Padded inputs in dims D1 and D2 are allowed. `sizes` indicates the un-padded
shape for each inner `[D1, D2]` matrix. The additional (padded) rows and
columns will be omitted in the block diagonal output.
If padded (`sizes != None`), the input should not contain any sparse indices
outside the bounds indicated by `sizes`. Setting `validate_indices=True` will
explicitly filter any invalid sparse indices before block diagonalization.
Args:
data: A `SparseTensor` with dense shape `[A1, ..., An, D1, D2]`.
sizes: A tensor with shape `[A1, ..., An, 2]`. Can be `None` (indicates no
padding). If not `None`, `sizes` indicates the true sizes (before padding)
of the inner dimensions of `data`.
validate_indices: A boolean. Ignored if `sizes==None`. If True,
out-of-bounds indices in `data` are explicitly ignored, otherwise
out-of-bounds indices will cause undefined behavior.
name: A name for this op. Defaults to 'utils_convert_to_block_diag_2d'.
Returns:
A 2d block-diagonal SparseTensor.
Raises:
TypeError: if the input types are invalid.
ValueError: if the input dimensions are invalid.
"""
with tf.compat.v1.name_scope(name, "utils_convert_to_block_diag_2d",
[data, sizes, validate_indices]):
data = tf.compat.v1.convert_to_tensor_or_sparse_tensor(value=data)
if sizes is not None:
sizes = tf.convert_to_tensor(value=sizes)
if not isinstance(data, tf.SparseTensor):
raise TypeError("'data' must be a 'SparseTensor'.")
if sizes is not None and not sizes.dtype.is_integer:
raise TypeError("'sizes' must have an integer type.")
shape.check_static(tensor=data, tensor_name="data", has_rank_greater_than=2)
if sizes is not None:
shape.check_static(
tensor=sizes,
tensor_name="sizes",
has_rank=data.shape.ndims - 1,
has_dim_equals=(-1, 2))
shape.compare_batch_dimensions(
tensors=(data, sizes),
tensor_names=("data", "sizes"),
last_axes=(-3, -2),
broadcast_compatible=False)
data_shape = tf.shape(input=data)
data = tf.sparse.reshape(data, [-1, data_shape[-2], data_shape[-1]])
indices = data.indices
if sizes is not None:
sizes = tf.cast(tf.reshape(sizes, shape=(-1, 2)), tf.int64)
if validate_indices:
in_bounds = ~tf.reduce_any(
input_tensor=indices[:, 1:] >= tf.gather(sizes, indices[:, 0]),
axis=-1)
indices = tf.boolean_mask(tensor=indices, mask=in_bounds)
values = tf.boolean_mask(tensor=data.values, mask=in_bounds)
else:
values = data.values
cumsum = tf.cumsum(sizes, axis=0, exclusive=True)
index_shift = tf.gather(cumsum, indices[:, 0])
indices = indices[:, 1:] + index_shift
block_diag = tf.SparseTensor(indices, values,
tf.reduce_sum(input_tensor=sizes, axis=0))
else:
data_shape = tf.shape(input=data, out_type=tf.int64)
index_shift = tf.expand_dims(indices[:, 0], -1) * data_shape[1:]
indices = indices[:, 1:] + index_shift
block_diag = tf.SparseTensor(indices, data.values,
data_shape[0] * data_shape[1:])
return block_diag
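# Editor's hedged illustration: two 2x2 sparse blocks become one 4x4
# block-diagonal SparseTensor (no padding here, so `sizes` stays None).
def _example_block_diag():
  batch = tf.sparse.from_dense(tf.constant([[[1., 0.], [0., 2.]],
                                            [[3., 0.], [0., 4.]]]))
  return convert_to_block_diag_2d(batch)  # dense shape [4, 4]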
# API contains all public functions and classes.
__all__ = []
| 37.419565
| 81
| 0.630338
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_graphics.util import shape
def _is_dynamic_shape(tensors):
if not isinstance(tensors, (list, tuple)):
raise ValueError("'tensors' must be list of tuple.")
return not all([shape.is_static(tensor.shape) for tensor in tensors])
def check_valid_graph_convolution_input(data, neighbors, sizes):
if not data.dtype.is_floating:
raise TypeError("'data' must have a float type.")
if neighbors.dtype != data.dtype:
raise TypeError("'neighbors' and 'data' must have the same type.")
if sizes is not None and not sizes.dtype.is_integer:
raise TypeError("'sizes' must have an integer type.")
if not isinstance(neighbors, tf.sparse.SparseTensor):
raise ValueError("'neighbors' must be a SparseTensor.")
data_ndims = data.shape.ndims
shape.check_static(tensor=data, tensor_name="data", has_rank_greater_than=1)
shape.check_static(
tensor=neighbors, tensor_name="neighbors", has_rank=data_ndims)
if not _is_dynamic_shape(tensors=(data, neighbors)):
shape.compare_dimensions(
tensors=(data, neighbors, neighbors),
tensor_names=("data", "neighbors", "neighbors"),
axes=(-2, -2, -1))
if sizes is None:
shape.compare_batch_dimensions(
tensors=(data, neighbors),
tensor_names=("data", "neighbors"),
last_axes=-3,
broadcast_compatible=False)
else:
shape.check_static(
tensor=sizes, tensor_name="sizes", has_rank=data_ndims - 2)
shape.compare_batch_dimensions(
tensors=(data, neighbors, sizes),
tensor_names=("data", "neighbors", "sizes"),
last_axes=(-3, -3, -1),
broadcast_compatible=False)
def check_valid_graph_pooling_input(data, pool_map, sizes):
if not data.dtype.is_floating:
raise TypeError("'data' must have a float type.")
if pool_map.dtype != data.dtype:
raise TypeError("'pool_map' and 'data' must have the same type.")
if sizes is not None and not sizes.dtype.is_integer:
raise TypeError("'sizes' must have an integer type.")
if not isinstance(pool_map, tf.sparse.SparseTensor):
raise ValueError("'pool_map' must be a SparseTensor.")
data_ndims = data.shape.ndims
shape.check_static(tensor=data, tensor_name="data", has_rank_greater_than=1)
shape.check_static(
tensor=pool_map, tensor_name="pool_map", has_rank=data_ndims)
if not _is_dynamic_shape(tensors=(data, pool_map)):
shape.compare_dimensions(
tensors=(data, pool_map),
tensor_names=("data", "pool_map"),
axes=(-2, -1))
if sizes is None:
shape.compare_batch_dimensions(
tensors=(data, pool_map),
tensor_names=("data", "pool_map"),
last_axes=-3,
broadcast_compatible=False)
else:
shape.check_static(
tensor=sizes, tensor_name="sizes", has_rank=data_ndims - 1)
shape.compare_batch_dimensions(
tensors=(data, pool_map, sizes),
tensor_names=("data", "pool_map", "sizes"),
last_axes=(-3, -3, -2),
broadcast_compatible=False)
def check_valid_graph_unpooling_input(data, pool_map, sizes):
if not data.dtype.is_floating:
raise TypeError("'data' must have a float type.")
if pool_map.dtype != data.dtype:
raise TypeError("'pool_map' and 'data' must have the same type.")
if sizes is not None and not sizes.dtype.is_integer:
raise TypeError("'sizes' must have an integer type.")
if not isinstance(pool_map, tf.sparse.SparseTensor):
raise ValueError("'pool_map' must be a SparseTensor.")
data_ndims = data.shape.ndims
shape.check_static(tensor=data, tensor_name="data", has_rank_greater_than=1)
shape.check_static(tensor=data, tensor_name="data", has_rank_less_than=6)
shape.check_static(
tensor=pool_map, tensor_name="pool_map", has_rank=data_ndims)
if not _is_dynamic_shape(tensors=(data, pool_map)):
shape.compare_dimensions(
tensors=(data, pool_map),
tensor_names=("data", "pool_map"),
axes=(-2, -2))
if sizes is None:
shape.compare_batch_dimensions(
tensors=(data, pool_map),
tensor_names=("data", "pool_map"),
last_axes=-3,
broadcast_compatible=False)
else:
shape.check_static(
tensor=sizes, tensor_name="sizes", has_rank=data_ndims - 1)
shape.compare_batch_dimensions(
tensors=(data, pool_map, sizes),
tensor_names=("data", "pool_map", "sizes"),
last_axes=(-3, -3, -2),
broadcast_compatible=False)
def flatten_batch_to_2d(data, sizes=None, name=None):
with tf.compat.v1.name_scope(name, "utils_flatten_batch_to_2d",
[data, sizes]):
data = tf.convert_to_tensor(value=data)
if sizes is not None:
sizes = tf.convert_to_tensor(value=sizes)
if sizes is not None and not sizes.dtype.is_integer:
raise TypeError("'sizes' must have an integer type.")
shape.check_static(tensor=data, tensor_name="data", has_rank_greater_than=2)
if sizes is not None:
shape.check_static(
tensor=sizes, tensor_name="sizes", has_rank=data.shape.ndims - 2)
shape.compare_batch_dimensions(
tensors=(data, sizes),
tensor_names=("data", "sizes"),
last_axes=(-3, -1),
broadcast_compatible=False)
data_shape = tf.shape(input=data)
if sizes is None:
flat = tf.reshape(data, shape=(-1, data_shape[-1]))
def unflatten(flat, name=None):
with tf.compat.v1.name_scope(name, "utils_unflatten", [flat]):
flat = tf.convert_to_tensor(value=flat)
output_shape = tf.concat((data_shape[:-1], tf.shape(input=flat)[-1:]),
axis=0)
return tf.reshape(flat, output_shape)
else:
mask = tf.sequence_mask(sizes, data_shape[-2])
mask_indices = tf.cast(tf.compat.v1.where(mask), tf.int32)
flat = tf.gather_nd(params=data, indices=mask_indices)
def unflatten(flat, name=None):
"""Invert flatten_batch_to_2d."""
with tf.compat.v1.name_scope(name, "utils_unflatten", [flat]):
flat = tf.convert_to_tensor(value=flat)
output_shape = tf.concat((data_shape[:-1], tf.shape(input=flat)[-1:]),
axis=0)
return tf.scatter_nd(
indices=mask_indices, updates=flat, shape=output_shape)
return flat, unflatten
def unflatten_2d_to_batch(data, sizes, max_rows=None, name=None):
with tf.compat.v1.name_scope(name, "utils_unflatten_2d_to_batch",
[data, sizes]):
data = tf.convert_to_tensor(value=data)
sizes = tf.convert_to_tensor(value=sizes)
if max_rows is None:
max_rows = tf.reduce_max(input_tensor=sizes)
else:
max_rows = tf.convert_to_tensor(value=max_rows)
shape.check_static(tensor=data, tensor_name="data", has_rank=2)
if not sizes.dtype.is_integer:
raise TypeError("'sizes' must have an integer type.")
mask = tf.sequence_mask(sizes, max_rows)
mask_indices = tf.cast(tf.compat.v1.where(mask), tf.int32)
output_shape = tf.concat(
(tf.shape(input=sizes), (max_rows,), tf.shape(input=data)[-1:]), axis=0)
return tf.scatter_nd(indices=mask_indices, updates=data, shape=output_shape)
def convert_to_block_diag_2d(data,
sizes=None,
validate_indices=False,
name=None):
with tf.compat.v1.name_scope(name, "utils_convert_to_block_diag_2d",
[data, sizes, validate_indices]):
data = tf.compat.v1.convert_to_tensor_or_sparse_tensor(value=data)
if sizes is not None:
sizes = tf.convert_to_tensor(value=sizes)
if not isinstance(data, tf.SparseTensor):
raise TypeError("'data' must be a 'SparseTensor'.")
if sizes is not None and not sizes.dtype.is_integer:
raise TypeError("'sizes' must have an integer type.")
shape.check_static(tensor=data, tensor_name="data", has_rank_greater_than=2)
if sizes is not None:
shape.check_static(
tensor=sizes,
tensor_name="sizes",
has_rank=data.shape.ndims - 1,
has_dim_equals=(-1, 2))
shape.compare_batch_dimensions(
tensors=(data, sizes),
tensor_names=("data", "sizes"),
last_axes=(-3, -2),
broadcast_compatible=False)
data_shape = tf.shape(input=data)
data = tf.sparse.reshape(data, [-1, data_shape[-2], data_shape[-1]])
indices = data.indices
if sizes is not None:
sizes = tf.cast(tf.reshape(sizes, shape=(-1, 2)), tf.int64)
if validate_indices:
in_bounds = ~tf.reduce_any(
input_tensor=indices[:, 1:] >= tf.gather(sizes, indices[:, 0]),
axis=-1)
indices = tf.boolean_mask(tensor=indices, mask=in_bounds)
values = tf.boolean_mask(tensor=data.values, mask=in_bounds)
else:
values = data.values
cumsum = tf.cumsum(sizes, axis=0, exclusive=True)
index_shift = tf.gather(cumsum, indices[:, 0])
indices = indices[:, 1:] + index_shift
block_diag = tf.SparseTensor(indices, values,
tf.reduce_sum(input_tensor=sizes, axis=0))
else:
data_shape = tf.shape(input=data, out_type=tf.int64)
index_shift = tf.expand_dims(indices[:, 0], -1) * data_shape[1:]
indices = indices[:, 1:] + index_shift
block_diag = tf.SparseTensor(indices, data.values,
data_shape[0] * data_shape[1:])
return block_diag
__all__ = []
| true
| true
|
1c43253d4f94cba7de19dd591bd349e829976301
| 20,956
|
py
|
Python
|
pensa/statesinfo/discrete_states.py
|
drorlab/pensa
|
0d4c138793d6e4f05f85cb9ece2bf4f0ddc1882f
|
[
"MIT"
] | 55
|
2020-11-18T07:03:46.000Z
|
2022-03-29T02:47:10.000Z
|
pensa/statesinfo/discrete_states.py
|
drorlab/pensa
|
0d4c138793d6e4f05f85cb9ece2bf4f0ddc1882f
|
[
"MIT"
] | 11
|
2020-11-18T16:43:43.000Z
|
2022-02-22T20:02:22.000Z
|
pensa/statesinfo/discrete_states.py
|
drorlab/pensa
|
0d4c138793d6e4f05f85cb9ece2bf4f0ddc1882f
|
[
"MIT"
] | 11
|
2020-11-19T04:34:36.000Z
|
2022-03-01T23:48:57.000Z
|
import numpy as np
from queue import PriorityQueue
import math
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.signal import argrelextrema
import os
from pensa.features import *
# -- Functions to cluster feature distributions into discrete states --
def _smooth(x,window_len,window=None):
"""
Smooth data so that true extrema can be found without any noise
Parameters
----------
x : list
Distribution to be smoothed.
window_len : int
number of bins to smooth over.
window : str, optional
Type of window to use for the smoothing. The default is None=Hanning.
Raises
------
ValueError
If window argument is not recognised.
Returns
-------
list
Smoothed distribution.
"""
    if window is None:
        window_type='hanning'
    else:
        window_type=window
if x.ndim != 1:
raise ValueError
if x.size < window_len:
raise ValueError
if window_len<3:
return x
    if window_type not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError
s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]
if window_type == 'flat': #moving average
w=np.ones(window_len,'d')
else:
w=eval('np.'+window_type+'(window_len)')
y=np.convolve(w/w.sum(),s,mode='valid')
return y
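# Editor's hedged example: smoothing a noisy sine with the default Hanning
# window. Note the output is window_len-1 samples longer than the input,
# a side effect of the reflection padding above.
def _example_smooth():
    noisy = np.sin(np.linspace(0, 2 * np.pi, 120)) + np.random.normal(0, 0.1, 120)
    return _smooth(noisy, window_len=10)  # length 129 = 120 + 10 - 1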
def _find_nearest(distr, value):
"""
Find the nearest value in a distribution to an arbitrary reference value.
Parameters
----------
distr : list
The distribution to locate a certain point within.
value : float
Reference value for locating within the distribution.
Returns
-------
float
Closest value to reference value in distribution.
"""
array = np.array(distr)
idx = (np.abs(array - value)).argmin()
return array[idx]
def _printKclosest(arr,n,x,k):
"""
Print K closest values to a specified value.
Parameters
----------
arr : list
The distribution of values.
n : int
Search through the first n values of arr for k closest values.
x : float
The reference value for which the closest values are sought.
k : int
Number of closest values desired.
Returns
-------
a : list
The closest k values to x.
"""
a=[]
# Make a max heap of difference with
# first k elements.
pq = PriorityQueue()
for neighb in range(k):
pq.put((-abs(arr[neighb]-x),neighb))
# Now process remaining elements
for neighb in range(k,n):
diff = abs(arr[neighb]-x)
p,pi = pq.get()
curr = -p
# If difference with current
# element is more than root,
# then put it back.
if diff>curr:
pq.put((-curr,pi))
continue
else:
# Else remove root and insert
pq.put((-diff,neighb))
# Print contents of heap.
while(not pq.empty()):
p,q = pq.get()
a.append(str("{} ".format(arr[q])))
return a
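# Editor's hedged example: the k values nearest to x come back as strings,
# which is how _gauss_fit below consumes them.
def _example_printKclosest():
    arr = np.array([0.1, 0.4, 0.35, 0.8])
    return _printKclosest(arr, n=len(arr), x=0.37, k=2)  # -> ['0.4 ', '0.35 ']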
def _gauss(x, x0, sigma, a):
"""
    Create a Gaussian distribution for a given x-axis linspace and Gaussian parameters.
Parameters
----------
x : list
x-axis distribution.
x0 : float
Mean x-value for Gaussian.
sigma : float
Gaussian sigma, related to FWHM.
a : float
Gaussian amplitude.
Returns
-------
gaussian : list
y-axis Gaussian distribution over the x-axis space.
"""
if sigma != 0:
gaussian = abs(a*np.exp(-(x-x0)**2/(2*sigma**2)))
return gaussian
def _bimodal(x,mu1,sigma1,A1,mu2,sigma2,A2):
""" Two gaussians """
return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)
def _trimodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3):
""" Three gaussians """
return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)
def _quadmodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4):
""" Four gaussians """
return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)
def _quinmodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4,mu5,sigma5,A5):
""" Five gaussians """
return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)+_gauss(x,mu5,sigma5,A5)
def _sexmodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4,mu5,sigma5,A5,mu6,sigma6,A6):
""" Six gaussians """
return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)+_gauss(x,mu5,sigma5,A5)+_gauss(x,mu6,sigma6,A6)
def _septmodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4,mu5,sigma5,A5,mu6,sigma6,A6,mu7,sigma7,A7):
""" Seven gaussians """
return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)+_gauss(x,mu5,sigma5,A5)+_gauss(x,mu6,sigma6,A6)+_gauss(x,mu7,sigma7,A7)
def _octomodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4,mu5,sigma5,A5,mu6,sigma6,A6,mu7,sigma7,A7,mu8,sigma8,A8):
""" Eight gaussians """
return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)+_gauss(x,mu5,sigma5,A5)+_gauss(x,mu6,sigma6,A6)+_gauss(x,mu7,sigma7,A7)+_gauss(x,mu8,sigma8,A8)
def _nonamodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4,mu5,sigma5,A5,mu6,sigma6,A6,mu7,sigma7,A7,mu8,sigma8,A8,mu9,sigma9,A9):
""" Nine gaussians """
return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)+_gauss(x,mu5,sigma5,A5)+_gauss(x,mu6,sigma6,A6)+_gauss(x,mu7,sigma7,A7)+_gauss(x,mu8,sigma8,A8)+_gauss(x,mu9,sigma9,A9)
def _decamodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4,mu5,sigma5,A5,mu6,sigma6,A6,mu7,sigma7,A7,mu8,sigma8,A8,mu9,sigma9,A9,mu10,sigma10,A10):
""" Ten gaussians """
return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)+_gauss(x,mu5,sigma5,A5)+_gauss(x,mu6,sigma6,A6)+_gauss(x,mu7,sigma7,A7)+_gauss(x,mu8,sigma8,A8)+_gauss(x,mu9,sigma9,A9)+_gauss(x,mu10,sigma10,A10)
def _integral(x, mu, sigma, A):
"""
Gaussian integral for evaluating state probabilities. Integration between
negative infinity and x.
Parameters
----------
x : float
Upper limit for integral.
mu : float
Gaussian mean.
sigma : float
Gaussian sigma.
A : float
Gaussian amplitude.
Returns
-------
integral : float
Area under Gaussian from negative infinity to x.
"""
integral = (A/2) * (1 + math.erf((x - mu) / (sigma * np.sqrt(2))))
return integral
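# Editor's hedged worked example: the probability mass of a state bounded by
# [a, b] under one fitted Gaussian is the difference of two such integrals,
# which is how _gauss_fit below screens out negligible components.
def _example_state_probability():
    mu, sigma, A = 0.0, 1.0, 1.0
    return _integral(1.0, mu, sigma, A) - _integral(-1.0, mu, sigma, A)  # ~0.683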
def _gauss_fit(distribution, traj1_len, gauss_bin, gauss_smooth):
"""
Obtaining the gaussians to fit the distribution into a Gaussian mix.
Bin number is chosen based on 3 degree resolution (120 bins for 360 degrees)
Parameters
----------
    distribution : list
        Distribution of interest for the fitting.
    traj1_len : int
        Length of the first trajectory's data, used to split `distribution`
        into its two constituent trajectories before histogramming.
gauss_bin : int
Bin the distribution into gauss_bin bins.
gauss_smooth : int
Smooth the distribution according to a Hanning window length of gauss_smooth.
Returns
-------
gaussians : list
y-axis values for the Gaussian distribution.
    Gauss_xvals : list
        x-axis values for the Gaussian distribution.
"""
distr1 = distribution[:traj1_len]
distr2 = distribution[traj1_len:]
histox = np.histogram(distribution, bins=gauss_bin, density=True)[1]
histo1 = np.histogram(distr1, bins=gauss_bin, range=(min(histox),max(histox)), density=True)[0]
histo2 = np.histogram(distr2, bins=gauss_bin, range=(min(histox),max(histox)), density=True)[0]
combined_histo = [(height1 + height2)/2 for height1,height2 in zip(histo1,histo2)]
distributionx = _smooth(histox[0:-1], gauss_smooth)
    ## Setting histogram minimum to zero with uniform linear shift (for noisy distributions)
distributiony = _smooth(combined_histo-min(combined_histo), gauss_smooth)
maxima = [distributiony[item] for item in argrelextrema(distributiony, np.greater)][0]
## Obtain Gaussian guess params
mean_pop=[]
sigma_pop=[]
num_closest_neighb=28
## Locate sigma from FWHM for each maxima
sig_vals=[]
for extrema in maxima:
## Finding closest values to half maximum
closest_yvals = _printKclosest(distributiony, len(distributiony), extrema*0.5, num_closest_neighb)
closest_xvals = [np.where(distributiony==float(closesty))[0][0] for closesty in closest_yvals]
mean_xval = distributionx[np.where(distributiony==extrema)[0][0]]
half_max_xval = _find_nearest(distributionx[closest_xvals],mean_xval)
FWHM = np.absolute(half_max_xval - mean_xval)
sigma = FWHM /(2*(np.sqrt(2*np.log(2))))
sig_vals.append(sigma)
##the mean x of the gaussian is the value of x at the peak of y
mean_vals=[distributionx[np.where(distributiony==extrema)[0][0]] for extrema in maxima]
for extr_num in range(len(maxima)):
mean_pop.append(mean_vals[extr_num])
sigma_pop.append(sig_vals[extr_num])
##x is the space of angles
Gauss_xvals=np.linspace(min(distribution),max(distribution),10000)
##choosing the fitting mode
peak_number=[_gauss,_bimodal,_trimodal,_quadmodal,_quinmodal,_sexmodal,_septmodal,_octomodal,_nonamodal,_decamodal]
mode=peak_number[len(sig_vals)-1]
expected=[]
for param_num in range(len(mean_pop)):
expected.append(mean_pop[param_num])
expected.append(sigma_pop[param_num])
expected.append(maxima[param_num])
params, cov = curve_fit(mode,distributionx,distributiony,expected,maxfev=1000000)
gaussians=[]
gauss_num_space=np.linspace(0,(len(params))-3,int(len(params)/3))
for gauss_index in gauss_num_space:
intmax = _integral(max(distribution),
params[0+int(gauss_index)],
params[1+int(gauss_index)],
params[2+int(gauss_index)])
intmin = _integral(min(distribution),
params[0+int(gauss_index)],
params[1+int(gauss_index)],
params[2+int(gauss_index)])
if np.abs(intmax-intmin)>0.02:
gaussians.append(_gauss(Gauss_xvals,
params[0+int(gauss_index)],
params[1+int(gauss_index)],
params[2+int(gauss_index)]))
return gaussians, Gauss_xvals
def smart_gauss_fit(distr, traj1_len, gauss_bins=180, gauss_smooth=None, write_name=None):
"""
Obtaining the gaussians to fit the distribution into a Gaussian mix.
Bin number automatically adjusted if the Gaussian fit experiences errors.
Parameters
----------
    distr : list
        Distribution of interest for the fitting.
    traj1_len : int
        Length of the first trajectory's data, used to split `distr` when
        histogramming.
gauss_bins : int, optional
Bin the distribution into gauss_bin bins. The default is 180.
gauss_smooth : int, optional
Smooth the distribution according to a Hanning window length of gauss_smooth.
The default is ~10% of gauss_bins.
write_name : str, optional
Used in warning to notify which feature has had binning altered during clustering.
The default is None.
Returns
-------
gaussians : list
y-axis values for the Gaussian distribution.
    Gauss_xvals : list
        x-axis values for the Gaussian distribution.
"""
smooth_origin = gauss_smooth
bin_origin = gauss_bins
if gauss_smooth is None:
gauss_smooth = int(gauss_bins*0.10)
trial = 0
attempt_no = 0
##making a list of +/- values for bin trials to ensure minimal change
bin_adjust_up = np.array(range(1,10000))
bin_adjust_down = bin_adjust_up.copy()*-1
bin_adjust = np.insert(bin_adjust_up, np.arange(len(bin_adjust_down)), bin_adjust_down)
##if clustering does not work for a given bin number then adjust the bin number
while trial < 1:
try:
gaussians, Gauss_xvals = _gauss_fit(distr, traj1_len, gauss_bins, gauss_smooth)
trial += 1
except:
attempt_no += 1
trial = 0
gauss_bins = bin_origin + bin_adjust[attempt_no]
##only warn about clustering changes if specific parameters were input
if bin_origin != 180 or smooth_origin is not None:
if attempt_no > 0.1*bin_origin:
if write_name is None:
print('Warning: Altered gauss_bins by >10% for clustering.\nYou might want to check cluster plot.')
else:
print('Warning: Altered gauss_bins by >10% for clustering of '+write_name+'.\nYou might want to check cluster plot.')
return gaussians, Gauss_xvals
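# Editor's hedged usage sketch: fit a clearly bimodal synthetic feature; the
# retry loop above nudges gauss_bins if the initial fit fails.
def _example_smart_gauss_fit():
    distr = list(np.concatenate([np.random.normal(-1, 0.2, 500),
                                 np.random.normal(1, 0.2, 500)]))
    gaussians, xvals = smart_gauss_fit(distr, traj1_len=500)
    return len(gaussians)  # ideally 2 Gaussians survive the area check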
def get_intersects(gaussians, distribution, Gauss_xvals, write_plots=None,write_name=None):
"""
Obtain the intersects of a mixture of Gaussians which have been obtained
from decomposing a distribution into Gaussians. Additional state limits are
added at the beginning and end of the distribution.
Parameters
----------
gaussians : list of lists
A list of X gaussians.
distribution : list
The distribution that Gaussians have been obtained from.
    Gauss_xvals : list
        The x-axis linspace that the distribution spans.
write_plots : bool, optional
If true, visualise the states over the raw distribution. The default is None.
write_name : str, optional
Filename for write_plots. The default is None.
Returns
-------
all_intersects : list
All the Gaussian intersects.
"""
##adding the minimum angle value as the first boundary
all_intersects=[min(distribution)]
mean_gauss_xval=[]
for gauss_num in range(len(gaussians)):
mean_gauss_xval.append(Gauss_xvals[list(gaussians[gauss_num]).index(max(gaussians[gauss_num]))])
##sort gaussians in order of their mean xval
reorder_gaussians=[gaussians[mean_gauss_xval.index(mean)] for mean in sorted(mean_gauss_xval)]
# reorder_gaussians=[gaussians[gauss_num] for gauss_num in reorder_indices]
for gauss_index in range(len(reorder_gaussians)-1):
##Find indices between neighbouring gaussians
idx = np.argwhere(np.diff(np.sign(reorder_gaussians[gauss_index] - reorder_gaussians[gauss_index+1]))).flatten()
if len(idx)==1:
all_intersects.append(float(Gauss_xvals[idx][0]) )
elif len(idx)!=0:
## Select the intersect with the maximum probability
intersect_ymax=max([reorder_gaussians[gauss_index][intersect] for intersect in idx])
intersect_ymax_index=[item for item in idx if reorder_gaussians[gauss_index][item]==intersect_ymax]
all_intersects.append(float(Gauss_xvals[intersect_ymax_index]))
## For gaussian neighbours that don't intersect, set state limit as center between maxima
elif len(idx)==0:
gauss_max1=list(reorder_gaussians[gauss_index]).index(max(reorder_gaussians[gauss_index]))
gauss_max2=list(reorder_gaussians[gauss_index+1]).index(max(reorder_gaussians[gauss_index+1]))
intersect = 0.5* np.abs(Gauss_xvals[gauss_max2] + Gauss_xvals[gauss_max1])
all_intersects.append(float(intersect))
all_intersects.append(max(distribution))
if write_plots is True:
if not os.path.exists('ssi_plots/'):
os.makedirs('ssi_plots/')
plt.figure()
plt.ion()
plt.hist(distribution,bins=360, density=True, alpha=0.5)
for gauss_index in range(len(reorder_gaussians)):
plt.plot(Gauss_xvals, reorder_gaussians[gauss_index], lw=2)
for intersect_index in range(len(all_intersects)):
plt.axvline(all_intersects[intersect_index],color='k',lw=1,ls='--')
plt.xlabel('Radians')
plt.ylabel('Count')
plt.title(write_name)
plt.ioff()
plt.savefig('ssi_plots/'+write_name+".png")
plt.close()
return all_intersects
def determine_state_limits(distr, traj1_len, gauss_bins=180, gauss_smooth=None, write_plots=None, write_name=None):
"""
Cluster a distribution into discrete states with well-defined limits.
The function handles both residue angle distributions and water
distributions. For waters, the assignment of an additional non-angular
state is performed if changes in pocket occupancy occur. The clustering
requires that the distribution can be decomposed to a mixture of Gaussians.
Parameters
----------
    distr : list
        Distribution for specific feature.
    traj1_len : int
        Length of the first trajectory's data, used to split `distr` when
        histogramming.
gauss_bins : int, optional
Number of histogram bins to assign for the clustering algorithm.
The default is 180.
gauss_smooth : int, optional
Number of bins to perform smoothing over. The default is ~10% of gauss_bins.
write_plots : bool, optional
If true, visualise the states over the raw distribution. The default is None.
write_name : str, optional
Filename for write_plots. The default is None.
Returns
-------
list
State intersects for each cluster in numerical order.
"""
new_dist=distr.copy()
distribution=[item for item in new_dist if item != 10000.0]
##obtaining the gaussian fit
gaussians, Gauss_xvals = smart_gauss_fit(distribution, traj1_len, gauss_bins, gauss_smooth, write_name)
##discretising each state by gaussian intersects
intersection_of_states = get_intersects(gaussians, distribution, Gauss_xvals, write_plots, write_name)
if distr.count(10000.0)>=1:
intersection_of_states.append(20000.0)
order_intersect=np.sort(intersection_of_states)
return list(order_intersect)
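# Editor's hedged usage sketch: state limits for the same synthetic bimodal
# feature; expect roughly [min, an intersect near 0, max].
def _example_determine_state_limits():
    distr = list(np.concatenate([np.random.normal(-1, 0.2, 500),
                                 np.random.normal(1, 0.2, 500)]))
    return determine_state_limits(distr, traj1_len=500)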
# -- Functions to operate on discrete states --
def _check(value,x,y):
"""
Check if a value is between x and y
Parameters
----------
value : float
Value of interest.
x : float
Limit x.
y : float
Limit y.
Returns
-------
int
Numerical bool if value is between limits x and y.
"""
if x <= value <= y:
return 1
else:
return 0
def calculate_entropy(state_limits,distribution_list):
"""
Calculate the Shannon entropy of a distribution as the summation of all
-p*log(p) where p refers to the probability of a conformational state.
Parameters
----------
state_limits : list of lists
A list of values that represent the limits of each state for each
distribution.
distribution_list : list of lists
A list containing multivariate distributions (lists) for a particular
residue or water
Returns
-------
entropy : float
The Shannon entropy value
"""
state_lims = state_limits.copy()
dist_list = distribution_list.copy()
## Ignore singular states and corresponding distributions
state_no = 0
while state_no < len(state_lims):
if len(state_lims[state_no])==2:
del dist_list[state_no]
del state_lims[state_no]
else:
state_no +=1
entropy=0.0
if len(state_lims)!=0:
## subtract 1 since number of states = number of partitions - 1
mut_prob=np.zeros(([len(state_lims[i])-1 for i in range(len(state_lims))]))
##iterating over every multidimensional index in the array
it = np.nditer(mut_prob, flags=['multi_index'])
while not it.finished:
arrayindices=list(it.multi_index)
limit_occupancy_checks=np.zeros((len(arrayindices), len(dist_list[0])))
for dist_num in range(len(arrayindices)):
limits=[state_lims[dist_num][arrayindices[dist_num]], state_lims[dist_num][arrayindices[dist_num]+1]]
distribution=dist_list[dist_num]
for frame_num in range(len(distribution)):
limit_occupancy_checks[dist_num][frame_num]= _check(distribution[frame_num],limits[0],limits[1])
mut_prob[it.multi_index]= sum(np.prod(limit_occupancy_checks,axis=0)) / len(limit_occupancy_checks[0])
##calculating the entropy as the summation of all -p*log(p)
if mut_prob[it.multi_index] != 0:
entropy+=-1*mut_prob[it.multi_index]*math.log(mut_prob[it.multi_index],2)
it.iternext()
return entropy
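# Editor's hedged worked example: one feature, two states occupied 50/50,
# so H = -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1 bit.
def _example_calculate_entropy():
    distribution = [0.1, 0.2, 0.8, 0.9]   # four frames of a single feature
    state_limits = [[0.0, 0.5, 1.0]]      # two states: [0, 0.5] and [0.5, 1]
    return calculate_entropy(state_limits, [distribution])  # -> 1.0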
| 36.508711
| 261
| 0.644541
|
import numpy as np
from queue import PriorityQueue
import math
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.signal import argrelextrema
import os
from pensa.features import *
def _smooth(x,window_len,window=None):
    if window is None:
        window_type='hanning'
    else:
        window_type=window
if x.ndim != 1:
raise ValueError
if x.size < window_len:
raise ValueError
if window_len<3:
return x
    if window_type not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError
s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]
if window_type == 'flat':
w=np.ones(window_len,'d')
else:
w=eval('np.'+window_type+'(window_len)')
y=np.convolve(w/w.sum(),s,mode='valid')
return y
def _find_nearest(distr, value):
array = np.array(distr)
idx = (np.abs(array - value)).argmin()
return array[idx]
def _printKclosest(arr,n,x,k):
a=[]
pq = PriorityQueue()
for neighb in range(k):
pq.put((-abs(arr[neighb]-x),neighb))
for neighb in range(k,n):
diff = abs(arr[neighb]-x)
p,pi = pq.get()
curr = -p
if diff>curr:
pq.put((-curr,pi))
continue
else:
pq.put((-diff,neighb))
while(not pq.empty()):
p,q = pq.get()
a.append(str("{} ".format(arr[q])))
return a
def _gauss(x, x0, sigma, a):
if sigma != 0:
gaussian = abs(a*np.exp(-(x-x0)**2/(2*sigma**2)))
return gaussian
def _bimodal(x,mu1,sigma1,A1,mu2,sigma2,A2):
return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)
def _trimodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3):
return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)
def _quadmodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4):
return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)
def _quinmodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4,mu5,sigma5,A5):
return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)+_gauss(x,mu5,sigma5,A5)
def _sexmodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4,mu5,sigma5,A5,mu6,sigma6,A6):
return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)+_gauss(x,mu5,sigma5,A5)+_gauss(x,mu6,sigma6,A6)
def _septmodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4,mu5,sigma5,A5,mu6,sigma6,A6,mu7,sigma7,A7):
return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)+_gauss(x,mu5,sigma5,A5)+_gauss(x,mu6,sigma6,A6)+_gauss(x,mu7,sigma7,A7)
def _octomodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4,mu5,sigma5,A5,mu6,sigma6,A6,mu7,sigma7,A7,mu8,sigma8,A8):
return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)+_gauss(x,mu5,sigma5,A5)+_gauss(x,mu6,sigma6,A6)+_gauss(x,mu7,sigma7,A7)+_gauss(x,mu8,sigma8,A8)
def _nonamodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4,mu5,sigma5,A5,mu6,sigma6,A6,mu7,sigma7,A7,mu8,sigma8,A8,mu9,sigma9,A9):
return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)+_gauss(x,mu5,sigma5,A5)+_gauss(x,mu6,sigma6,A6)+_gauss(x,mu7,sigma7,A7)+_gauss(x,mu8,sigma8,A8)+_gauss(x,mu9,sigma9,A9)
def _decamodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4,mu5,sigma5,A5,mu6,sigma6,A6,mu7,sigma7,A7,mu8,sigma8,A8,mu9,sigma9,A9,mu10,sigma10,A10):
return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)+_gauss(x,mu5,sigma5,A5)+_gauss(x,mu6,sigma6,A6)+_gauss(x,mu7,sigma7,A7)+_gauss(x,mu8,sigma8,A8)+_gauss(x,mu9,sigma9,A9)+_gauss(x,mu10,sigma10,A10)
def _integral(x, mu, sigma, A):
integral = (A/2) * (1 + math.erf((x - mu) / (sigma * np.sqrt(2))))
return integral
def _gauss_fit(distribution, traj1_len, gauss_bin, gauss_smooth):
distr1 = distribution[:traj1_len]
distr2 = distribution[traj1_len:]
histox = np.histogram(distribution, bins=gauss_bin, density=True)[1]
histo1 = np.histogram(distr1, bins=gauss_bin, range=(min(histox),max(histox)), density=True)[0]
histo2 = np.histogram(distr2, bins=gauss_bin, range=(min(histox),max(histox)), density=True)[0]
combined_histo = [(height1 + height2)/2 for height1,height2 in zip(histo1,histo2)]
distributionx = _smooth(histox[0:-1], gauss_smooth)
    distributiony = _smooth(combined_histo-min(combined_histo), gauss_smooth)
    maxima = [distributiony[item] for item in argrelextrema(distributiony, np.greater)][0]
    mean_pop=[]
    sigma_pop=[]
    num_closest_neighb=28
    sig_vals=[]
    for extrema in maxima:
        closest_yvals = _printKclosest(distributiony, len(distributiony), extrema*0.5, num_closest_neighb)
closest_xvals = [np.where(distributiony==float(closesty))[0][0] for closesty in closest_yvals]
mean_xval = distributionx[np.where(distributiony==extrema)[0][0]]
half_max_xval = _find_nearest(distributionx[closest_xvals],mean_xval)
FWHM = np.absolute(half_max_xval - mean_xval)
sigma = FWHM /(2*(np.sqrt(2*np.log(2))))
sig_vals.append(sigma)
    mean_vals=[distributionx[np.where(distributiony==extrema)[0][0]] for extrema in maxima]
for extr_num in range(len(maxima)):
mean_pop.append(mean_vals[extr_num])
sigma_pop.append(sig_vals[extr_num])
    Gauss_xvals=np.linspace(min(distribution),max(distribution),10000)
    peak_number=[_gauss,_bimodal,_trimodal,_quadmodal,_quinmodal,_sexmodal,_septmodal,_octomodal,_nonamodal,_decamodal]
mode=peak_number[len(sig_vals)-1]
expected=[]
for param_num in range(len(mean_pop)):
expected.append(mean_pop[param_num])
expected.append(sigma_pop[param_num])
expected.append(maxima[param_num])
params, cov = curve_fit(mode,distributionx,distributiony,expected,maxfev=1000000)
gaussians=[]
gauss_num_space=np.linspace(0,(len(params))-3,int(len(params)/3))
for gauss_index in gauss_num_space:
intmax = _integral(max(distribution),
params[0+int(gauss_index)],
params[1+int(gauss_index)],
params[2+int(gauss_index)])
intmin = _integral(min(distribution),
params[0+int(gauss_index)],
params[1+int(gauss_index)],
params[2+int(gauss_index)])
if np.abs(intmax-intmin)>0.02:
gaussians.append(_gauss(Gauss_xvals,
params[0+int(gauss_index)],
params[1+int(gauss_index)],
params[2+int(gauss_index)]))
return gaussians, Gauss_xvals
def smart_gauss_fit(distr, traj1_len, gauss_bins=180, gauss_smooth=None, write_name=None):
smooth_origin = gauss_smooth
bin_origin = gauss_bins
if gauss_smooth is None:
gauss_smooth = int(gauss_bins*0.10)
trial = 0
attempt_no = 0
    bin_adjust_up = np.array(range(1,10000))
    bin_adjust_down = bin_adjust_up.copy()*-1
bin_adjust = np.insert(bin_adjust_up, np.arange(len(bin_adjust_down)), bin_adjust_down)
    while trial < 1:
        try:
            gaussians, Gauss_xvals = _gauss_fit(distr, traj1_len, gauss_bins, gauss_smooth)
trial += 1
except:
attempt_no += 1
trial = 0
gauss_bins = bin_origin + bin_adjust[attempt_no]
    if bin_origin != 180 or smooth_origin is not None:
        if attempt_no > 0.1*bin_origin:
if write_name is None:
print('Warning: Altered gauss_bins by >10% for clustering.\nYou might want to check cluster plot.')
else:
print('Warning: Altered gauss_bins by >10% for clustering of '+write_name+'.\nYou might want to check cluster plot.')
return gaussians, Gauss_xvals
def get_intersects(gaussians, distribution, Gauss_xvals, write_plots=None,write_name=None):
    all_intersects=[min(distribution)]
    mean_gauss_xval=[]
for gauss_num in range(len(gaussians)):
mean_gauss_xval.append(Gauss_xvals[list(gaussians[gauss_num]).index(max(gaussians[gauss_num]))])
    reorder_gaussians=[gaussians[mean_gauss_xval.index(mean)] for mean in sorted(mean_gauss_xval)]
for gauss_index in range(len(reorder_gaussians)-1):
        idx = np.argwhere(np.diff(np.sign(reorder_gaussians[gauss_index] - reorder_gaussians[gauss_index+1]))).flatten()
if len(idx)==1:
all_intersects.append(float(Gauss_xvals[idx][0]) )
elif len(idx)!=0:
            intersect_ymax=max([reorder_gaussians[gauss_index][intersect] for intersect in idx])
intersect_ymax_index=[item for item in idx if reorder_gaussians[gauss_index][item]==intersect_ymax]
all_intersects.append(float(Gauss_xvals[intersect_ymax_index]))
        else:
            gauss_max1=list(reorder_gaussians[gauss_index]).index(max(reorder_gaussians[gauss_index]))
gauss_max2=list(reorder_gaussians[gauss_index+1]).index(max(reorder_gaussians[gauss_index+1]))
intersect = 0.5* np.abs(Gauss_xvals[gauss_max2] + Gauss_xvals[gauss_max1])
all_intersects.append(float(intersect))
all_intersects.append(max(distribution))
if write_plots is True:
if not os.path.exists('ssi_plots/'):
os.makedirs('ssi_plots/')
plt.figure()
plt.ion()
plt.hist(distribution,bins=360, density=True, alpha=0.5)
for gauss_index in range(len(reorder_gaussians)):
plt.plot(Gauss_xvals, reorder_gaussians[gauss_index], lw=2)
for intersect_index in range(len(all_intersects)):
plt.axvline(all_intersects[intersect_index],color='k',lw=1,ls='--')
plt.xlabel('Radians')
plt.ylabel('Count')
plt.title(write_name)
plt.ioff()
plt.savefig('ssi_plots/'+write_name+".png")
plt.close()
return all_intersects
def determine_state_limits(distr, traj1_len, gauss_bins=180, gauss_smooth=None, write_plots=None, write_name=None):
new_dist=distr.copy()
distribution=[item for item in new_dist if item != 10000.0]
##obtaining the gaussian fit
gaussians, Gauss_xvals = smart_gauss_fit(distribution, traj1_len, gauss_bins, gauss_smooth, write_name)
##discretising each state by gaussian intersects
intersection_of_states = get_intersects(gaussians, distribution, Gauss_xvals, write_plots, write_name)
if distr.count(10000.0)>=1:
intersection_of_states.append(20000.0)
order_intersect=np.sort(intersection_of_states)
return list(order_intersect)
# -- Functions to operate on discrete states --
def _check(value,x,y):
if x <= value <= y:
return 1
else:
return 0
def calculate_entropy(state_limits,distribution_list):
state_lims = state_limits.copy()
dist_list = distribution_list.copy()
## Ignore singular states and corresponding distributions
state_no = 0
while state_no < len(state_lims):
if len(state_lims[state_no])==2:
del dist_list[state_no]
del state_lims[state_no]
else:
state_no +=1
entropy=0.0
if len(state_lims)!=0:
## subtract 1 since number of states = number of partitions - 1
mut_prob=np.zeros(([len(state_lims[i])-1 for i in range(len(state_lims))]))
##iterating over every multidimensional index in the array
it = np.nditer(mut_prob, flags=['multi_index'])
while not it.finished:
arrayindices=list(it.multi_index)
limit_occupancy_checks=np.zeros((len(arrayindices), len(dist_list[0])))
for dist_num in range(len(arrayindices)):
limits=[state_lims[dist_num][arrayindices[dist_num]], state_lims[dist_num][arrayindices[dist_num]+1]]
distribution=dist_list[dist_num]
for frame_num in range(len(distribution)):
limit_occupancy_checks[dist_num][frame_num]= _check(distribution[frame_num],limits[0],limits[1])
mut_prob[it.multi_index]= sum(np.prod(limit_occupancy_checks,axis=0)) / len(limit_occupancy_checks[0])
##calculating the entropy as the summation of all -p*log(p)
if mut_prob[it.multi_index] != 0:
entropy+=-1*mut_prob[it.multi_index]*math.log(mut_prob[it.multi_index],2)
it.iternext()
return entropy
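## Minimal end-to-end sketch (illustrative only; assumes a concatenated
## distribution of two trajectories and the helpers defined above):
##
##   limits = determine_state_limits(distr, traj1_len, write_plots=True,
##                                   write_name='example_feature')
##   entropy = calculate_entropy([limits], [distr])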
| true
| true
|
1c4325ae6919f9ae41b7a7214ba23df6453cd811
| 291
|
py
|
Python
|
networkx/algorithms/isomorphism/__init__.py
|
FrancescoBonacina/networkx
|
a73a610e0bbd6e13b183b15ca47b221df5f8e26a
|
[
"BSD-3-Clause"
] | 10
|
2020-04-29T10:38:03.000Z
|
2022-03-16T03:30:28.000Z
|
networkx/algorithms/isomorphism/__init__.py
|
FrancescoBonacina/networkx
|
a73a610e0bbd6e13b183b15ca47b221df5f8e26a
|
[
"BSD-3-Clause"
] | 30
|
2020-04-15T19:37:40.000Z
|
2020-04-22T21:19:35.000Z
|
networkx/algorithms/isomorphism/__init__.py
|
FrancescoBonacina/networkx
|
a73a610e0bbd6e13b183b15ca47b221df5f8e26a
|
[
"BSD-3-Clause"
] | 2
|
2020-04-08T07:50:23.000Z
|
2020-04-08T11:59:03.000Z
|
from networkx.algorithms.isomorphism.isomorph import *
from networkx.algorithms.isomorphism.vf2userfunc import *
from networkx.algorithms.isomorphism.matchhelpers import *
from networkx.algorithms.isomorphism.temporalisomorphvf2 import *
from networkx.algorithms.isomorphism.ismags import *
| 48.5
| 65
| 0.862543
|
from networkx.algorithms.isomorphism.isomorph import *
from networkx.algorithms.isomorphism.vf2userfunc import *
from networkx.algorithms.isomorphism.matchhelpers import *
from networkx.algorithms.isomorphism.temporalisomorphvf2 import *
from networkx.algorithms.isomorphism.ismags import *
| true
| true
|
1c432939ac64eb0fbfab497b70dd63da3ec4d5ff
| 606
|
py
|
Python
|
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GLES2/NV/shadow_samplers_cube.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GLES2/NV/shadow_samplers_cube.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GLES2/NV/shadow_samplers_cube.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_NV_shadow_samplers_cube'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_NV_shadow_samplers_cube',error_checker=_errors._error_checker)
GL_SAMPLER_CUBE_SHADOW_NV=_C('GL_SAMPLER_CUBE_SHADOW_NV',0x8DC5)
| 37.875
| 127
| 0.793729
|
from OpenGL import platform as _p, arrays
from OpenGL.raw.GLES2 import _types as _cs
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_NV_shadow_samplers_cube'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_NV_shadow_samplers_cube',error_checker=_errors._error_checker)
GL_SAMPLER_CUBE_SHADOW_NV=_C('GL_SAMPLER_CUBE_SHADOW_NV',0x8DC5)
| true
| true
|
1c43298bed00cdb37ea907188f0a6c7890f1ffd1
| 12,765
|
py
|
Python
|
src/c3nav/editor/api.py
|
bate/c3nav
|
9a86dd3eaeb3a10af3c5fa869575ed1e9300465a
|
[
"Apache-2.0"
] | null | null | null |
src/c3nav/editor/api.py
|
bate/c3nav
|
9a86dd3eaeb3a10af3c5fa869575ed1e9300465a
|
[
"Apache-2.0"
] | null | null | null |
src/c3nav/editor/api.py
|
bate/c3nav
|
9a86dd3eaeb3a10af3c5fa869575ed1e9300465a
|
[
"Apache-2.0"
] | null | null | null |
from itertools import chain
from django.db.models import Prefetch, Q
from rest_framework.decorators import detail_route, list_route
from rest_framework.exceptions import PermissionDenied, ValidationError
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.viewsets import ReadOnlyModelViewSet, ViewSet
from shapely.ops import cascaded_union
from c3nav.editor.models import ChangeSet
from c3nav.editor.views.base import etag_func
from c3nav.mapdata.api import api_etag
from c3nav.mapdata.models import Area, Door, MapUpdate, Source
from c3nav.mapdata.models.geometry.space import POI
from c3nav.mapdata.utils.user import can_access_editor
class EditorViewSet(ViewSet):
"""
Editor API
/geometries/ returns a list of geojson features, you have to specify ?level=<id> or ?space=<id>
/geometrystyles/ returns styling information for all geometry types
/bounds/ returns the maximum bounds of the map
"""
@staticmethod
def _get_level_geometries(level):
buildings = level.buildings.all()
buildings_geom = cascaded_union([building.geometry for building in buildings])
spaces = {space.pk: space for space in level.spaces.all()}
holes_geom = []
for space in spaces.values():
if space.outside:
space.geometry = space.geometry.difference(buildings_geom)
columns_geom = cascaded_union([column.geometry for column in space.columns.all()])
space.geometry = space.geometry.difference(columns_geom)
space_holes_geom = cascaded_union([hole.geometry for hole in space.holes.all()])
holes_geom.append(space_holes_geom.intersection(space.geometry))
space.geometry = space.geometry.difference(space_holes_geom)
holes_geom = cascaded_union(holes_geom)
for building in buildings:
building.original_geometry = building.geometry
for obj in buildings:
obj.geometry = obj.geometry.difference(holes_geom)
results = []
results.extend(buildings)
for door in level.doors.all():
results.append(door)
results.extend(spaces.values())
return results
@staticmethod
def _get_levels_pk(request, level):
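        # Collects the pks of this level, of levels stacked on top of it
        # (levels_on_top) and, if a lower level exists, of levels stacked on
        # that lower level (levels_under).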
# noinspection PyPep8Naming
Level = request.changeset.wrap_model('Level')
levels_under = ()
levels_on_top = ()
lower_level = level.lower(Level).first()
primary_levels = (level,) + ((lower_level,) if lower_level else ())
secondary_levels = Level.objects.filter(on_top_of__in=primary_levels).values_list('pk', 'on_top_of')
if lower_level:
levels_under = tuple(pk for pk, on_top_of in secondary_levels if on_top_of == lower_level.pk)
if True:
levels_on_top = tuple(pk for pk, on_top_of in secondary_levels if on_top_of == level.pk)
levels = chain([level.pk], levels_under, levels_on_top)
return levels, levels_on_top, levels_under
# noinspection PyPep8Naming
@list_route(methods=['get'])
@api_etag(etag_func=etag_func, cache_parameters={'level': str, 'space': str})
def geometries(self, request, *args, **kwargs):
if not can_access_editor(request):
            raise PermissionDenied
Level = request.changeset.wrap_model('Level')
Space = request.changeset.wrap_model('Space')
level = request.GET.get('level')
space = request.GET.get('space')
if level is not None:
if space is not None:
raise ValidationError('Only level or space can be specified.')
level = get_object_or_404(Level.objects.filter(Level.q_for_request(request)), pk=level)
levels, levels_on_top, levels_under = self._get_levels_pk(request, level)
# don't prefetch groups for now as changesets do not yet work with m2m-prefetches
levels = Level.objects.filter(pk__in=levels).filter(Level.q_for_request(request))
# graphnodes_qs = request.changeset.wrap_model('GraphNode').objects.all()
levels = levels.prefetch_related(
Prefetch('spaces', request.changeset.wrap_model('Space').objects.filter(Space.q_for_request(request))),
Prefetch('doors', request.changeset.wrap_model('Door').objects.filter(Door.q_for_request(request))),
'buildings', 'spaces__holes', 'spaces__groups', 'spaces__columns', 'spaces__altitudemarkers',
# Prefetch('spaces__graphnodes', graphnodes_qs)
)
levels = {s.pk: s for s in levels}
level = levels[level.pk]
levels_under = [levels[pk] for pk in levels_under]
levels_on_top = [levels[pk] for pk in levels_on_top]
# todo: permissions
# graphnodes = tuple(chain(*(space.graphnodes.all()
# for space in chain(*(level.spaces.all() for level in levels.values())))))
# graphnodes_lookup = {node.pk: node for node in graphnodes}
# graphedges = request.changeset.wrap_model('GraphEdge').objects.all()
# graphedges = graphedges.filter(Q(from_node__in=graphnodes) | Q(to_node__in=graphnodes))
# graphedges = graphedges.select_related('waytype')
# this is faster because we only deserialize graphnode geometries once
# missing_graphnodes = graphnodes_qs.filter(pk__in=set(chain(*((edge.from_node_id, edge.to_node_id)
# for edge in graphedges))))
# graphnodes_lookup.update({node.pk: node for node in missing_graphnodes})
# for edge in graphedges:
# edge._from_node_cache = graphnodes_lookup[edge.from_node_id]
# edge._to_node_cache = graphnodes_lookup[edge.to_node_id]
# graphedges = [edge for edge in graphedges if edge.from_node.space_id != edge.to_node.space_id]
results = chain(
*(self._get_level_geometries(l) for l in levels_under),
self._get_level_geometries(level),
*(self._get_level_geometries(l) for l in levels_on_top),
*(space.altitudemarkers.all() for space in level.spaces.all()),
# graphedges,
# graphnodes,
)
return Response([obj.to_geojson(instance=obj) for obj in results])
elif space is not None:
space_q_for_request = Space.q_for_request(request)
qs = Space.objects.filter(space_q_for_request)
space = get_object_or_404(qs.select_related('level', 'level__on_top_of'), pk=space)
level = space.level
doors = [door for door in level.doors.filter(Door.q_for_request(request)).all()
if door.geometry.intersects(space.geometry)]
doors_space_geom = cascaded_union([door.geometry for door in doors]+[space.geometry])
levels, levels_on_top, levels_under = self._get_levels_pk(request, level.primary_level)
if level.on_top_of_id is not None:
levels = chain([level.pk], levels_on_top)
other_spaces = Space.objects.filter(space_q_for_request, level__pk__in=levels).prefetch_related('groups')
space = next(s for s in other_spaces if s.pk == space.pk)
other_spaces = [s for s in other_spaces
if s.geometry.intersects(doors_space_geom) and s.pk != space.pk]
all_other_spaces = other_spaces
if level.on_top_of_id is None:
other_spaces_lower = [s for s in other_spaces if s.level_id in levels_under]
other_spaces_upper = [s for s in other_spaces if s.level_id in levels_on_top]
else:
other_spaces_lower = [s for s in other_spaces if s.level_id == level.on_top_of_id]
other_spaces_upper = []
other_spaces = [s for s in other_spaces if s.level_id == level.pk]
space.bounds = True
buildings = level.buildings.all()
buildings_geom = cascaded_union([building.geometry for building in buildings])
for other_space in other_spaces:
if other_space.outside:
other_space.geometry = other_space.geometry.difference(buildings_geom)
for other_space in chain(other_spaces, other_spaces_lower, other_spaces_upper):
other_space.opacity = 0.4
other_space.color = '#ffffff'
for building in buildings:
building.opacity = 0.5
# todo: permissions
graphnodes = request.changeset.wrap_model('GraphNode').objects.all()
graphnodes = graphnodes.filter((Q(space__in=all_other_spaces)) | Q(space__pk=space.pk))
space_graphnodes = tuple(node for node in graphnodes if node.space_id == space.pk)
graphedges = request.changeset.wrap_model('GraphEdge').objects.all()
graphedges = graphedges.filter(Q(from_node__in=space_graphnodes) | Q(to_node__in=space_graphnodes))
graphedges = graphedges.select_related('from_node', 'to_node', 'waytype')
areas = space.areas.filter(Area.q_for_request(request)).prefetch_related('groups')
for area in areas:
area.opacity = 0.5
results = chain(
buildings,
other_spaces_lower,
doors,
other_spaces,
[space],
areas,
space.holes.all(),
space.stairs.all(),
space.ramps.all(),
space.obstacles.all(),
space.lineobstacles.all(),
space.columns.all(),
space.altitudemarkers.all(),
space.wifi_measurements.all(),
space.pois.filter(POI.q_for_request(request)).prefetch_related('groups'),
other_spaces_upper,
graphedges,
graphnodes
)
return Response([obj.to_geojson(instance=obj) for obj in results])
else:
raise ValidationError('No level or space specified.')
@list_route(methods=['get'])
@api_etag(etag_func=MapUpdate.current_cache_key, cache_parameters={})
def geometrystyles(self, request, *args, **kwargs):
if not can_access_editor(request):
            raise PermissionDenied
return Response({
'building': '#aaaaaa',
'space': '#eeeeee',
'hole': 'rgba(255, 0, 0, 0.3)',
'door': '#ffffff',
'area': '#55aaff',
'stair': '#a000a0',
'ramp': 'rgba(160, 0, 160, 0.2)',
'obstacle': '#999999',
'lineobstacle': '#999999',
'column': '#888888',
'poi': '#4488cc',
'shadow': '#000000',
'graphnode': '#009900',
'graphedge': '#00CC00',
'altitudemarker': '#0000FF',
'wifimeasurement': '#DDDD00',
})
@list_route(methods=['get'])
@api_etag(etag_func=etag_func, cache_parameters={})
def bounds(self, request, *args, **kwargs):
if not can_access_editor(request):
            raise PermissionDenied
return Response({
'bounds': Source.max_bounds(),
})
class ChangeSetViewSet(ReadOnlyModelViewSet):
"""
List change sets
/current/ returns the current changeset.
"""
queryset = ChangeSet.objects.all()
def get_queryset(self):
return ChangeSet.qs_for_request(self.request).select_related('last_update', 'last_state_update', 'last_change')
def list(self, request, *args, **kwargs):
if not can_access_editor(request):
            raise PermissionDenied
return Response([obj.serialize() for obj in self.get_queryset().order_by('id')])
def retrieve(self, request, *args, **kwargs):
if not can_access_editor(request):
            raise PermissionDenied
return Response(self.get_object().serialize())
@list_route(methods=['get'])
def current(self, request, *args, **kwargs):
if not can_access_editor(request):
            raise PermissionDenied
changeset = ChangeSet.get_for_request(request)
return Response(changeset.serialize())
@detail_route(methods=['get'])
def changes(self, request, *args, **kwargs):
if not can_access_editor(request):
            raise PermissionDenied
changeset = self.get_object()
changeset.fill_changes_cache()
return Response([obj.serialize() for obj in changeset.iter_changed_objects()])
| 45.106007
| 119
| 0.627889
|
from itertools import chain
from django.db.models import Prefetch, Q
from rest_framework.decorators import detail_route, list_route
from rest_framework.exceptions import PermissionDenied, ValidationError
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.viewsets import ReadOnlyModelViewSet, ViewSet
from shapely.ops import cascaded_union
from c3nav.editor.models import ChangeSet
from c3nav.editor.views.base import etag_func
from c3nav.mapdata.api import api_etag
from c3nav.mapdata.models import Area, Door, MapUpdate, Source
from c3nav.mapdata.models.geometry.space import POI
from c3nav.mapdata.utils.user import can_access_editor
class EditorViewSet(ViewSet):
@staticmethod
def _get_level_geometries(level):
buildings = level.buildings.all()
buildings_geom = cascaded_union([building.geometry for building in buildings])
spaces = {space.pk: space for space in level.spaces.all()}
holes_geom = []
for space in spaces.values():
if space.outside:
space.geometry = space.geometry.difference(buildings_geom)
columns_geom = cascaded_union([column.geometry for column in space.columns.all()])
space.geometry = space.geometry.difference(columns_geom)
space_holes_geom = cascaded_union([hole.geometry for hole in space.holes.all()])
holes_geom.append(space_holes_geom.intersection(space.geometry))
space.geometry = space.geometry.difference(space_holes_geom)
holes_geom = cascaded_union(holes_geom)
for building in buildings:
building.original_geometry = building.geometry
for obj in buildings:
obj.geometry = obj.geometry.difference(holes_geom)
results = []
results.extend(buildings)
for door in level.doors.all():
results.append(door)
results.extend(spaces.values())
return results
@staticmethod
def _get_levels_pk(request, level):
Level = request.changeset.wrap_model('Level')
levels_under = ()
levels_on_top = ()
lower_level = level.lower(Level).first()
primary_levels = (level,) + ((lower_level,) if lower_level else ())
secondary_levels = Level.objects.filter(on_top_of__in=primary_levels).values_list('pk', 'on_top_of')
if lower_level:
levels_under = tuple(pk for pk, on_top_of in secondary_levels if on_top_of == lower_level.pk)
if True:
levels_on_top = tuple(pk for pk, on_top_of in secondary_levels if on_top_of == level.pk)
levels = chain([level.pk], levels_under, levels_on_top)
return levels, levels_on_top, levels_under
@list_route(methods=['get'])
@api_etag(etag_func=etag_func, cache_parameters={'level': str, 'space': str})
def geometries(self, request, *args, **kwargs):
if not can_access_editor(request):
            raise PermissionDenied
Level = request.changeset.wrap_model('Level')
Space = request.changeset.wrap_model('Space')
level = request.GET.get('level')
space = request.GET.get('space')
if level is not None:
if space is not None:
raise ValidationError('Only level or space can be specified.')
level = get_object_or_404(Level.objects.filter(Level.q_for_request(request)), pk=level)
levels, levels_on_top, levels_under = self._get_levels_pk(request, level)
levels = Level.objects.filter(pk__in=levels).filter(Level.q_for_request(request))
# graphnodes_qs = request.changeset.wrap_model('GraphNode').objects.all()
levels = levels.prefetch_related(
Prefetch('spaces', request.changeset.wrap_model('Space').objects.filter(Space.q_for_request(request))),
Prefetch('doors', request.changeset.wrap_model('Door').objects.filter(Door.q_for_request(request))),
'buildings', 'spaces__holes', 'spaces__groups', 'spaces__columns', 'spaces__altitudemarkers',
# Prefetch('spaces__graphnodes', graphnodes_qs)
)
levels = {s.pk: s for s in levels}
level = levels[level.pk]
levels_under = [levels[pk] for pk in levels_under]
levels_on_top = [levels[pk] for pk in levels_on_top]
# todo: permissions
# graphnodes = tuple(chain(*(space.graphnodes.all()
# for space in chain(*(level.spaces.all() for level in levels.values())))))
# graphnodes_lookup = {node.pk: node for node in graphnodes}
# graphedges = request.changeset.wrap_model('GraphEdge').objects.all()
# graphedges = graphedges.filter(Q(from_node__in=graphnodes) | Q(to_node__in=graphnodes))
# graphedges = graphedges.select_related('waytype')
# this is faster because we only deserialize graphnode geometries once
# missing_graphnodes = graphnodes_qs.filter(pk__in=set(chain(*((edge.from_node_id, edge.to_node_id)
# for edge in graphedges))))
# graphnodes_lookup.update({node.pk: node for node in missing_graphnodes})
# for edge in graphedges:
# edge._from_node_cache = graphnodes_lookup[edge.from_node_id]
# edge._to_node_cache = graphnodes_lookup[edge.to_node_id]
# graphedges = [edge for edge in graphedges if edge.from_node.space_id != edge.to_node.space_id]
results = chain(
*(self._get_level_geometries(l) for l in levels_under),
self._get_level_geometries(level),
*(self._get_level_geometries(l) for l in levels_on_top),
*(space.altitudemarkers.all() for space in level.spaces.all()),
# graphedges,
# graphnodes,
)
return Response([obj.to_geojson(instance=obj) for obj in results])
elif space is not None:
space_q_for_request = Space.q_for_request(request)
qs = Space.objects.filter(space_q_for_request)
space = get_object_or_404(qs.select_related('level', 'level__on_top_of'), pk=space)
level = space.level
doors = [door for door in level.doors.filter(Door.q_for_request(request)).all()
if door.geometry.intersects(space.geometry)]
doors_space_geom = cascaded_union([door.geometry for door in doors]+[space.geometry])
levels, levels_on_top, levels_under = self._get_levels_pk(request, level.primary_level)
if level.on_top_of_id is not None:
levels = chain([level.pk], levels_on_top)
other_spaces = Space.objects.filter(space_q_for_request, level__pk__in=levels).prefetch_related('groups')
space = next(s for s in other_spaces if s.pk == space.pk)
other_spaces = [s for s in other_spaces
if s.geometry.intersects(doors_space_geom) and s.pk != space.pk]
all_other_spaces = other_spaces
if level.on_top_of_id is None:
other_spaces_lower = [s for s in other_spaces if s.level_id in levels_under]
other_spaces_upper = [s for s in other_spaces if s.level_id in levels_on_top]
else:
other_spaces_lower = [s for s in other_spaces if s.level_id == level.on_top_of_id]
other_spaces_upper = []
other_spaces = [s for s in other_spaces if s.level_id == level.pk]
space.bounds = True
buildings = level.buildings.all()
buildings_geom = cascaded_union([building.geometry for building in buildings])
for other_space in other_spaces:
if other_space.outside:
other_space.geometry = other_space.geometry.difference(buildings_geom)
for other_space in chain(other_spaces, other_spaces_lower, other_spaces_upper):
other_space.opacity = 0.4
                other_space.color = '#ffffff'
for building in buildings:
building.opacity = 0.5
# todo: permissions
graphnodes = request.changeset.wrap_model('GraphNode').objects.all()
graphnodes = graphnodes.filter((Q(space__in=all_other_spaces)) | Q(space__pk=space.pk))
space_graphnodes = tuple(node for node in graphnodes if node.space_id == space.pk)
graphedges = request.changeset.wrap_model('GraphEdge').objects.all()
graphedges = graphedges.filter(Q(from_node__in=space_graphnodes) | Q(to_node__in=space_graphnodes))
graphedges = graphedges.select_related('from_node', 'to_node', 'waytype')
areas = space.areas.filter(Area.q_for_request(request)).prefetch_related('groups')
for area in areas:
area.opacity = 0.5
results = chain(
buildings,
other_spaces_lower,
doors,
other_spaces,
[space],
areas,
space.holes.all(),
space.stairs.all(),
space.ramps.all(),
space.obstacles.all(),
space.lineobstacles.all(),
space.columns.all(),
space.altitudemarkers.all(),
space.wifi_measurements.all(),
space.pois.filter(POI.q_for_request(request)).prefetch_related('groups'),
other_spaces_upper,
graphedges,
graphnodes
)
return Response([obj.to_geojson(instance=obj) for obj in results])
else:
raise ValidationError('No level or space specified.')
@list_route(methods=['get'])
@api_etag(etag_func=MapUpdate.current_cache_key, cache_parameters={})
def geometrystyles(self, request, *args, **kwargs):
if not can_access_editor(request):
            raise PermissionDenied
return Response({
            'building': '#aaaaaa',
            'space': '#eeeeee',
            'hole': 'rgba(255, 0, 0, 0.3)',
            'door': '#ffffff',
            'area': '#55aaff',
            'stair': '#a000a0',
            'ramp': 'rgba(160, 0, 160, 0.2)',
            'obstacle': '#999999',
            'lineobstacle': '#999999',
            'column': '#888888',
            'poi': '#4488cc',
            'shadow': '#000000',
            'graphnode': '#009900',
            'graphedge': '#00CC00',
            'altitudemarker': '#0000FF',
            'wifimeasurement': '#DDDD00',
})
@list_route(methods=['get'])
@api_etag(etag_func=etag_func, cache_parameters={})
def bounds(self, request, *args, **kwargs):
if not can_access_editor(request):
            raise PermissionDenied
return Response({
'bounds': Source.max_bounds(),
})
class ChangeSetViewSet(ReadOnlyModelViewSet):
queryset = ChangeSet.objects.all()
def get_queryset(self):
return ChangeSet.qs_for_request(self.request).select_related('last_update', 'last_state_update', 'last_change')
def list(self, request, *args, **kwargs):
if not can_access_editor(request):
            raise PermissionDenied
return Response([obj.serialize() for obj in self.get_queryset().order_by('id')])
def retrieve(self, request, *args, **kwargs):
if not can_access_editor(request):
            raise PermissionDenied
return Response(self.get_object().serialize())
@list_route(methods=['get'])
def current(self, request, *args, **kwargs):
if not can_access_editor(request):
            raise PermissionDenied
changeset = ChangeSet.get_for_request(request)
return Response(changeset.serialize())
@detail_route(methods=['get'])
def changes(self, request, *args, **kwargs):
if not can_access_editor(request):
            raise PermissionDenied
changeset = self.get_object()
changeset.fill_changes_cache()
return Response([obj.serialize() for obj in changeset.iter_changed_objects()])
| true
| true
|
1c4329a9bd36f09a7c5e52e9bfeb15c30d5395fb
| 3,766
|
py
|
Python
|
python/smap/drivers/washingtonbpa.py
|
carlosduarteroa/smap
|
5760631dfaf3e85da26ce68bf542bf254bb92c80
|
[
"BSD-2-Clause"
] | null | null | null |
python/smap/drivers/washingtonbpa.py
|
carlosduarteroa/smap
|
5760631dfaf3e85da26ce68bf542bf254bb92c80
|
[
"BSD-2-Clause"
] | null | null | null |
python/smap/drivers/washingtonbpa.py
|
carlosduarteroa/smap
|
5760631dfaf3e85da26ce68bf542bf254bb92c80
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
'''
sMAP feed for BPA Total Wind, Hydro, and Thermal Generation.
@author Gabe Fierro
'''
import urllib2
import logging
from smap.driver import SmapDriver
from smap.util import periodicSequentialCall
from smap.contrib import dtutil
class BPADriver(SmapDriver):
'''
Scrape feed from BPA site and parse as a sMAP feed. BPA updates approximately every 5 minutes so we
update every 2.5 minutes to make sure we catch all the updates (updates are correctly timestamped
in increments of 5 minutes). We parse wind, hydro and thermal feeds.
'''
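    # Based on the parsing in read() below, each data row of the feed is
    # whitespace-delimited and looks roughly like (values hypothetical):
    #   01/31/2012 12:35   6100   1200   4300   600
    #   (date      time    Load   Wind   Hydro  Thermal)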
def setup(self, opts):
self.w = self.add_timeseries('/wind','MW',description='Total Wind Generation')
self.h = self.add_timeseries('/hydro','MW',description='Total Hydro Generation')
self.t = self.add_timeseries('/thermal','MW',description='Total Thermal Generation')
self.l = self.add_timeseries('/load','MW',description='Total Load')
self.set_metadata = {
'Location' : {'State': 'WA', 'Uri': 'http://transmission.bpa.gov/business/operations/wind/baltwg.txt'}
}
self.previousTime = 0
def start(self):
periodicSequentialCall(self.read).start(5*30) # updates every 2.5 minutes
def read(self):
object_ = {}
print('read running')
try:
            #get the text from the url
wa = urllib2.urlopen('http://transmission.bpa.gov/business/operations/wind/baltwg.txt')
data = [line for line in wa.readlines()[7:] if len(line.split()) > 3]
#parse most recent data
rawTime = " ".join(data[-1].split()[:2])
currentTime = int(dtutil.dt2ts(dtutil.strptime_tz(rawTime,"%m/%d/%Y %H:%M",'US/Pacific')))
object_["Wind"] = data[-1].split()[3]
object_["Hydro"] = data[-1].split()[4]
object_["Thermal"] = data[-1].split()[5]
object_["Load"] = data[-1].split()[2]
except Exception as e:
logging.exception(type(e))
print(e)
else:
if currentTime != self.previousTime:
self.w.add(currentTime,int(object_["Wind"]))
self.h.add(currentTime,int(object_["Hydro"]))
self.t.add(currentTime,int(object_["Thermal"]))
self.l.add(currentTime,int(object_["Load"]))
self.previousTime = currentTime
wa.close()
| 44.305882
| 114
| 0.674721
|
import urllib2
import logging
from smap.driver import SmapDriver
from smap.util import periodicSequentialCall
from smap.contrib import dtutil
class BPADriver(SmapDriver):
def setup(self, opts):
self.w = self.add_timeseries('/wind','MW',description='Total Wind Generation')
self.h = self.add_timeseries('/hydro','MW',description='Total Hydro Generation')
self.t = self.add_timeseries('/thermal','MW',description='Total Thermal Generation')
self.l = self.add_timeseries('/load','MW',description='Total Load')
self.set_metadata = {
'Location' : {'State': 'WA', 'Uri': 'http://transmission.bpa.gov/business/operations/wind/baltwg.txt'}
}
self.previousTime = 0
def start(self):
periodicSequentialCall(self.read).start(5*30)
def read(self):
object_ = {}
print('read running')
try:
wa = urllib2.urlopen('http://transmission.bpa.gov/business/operations/wind/baltwg.txt')
data = [line for line in wa.readlines()[7:] if len(line.split()) > 3]
rawTime = " ".join(data[-1].split()[:2])
currentTime = int(dtutil.dt2ts(dtutil.strptime_tz(rawTime,"%m/%d/%Y %H:%M",'US/Pacific')))
object_["Wind"] = data[-1].split()[3]
object_["Hydro"] = data[-1].split()[4]
object_["Thermal"] = data[-1].split()[5]
object_["Load"] = data[-1].split()[2]
except Exception as e:
logging.exception(type(e))
print(e)
else:
if currentTime != self.previousTime:
self.w.add(currentTime,int(object_["Wind"]))
self.h.add(currentTime,int(object_["Hydro"]))
self.t.add(currentTime,int(object_["Thermal"]))
self.l.add(currentTime,int(object_["Load"]))
self.previousTime = currentTime
wa.close()
| true
| true
|
1c4329acb597363d5b87ee67cdeb44ad2032ba5e
| 517
|
py
|
Python
|
reloadAll.py
|
elpie89/MaxToolsUpdater
|
a8ba5437b3005bbc79992f0ac7a8723b68680525
|
[
"Apache-2.0"
] | null | null | null |
reloadAll.py
|
elpie89/MaxToolsUpdater
|
a8ba5437b3005bbc79992f0ac7a8723b68680525
|
[
"Apache-2.0"
] | null | null | null |
reloadAll.py
|
elpie89/MaxToolsUpdater
|
a8ba5437b3005bbc79992f0ac7a8723b68680525
|
[
"Apache-2.0"
] | null | null | null |
import os # we use os.path.join, os.path.basename
import sys # we use sys.path
import glob # we use glob.glob
import importlib # we use importlib.import_module
projectFolder = os.path.join(os.path.dirname(__file__),"src")
sys.path.append(projectFolder) # this tells python to look in `projectFolder` for imports
for src_file in glob.glob(os.path.join(projectFolder, '*.py')):
name = os.path.basename(src_file)[:-3]
importlib.import_module(name)
    importlib.reload(sys.modules[name])
importlib.import_module(name)
| 43.083333
| 89
| 0.748549
|
import os
import sys
import glob
import importlib
projectFolder = os.path.join(os.path.dirname(__file__),"src")
sys.path.append(projectFolder)
for src_file in glob.glob(os.path.join(projectFolder, '*.py')):
name = os.path.basename(src_file)[:-3]
importlib.import_module(name)
    importlib.reload(sys.modules[name])
importlib.import_module(name)
| true
| true
|
1c4329c02e4844c5e0af2d6a1ba24d97c83766f1
| 566
|
py
|
Python
|
Python/PythonApp/rename.py
|
nanhuayu/hello-world
|
4c97477d72cc5d46b65ab3a36b10f6b7dfff3e95
|
[
"MIT"
] | null | null | null |
Python/PythonApp/rename.py
|
nanhuayu/hello-world
|
4c97477d72cc5d46b65ab3a36b10f6b7dfff3e95
|
[
"MIT"
] | null | null | null |
Python/PythonApp/rename.py
|
nanhuayu/hello-world
|
4c97477d72cc5d46b65ab3a36b10f6b7dfff3e95
|
[
"MIT"
] | null | null | null |
#-*- coding: UTF-8 -*-
import os;
def rename():
count = 0;
path=os.getcwd();
    filelist=os.listdir(path)#all files under this folder (including directories)
    for files in filelist:#iterate over every file
        Olddir=os.path.join(path,files);#original file path
        if os.path.isdir(Olddir):#skip if it is a directory
            continue;
        filename=os.path.splitext(files)[0];#file name
        filetype=os.path.splitext(files)[1];#file extension
        if filetype == '.py':
            continue;
        Newdir=os.path.join(path,filename+filetype+'.jpg');#new file path
        os.rename(Olddir,Newdir);#rename
count+=1;
rename();
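# Illustrative outcome (hypothetical file): a "photo.png" in the working
# directory is renamed to "photo.png.jpg"; directories and .py files are skipped.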
| 25.727273
| 66
| 0.595406
|
import os;
def rename():
count = 0;
path=os.getcwd();
filelist=os.listdir(path)
for files in filelist:
Olddir=os.path.join(path,files);
if os.path.isdir(Olddir):
continue;
filename=os.path.splitext(files)[0];
filetype=os.path.splitext(files)[1];
if filetype == '.py':
continue;
Newdir=os.path.join(path,filename+filetype+'.jpg');
os.rename(Olddir,Newdir);
count+=1;
rename();
| true
| true
|
1c432a70566fcc28b0fa0efcb500e4f4da1ac4c8
| 275
|
py
|
Python
|
17.Python for Automation/04.Automating with APIs/02.working_with_API_keys.py
|
ptyadana/python-dojo
|
98c7234b84f0afea99a091c7198342d66bbdff5b
|
[
"MIT"
] | 3
|
2020-06-01T04:17:18.000Z
|
2020-12-18T03:05:55.000Z
|
17.Python for Automation/04.Automating with APIs/02.working_with_API_keys.py
|
ptyadana/python-dojo
|
98c7234b84f0afea99a091c7198342d66bbdff5b
|
[
"MIT"
] | 1
|
2020-04-25T08:01:59.000Z
|
2020-04-25T08:01:59.000Z
|
17.Python for Automation/04.Automating with APIs/02.working_with_API_keys.py
|
ptyadana/python-dojo
|
98c7234b84f0afea99a091c7198342d66bbdff5b
|
[
"MIT"
] | 7
|
2020-04-26T10:02:36.000Z
|
2021-06-08T05:12:46.000Z
|
import requests
import json
base_url = "http://api.openweathermap.org/data/2.5/forecast"
APP_ID = "your_own_id"
parameters = {"appid": APP_ID, "q": "Singapore"}
response = requests.get(base_url, params=parameters)
print(json.dumps(json.loads(response.content), indent=1))
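# A common variant (sketch, not from the original): read the key from an
# environment variable instead of hardcoding it.
#
#   import os
#   APP_ID = os.environ.get("OPENWEATHER_API_KEY", "your_own_id")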
| 25
| 60
| 0.749091
|
import requests
import json
base_url = "http://api.openweathermap.org/data/2.5/forecast"
APP_ID = "your_own_id"
parameters = {"appid": APP_ID, "q": "Singapore"}
response = requests.get(base_url, params=parameters)
print(json.dumps(json.loads(response.content), indent=1))
| true
| true
|
1c432a895617a75e605c71e8d82467918f9d18b3
| 1,287
|
py
|
Python
|
brmflask/blueprints/static/views.py
|
BRMWebDev/BRMFlask
|
203031aae8a2d2db3c435bb6b39ccda6a90913a1
|
[
"MIT"
] | 1
|
2016-09-14T19:20:07.000Z
|
2016-09-14T19:20:07.000Z
|
brmflask/blueprints/static/views.py
|
BRMWebDev/BRMFlask
|
203031aae8a2d2db3c435bb6b39ccda6a90913a1
|
[
"MIT"
] | 1
|
2018-06-12T14:06:01.000Z
|
2018-06-12T14:06:01.000Z
|
brmflask/blueprints/static/views.py
|
brmullikin/BRMFlask
|
203031aae8a2d2db3c435bb6b39ccda6a90913a1
|
[
"MIT"
] | null | null | null |
"""Blueprint: static views."""
from flask import (
make_response,
render_template,
jsonify,
current_app,
abort
)
from brmflask.utils.routing import template_path
from . import static
@static.route('/list-configs')
def list_configs():
"""Return the config dictionary if in Debug mode."""
if current_app.debug:
return jsonify(current_app.config)
else:
abort(404)
@static.route('/humans.txt')
def humans():
"""Return Humans readable information about the website."""
if current_app.config['STATIC_ROUTES'].get('humans', None):
response = make_response(
render_template(
template_path(current_app.config['STATIC_ROUTES']['humans'])
)
)
response.headers['Content-type'] = "text/plain"
return response
else:
abort(404)
@static.route('/robots.txt')
def robots():
"""Robot Crawler txt for search engines."""
if current_app.config['STATIC_ROUTES'].get('robots', None):
response = make_response(
render_template(
template_path(current_app.config['STATIC_ROUTES']['robots'])
)
)
response.headers['Content-type'] = "text/plain"
return response
else:
abort(404)
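# Example app config these views assume (illustrative values): each entry is
# passed through template_path() and rendered with Content-type text/plain.
#
#   STATIC_ROUTES = {'humans': 'humans.txt', 'robots': 'robots.txt'}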
| 25.74
| 76
| 0.61927
|
from flask import (
make_response,
render_template,
jsonify,
current_app,
abort
)
from brmflask.utils.routing import template_path
from . import static
@static.route('/list-configs')
def list_configs():
if current_app.debug:
return jsonify(current_app.config)
else:
abort(404)
@static.route('/humans.txt')
def humans():
if current_app.config['STATIC_ROUTES'].get('humans', None):
response = make_response(
render_template(
template_path(current_app.config['STATIC_ROUTES']['humans'])
)
)
response.headers['Content-type'] = "text/plain"
return response
else:
abort(404)
@static.route('/robots.txt')
def robots():
if current_app.config['STATIC_ROUTES'].get('robots', None):
response = make_response(
render_template(
template_path(current_app.config['STATIC_ROUTES']['robots'])
)
)
response.headers['Content-type'] = "text/plain"
return response
else:
abort(404)
| true
| true
|
1c432aaff07554254b56f50f567f20d8c2595cdc
| 7,611
|
py
|
Python
|
readability_transformers/features/lf/Syntactic/PhrF.py
|
OneTheta/readability-transformers
|
3c122c98a90c67add8eafad16563b269d5e3124a
|
[
"Apache-2.0"
] | 1
|
2022-01-26T10:55:59.000Z
|
2022-01-26T10:55:59.000Z
|
readability_transformers/features/lf/Syntactic/PhrF.py
|
OneTheta/readability-transformers
|
3c122c98a90c67add8eafad16563b269d5e3124a
|
[
"Apache-2.0"
] | null | null | null |
readability_transformers/features/lf/Syntactic/PhrF.py
|
OneTheta/readability-transformers
|
3c122c98a90c67add8eafad16563b269d5e3124a
|
[
"Apache-2.0"
] | 2
|
2021-10-14T22:53:57.000Z
|
2022-01-26T10:53:32.000Z
|
# -*- coding: UTF-8 -*-
"""
Software: LingFeat - Comprehensive Linguistic Features for Readability Assessment
Page: PhrF.py (Phrasal Features)
License: CC-BY-SA 4.0
Original Author: Bruce W. Lee (이웅성) @brucewlee
Affiliation 1: LXPER AI, Seoul, South Korea
Affiliation 2: University of Pennsylvania, PA, USA
Contributing Author: -
Affiliation : -
References:
>>> Phrasal features inspired by
Publication 1: Feng, Lijun, Martin Jansche, Matt Huenerfauth, and Noémie Elhadad. "A Comparison of Features for Automatic Readability Assessment." In Coling 2010: Posters, pp. 276-284. 2010.
Publication 2: Lu, Xiaofei. "Automatic analysis of syntactic complexity in second language writing." International journal of corpus linguistics 15, no. 4 (2010): 474-496.
"""
from ..utils import division
def retrieve(SuPar, sent_token_list, n_token, n_sent):
to_NoPhr_C = 0
to_VePhr_C = 0
to_SuPhr_C = 0
to_PrPhr_C = 0
to_AjPhr_C = 0
to_AvPhr_C = 0
for sent in sent_token_list:
dataset = SuPar.predict([sent], prob=True, verbose=False)
parsed_tree = str(dataset.sentences)
to_NoPhr_C += parsed_tree.count("NP")
to_VePhr_C += parsed_tree.count("VP")
to_SuPhr_C += parsed_tree.count("SBAR")
to_PrPhr_C += parsed_tree.count("PP")
to_AjPhr_C += parsed_tree.count("ADJP")
to_AvPhr_C += parsed_tree.count("ADVP")
result = {
"to_NoPhr_C": to_NoPhr_C,
"as_NoPhr_C": float(division(to_NoPhr_C,n_sent)),
"at_NoPhr_C": float(division(to_NoPhr_C,n_token)),
"ra_NoVeP_C": float(division(to_NoPhr_C,to_VePhr_C)),
"ra_NoSuP_C": float(division(to_NoPhr_C,to_SuPhr_C)),
"ra_NoPrP_C": float(division(to_NoPhr_C,to_PrPhr_C)),
"ra_NoAjP_C": float(division(to_NoPhr_C,to_AjPhr_C)),
"ra_NoAvP_C": float(division(to_NoPhr_C,to_AvPhr_C)),
"to_VePhr_C": to_VePhr_C,
"as_VePhr_C": float(division(to_VePhr_C,n_sent)),
"at_VePhr_C": float(division(to_VePhr_C,n_token)),
"ra_VeNoP_C": float(division(to_VePhr_C,to_NoPhr_C)),
"ra_VeSuP_C": float(division(to_VePhr_C,to_SuPhr_C)),
"ra_VePrP_C": float(division(to_VePhr_C,to_PrPhr_C)),
"ra_VeAjP_C": float(division(to_VePhr_C,to_AjPhr_C)),
"ra_VeAvP_C": float(division(to_VePhr_C,to_AvPhr_C)),
"to_SuPhr_C": to_SuPhr_C,
"as_SuPhr_C": float(division(to_SuPhr_C,n_sent)),
"at_SuPhr_C": float(division(to_SuPhr_C,n_token)),
"ra_SuNoP_C": float(division(to_SuPhr_C,to_NoPhr_C)),
"ra_SuVeP_C": float(division(to_SuPhr_C,to_VePhr_C)),
"ra_SuPrP_C": float(division(to_SuPhr_C,to_PrPhr_C)),
"ra_SuAjP_C": float(division(to_SuPhr_C,to_AjPhr_C)),
"ra_SuAvP_C": float(division(to_SuPhr_C,to_AvPhr_C)),
"to_PrPhr_C": to_PrPhr_C,
"as_PrPhr_C": float(division(to_PrPhr_C,n_sent)),
"at_PrPhr_C": float(division(to_PrPhr_C,n_token)),
"ra_PrNoP_C": float(division(to_PrPhr_C,to_NoPhr_C)),
"ra_PrVeP_C": float(division(to_PrPhr_C,to_VePhr_C)),
"ra_PrSuP_C": float(division(to_PrPhr_C,to_SuPhr_C)),
"ra_PrAjP_C": float(division(to_PrPhr_C,to_AjPhr_C)),
"ra_PrAvP_C": float(division(to_PrPhr_C,to_AvPhr_C)),
"to_AjPhr_C": to_AjPhr_C,
"as_AjPhr_C": float(division(to_AjPhr_C,n_sent)),
"at_AjPhr_C": float(division(to_AjPhr_C,n_token)),
"ra_AjNoP_C": float(division(to_AjPhr_C,to_NoPhr_C)),
"ra_AjVeP_C": float(division(to_AjPhr_C,to_VePhr_C)),
"ra_AjSuP_C": float(division(to_AjPhr_C,to_SuPhr_C)),
"ra_AjPrP_C": float(division(to_AjPhr_C,to_PrPhr_C)),
"ra_AjAvP_C": float(division(to_AjPhr_C,to_AvPhr_C)),
"to_AvPhr_C": to_AvPhr_C,
"as_AvPhr_C": float(division(to_AvPhr_C,n_sent)),
"at_AvPhr_C": float(division(to_AvPhr_C,n_token)),
"ra_AvNoP_C": float(division(to_AvPhr_C,to_NoPhr_C)),
"ra_AvVeP_C": float(division(to_AvPhr_C,to_VePhr_C)),
"ra_AvSuP_C": float(division(to_AvPhr_C,to_SuPhr_C)),
"ra_AvPrP_C": float(division(to_AvPhr_C,to_PrPhr_C)),
"ra_AvAjP_C": float(division(to_AvPhr_C,to_AjPhr_C)),
}
return result
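# Usage sketch (assumes the `supar` constituency parser; the model name is
# illustrative and may differ):
#
#   from supar import Parser
#   SuPar = Parser.load('crf-con-en')
#   feats = retrieve(SuPar, [['The', 'cat', 'sat', '.']], n_token=4, n_sent=1)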
def retrieve_supar_optimized(dataset_list, sent_token_list, n_token, n_sent):
to_NoPhr_C = 0
to_VePhr_C = 0
to_SuPhr_C = 0
to_PrPhr_C = 0
to_AjPhr_C = 0
to_AvPhr_C = 0
for idx, sent in enumerate(sent_token_list):
dataset = dataset_list[idx]
parsed_tree = str(dataset.sentences)
to_NoPhr_C += parsed_tree.count("NP")
to_VePhr_C += parsed_tree.count("VP")
to_SuPhr_C += parsed_tree.count("SBAR")
to_PrPhr_C += parsed_tree.count("PP")
to_AjPhr_C += parsed_tree.count("ADJP")
to_AvPhr_C += parsed_tree.count("ADVP")
result = {
"to_NoPhr_C": to_NoPhr_C,
"as_NoPhr_C": float(division(to_NoPhr_C,n_sent)),
"at_NoPhr_C": float(division(to_NoPhr_C,n_token)),
"ra_NoVeP_C": float(division(to_NoPhr_C,to_VePhr_C)),
"ra_NoSuP_C": float(division(to_NoPhr_C,to_SuPhr_C)),
"ra_NoPrP_C": float(division(to_NoPhr_C,to_PrPhr_C)),
"ra_NoAjP_C": float(division(to_NoPhr_C,to_AjPhr_C)),
"ra_NoAvP_C": float(division(to_NoPhr_C,to_AvPhr_C)),
"to_VePhr_C": to_VePhr_C,
"as_VePhr_C": float(division(to_VePhr_C,n_sent)),
"at_VePhr_C": float(division(to_VePhr_C,n_token)),
"ra_VeNoP_C": float(division(to_VePhr_C,to_NoPhr_C)),
"ra_VeSuP_C": float(division(to_VePhr_C,to_SuPhr_C)),
"ra_VePrP_C": float(division(to_VePhr_C,to_PrPhr_C)),
"ra_VeAjP_C": float(division(to_VePhr_C,to_AjPhr_C)),
"ra_VeAvP_C": float(division(to_VePhr_C,to_AvPhr_C)),
"to_SuPhr_C": to_SuPhr_C,
"as_SuPhr_C": float(division(to_SuPhr_C,n_sent)),
"at_SuPhr_C": float(division(to_SuPhr_C,n_token)),
"ra_SuNoP_C": float(division(to_SuPhr_C,to_NoPhr_C)),
"ra_SuVeP_C": float(division(to_SuPhr_C,to_VePhr_C)),
"ra_SuPrP_C": float(division(to_SuPhr_C,to_PrPhr_C)),
"ra_SuAjP_C": float(division(to_SuPhr_C,to_AjPhr_C)),
"ra_SuAvP_C": float(division(to_SuPhr_C,to_AvPhr_C)),
"to_PrPhr_C": to_PrPhr_C,
"as_PrPhr_C": float(division(to_PrPhr_C,n_sent)),
"at_PrPhr_C": float(division(to_PrPhr_C,n_token)),
"ra_PrNoP_C": float(division(to_PrPhr_C,to_NoPhr_C)),
"ra_PrVeP_C": float(division(to_PrPhr_C,to_VePhr_C)),
"ra_PrSuP_C": float(division(to_PrPhr_C,to_SuPhr_C)),
"ra_PrAjP_C": float(division(to_PrPhr_C,to_AjPhr_C)),
"ra_PrAvP_C": float(division(to_PrPhr_C,to_AvPhr_C)),
"to_AjPhr_C": to_AjPhr_C,
"as_AjPhr_C": float(division(to_AjPhr_C,n_sent)),
"at_AjPhr_C": float(division(to_AjPhr_C,n_token)),
"ra_AjNoP_C": float(division(to_AjPhr_C,to_NoPhr_C)),
"ra_AjVeP_C": float(division(to_AjPhr_C,to_VePhr_C)),
"ra_AjSuP_C": float(division(to_AjPhr_C,to_SuPhr_C)),
"ra_AjPrP_C": float(division(to_AjPhr_C,to_PrPhr_C)),
"ra_AjAvP_C": float(division(to_AjPhr_C,to_AvPhr_C)),
"to_AvPhr_C": to_AvPhr_C,
"as_AvPhr_C": float(division(to_AvPhr_C,n_sent)),
"at_AvPhr_C": float(division(to_AvPhr_C,n_token)),
"ra_AvNoP_C": float(division(to_AvPhr_C,to_NoPhr_C)),
"ra_AvVeP_C": float(division(to_AvPhr_C,to_VePhr_C)),
"ra_AvSuP_C": float(division(to_AvPhr_C,to_SuPhr_C)),
"ra_AvPrP_C": float(division(to_AvPhr_C,to_PrPhr_C)),
"ra_AvAjP_C": float(division(to_AvPhr_C,to_AjPhr_C)),
}
return result
| 46.127273
| 190
| 0.681119
|
from ..utils import division
def retrieve(SuPar, sent_token_list, n_token, n_sent):
to_NoPhr_C = 0
to_VePhr_C = 0
to_SuPhr_C = 0
to_PrPhr_C = 0
to_AjPhr_C = 0
to_AvPhr_C = 0
for sent in sent_token_list:
dataset = SuPar.predict([sent], prob=True, verbose=False)
parsed_tree = str(dataset.sentences)
to_NoPhr_C += parsed_tree.count("NP")
to_VePhr_C += parsed_tree.count("VP")
to_SuPhr_C += parsed_tree.count("SBAR")
to_PrPhr_C += parsed_tree.count("PP")
to_AjPhr_C += parsed_tree.count("ADJP")
to_AvPhr_C += parsed_tree.count("ADVP")
result = {
"to_NoPhr_C": to_NoPhr_C,
"as_NoPhr_C": float(division(to_NoPhr_C,n_sent)),
"at_NoPhr_C": float(division(to_NoPhr_C,n_token)),
"ra_NoVeP_C": float(division(to_NoPhr_C,to_VePhr_C)),
"ra_NoSuP_C": float(division(to_NoPhr_C,to_SuPhr_C)),
"ra_NoPrP_C": float(division(to_NoPhr_C,to_PrPhr_C)),
"ra_NoAjP_C": float(division(to_NoPhr_C,to_AjPhr_C)),
"ra_NoAvP_C": float(division(to_NoPhr_C,to_AvPhr_C)),
"to_VePhr_C": to_VePhr_C,
"as_VePhr_C": float(division(to_VePhr_C,n_sent)),
"at_VePhr_C": float(division(to_VePhr_C,n_token)),
"ra_VeNoP_C": float(division(to_VePhr_C,to_NoPhr_C)),
"ra_VeSuP_C": float(division(to_VePhr_C,to_SuPhr_C)),
"ra_VePrP_C": float(division(to_VePhr_C,to_PrPhr_C)),
"ra_VeAjP_C": float(division(to_VePhr_C,to_AjPhr_C)),
"ra_VeAvP_C": float(division(to_VePhr_C,to_AvPhr_C)),
"to_SuPhr_C": to_SuPhr_C,
"as_SuPhr_C": float(division(to_SuPhr_C,n_sent)),
"at_SuPhr_C": float(division(to_SuPhr_C,n_token)),
"ra_SuNoP_C": float(division(to_SuPhr_C,to_NoPhr_C)),
"ra_SuVeP_C": float(division(to_SuPhr_C,to_VePhr_C)),
"ra_SuPrP_C": float(division(to_SuPhr_C,to_PrPhr_C)),
"ra_SuAjP_C": float(division(to_SuPhr_C,to_AjPhr_C)),
"ra_SuAvP_C": float(division(to_SuPhr_C,to_AvPhr_C)),
"to_PrPhr_C": to_PrPhr_C,
"as_PrPhr_C": float(division(to_PrPhr_C,n_sent)),
"at_PrPhr_C": float(division(to_PrPhr_C,n_token)),
"ra_PrNoP_C": float(division(to_PrPhr_C,to_NoPhr_C)),
"ra_PrVeP_C": float(division(to_PrPhr_C,to_VePhr_C)),
"ra_PrSuP_C": float(division(to_PrPhr_C,to_SuPhr_C)),
"ra_PrAjP_C": float(division(to_PrPhr_C,to_AjPhr_C)),
"ra_PrAvP_C": float(division(to_PrPhr_C,to_AvPhr_C)),
"to_AjPhr_C": to_AjPhr_C,
"as_AjPhr_C": float(division(to_AjPhr_C,n_sent)),
"at_AjPhr_C": float(division(to_AjPhr_C,n_token)),
"ra_AjNoP_C": float(division(to_AjPhr_C,to_NoPhr_C)),
"ra_AjVeP_C": float(division(to_AjPhr_C,to_VePhr_C)),
"ra_AjSuP_C": float(division(to_AjPhr_C,to_SuPhr_C)),
"ra_AjPrP_C": float(division(to_AjPhr_C,to_PrPhr_C)),
"ra_AjAvP_C": float(division(to_AjPhr_C,to_AvPhr_C)),
"to_AvPhr_C": to_AvPhr_C,
"as_AvPhr_C": float(division(to_AvPhr_C,n_sent)),
"at_AvPhr_C": float(division(to_AvPhr_C,n_token)),
"ra_AvNoP_C": float(division(to_AvPhr_C,to_NoPhr_C)),
"ra_AvVeP_C": float(division(to_AvPhr_C,to_VePhr_C)),
"ra_AvSuP_C": float(division(to_AvPhr_C,to_SuPhr_C)),
"ra_AvPrP_C": float(division(to_AvPhr_C,to_PrPhr_C)),
"ra_AvAjP_C": float(division(to_AvPhr_C,to_AjPhr_C)),
}
return result
def retrieve_supar_optimized(dataset_list, sent_token_list, n_token, n_sent):
to_NoPhr_C = 0
to_VePhr_C = 0
to_SuPhr_C = 0
to_PrPhr_C = 0
to_AjPhr_C = 0
to_AvPhr_C = 0
for idx, sent in enumerate(sent_token_list):
dataset = dataset_list[idx]
parsed_tree = str(dataset.sentences)
to_NoPhr_C += parsed_tree.count("NP")
to_VePhr_C += parsed_tree.count("VP")
to_SuPhr_C += parsed_tree.count("SBAR")
to_PrPhr_C += parsed_tree.count("PP")
to_AjPhr_C += parsed_tree.count("ADJP")
to_AvPhr_C += parsed_tree.count("ADVP")
result = {
"to_NoPhr_C": to_NoPhr_C,
"as_NoPhr_C": float(division(to_NoPhr_C,n_sent)),
"at_NoPhr_C": float(division(to_NoPhr_C,n_token)),
"ra_NoVeP_C": float(division(to_NoPhr_C,to_VePhr_C)),
"ra_NoSuP_C": float(division(to_NoPhr_C,to_SuPhr_C)),
"ra_NoPrP_C": float(division(to_NoPhr_C,to_PrPhr_C)),
"ra_NoAjP_C": float(division(to_NoPhr_C,to_AjPhr_C)),
"ra_NoAvP_C": float(division(to_NoPhr_C,to_AvPhr_C)),
"to_VePhr_C": to_VePhr_C,
"as_VePhr_C": float(division(to_VePhr_C,n_sent)),
"at_VePhr_C": float(division(to_VePhr_C,n_token)),
"ra_VeNoP_C": float(division(to_VePhr_C,to_NoPhr_C)),
"ra_VeSuP_C": float(division(to_VePhr_C,to_SuPhr_C)),
"ra_VePrP_C": float(division(to_VePhr_C,to_PrPhr_C)),
"ra_VeAjP_C": float(division(to_VePhr_C,to_AjPhr_C)),
"ra_VeAvP_C": float(division(to_VePhr_C,to_AvPhr_C)),
"to_SuPhr_C": to_SuPhr_C,
"as_SuPhr_C": float(division(to_SuPhr_C,n_sent)),
"at_SuPhr_C": float(division(to_SuPhr_C,n_token)),
"ra_SuNoP_C": float(division(to_SuPhr_C,to_NoPhr_C)),
"ra_SuVeP_C": float(division(to_SuPhr_C,to_VePhr_C)),
"ra_SuPrP_C": float(division(to_SuPhr_C,to_PrPhr_C)),
"ra_SuAjP_C": float(division(to_SuPhr_C,to_AjPhr_C)),
"ra_SuAvP_C": float(division(to_SuPhr_C,to_AvPhr_C)),
"to_PrPhr_C": to_PrPhr_C,
"as_PrPhr_C": float(division(to_PrPhr_C,n_sent)),
"at_PrPhr_C": float(division(to_PrPhr_C,n_token)),
"ra_PrNoP_C": float(division(to_PrPhr_C,to_NoPhr_C)),
"ra_PrVeP_C": float(division(to_PrPhr_C,to_VePhr_C)),
"ra_PrSuP_C": float(division(to_PrPhr_C,to_SuPhr_C)),
"ra_PrAjP_C": float(division(to_PrPhr_C,to_AjPhr_C)),
"ra_PrAvP_C": float(division(to_PrPhr_C,to_AvPhr_C)),
"to_AjPhr_C": to_AjPhr_C,
"as_AjPhr_C": float(division(to_AjPhr_C,n_sent)),
"at_AjPhr_C": float(division(to_AjPhr_C,n_token)),
"ra_AjNoP_C": float(division(to_AjPhr_C,to_NoPhr_C)),
"ra_AjVeP_C": float(division(to_AjPhr_C,to_VePhr_C)),
"ra_AjSuP_C": float(division(to_AjPhr_C,to_SuPhr_C)),
"ra_AjPrP_C": float(division(to_AjPhr_C,to_PrPhr_C)),
"ra_AjAvP_C": float(division(to_AjPhr_C,to_AvPhr_C)),
"to_AvPhr_C": to_AvPhr_C,
"as_AvPhr_C": float(division(to_AvPhr_C,n_sent)),
"at_AvPhr_C": float(division(to_AvPhr_C,n_token)),
"ra_AvNoP_C": float(division(to_AvPhr_C,to_NoPhr_C)),
"ra_AvVeP_C": float(division(to_AvPhr_C,to_VePhr_C)),
"ra_AvSuP_C": float(division(to_AvPhr_C,to_SuPhr_C)),
"ra_AvPrP_C": float(division(to_AvPhr_C,to_PrPhr_C)),
"ra_AvAjP_C": float(division(to_AvPhr_C,to_AjPhr_C)),
}
return result
| true
| true
|
1c432b06b387490c72510d448aefe7e7c3c08760
| 949
|
py
|
Python
|
arrays/kids_candies.py
|
wtlow003/leetcode-daily
|
e1d9c74b55e5b3106731a324d70a510e03b3b21f
|
[
"MIT"
] | null | null | null |
arrays/kids_candies.py
|
wtlow003/leetcode-daily
|
e1d9c74b55e5b3106731a324d70a510e03b3b21f
|
[
"MIT"
] | null | null | null |
arrays/kids_candies.py
|
wtlow003/leetcode-daily
|
e1d9c74b55e5b3106731a324d70a510e03b3b21f
|
[
"MIT"
] | 1
|
2022-01-05T17:52:41.000Z
|
2022-01-05T17:52:41.000Z
|
"""
1431. Kids With the Greatest Number of Candies
Given the array candies and the integer extraCandies,
where candies[i] represents the number of candies that the ith kid has.
For each kid check if there is a way to distribute extraCandies among the kids
such that he or she can have the greatest number of candies among them.
Notice that multiple kids can have the greatest number of candies.
Example:
Input: candies = [4,2,1,1,2], extraCandies = 1
Output: [true,false,false,false,false]
Explanation: There is only 1 extra candy, therefore only kid 1 will have
the greatest number of candies among the kids regardless of
who takes the extra candy.
"""
# Runtime: 44ms
from typing import List

class Solution:
def kidsWithCandies(self, candies: List[int], extraCandies: int) -> List[bool]:
arr = []
for candy in candies:
increase_candy = candy + extraCandies
arr.append(increase_candy >= max(candies))
return arr
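# Note: the loop above recomputes max(candies) on every iteration. Hoisting it
# out gives a linear-time variant (sketch):
#
#   class Solution:
#       def kidsWithCandies(self, candies: List[int], extraCandies: int) -> List[bool]:
#           greatest = max(candies)
#           return [candy + extraCandies >= greatest for candy in candies]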
| 30.612903
| 83
| 0.724974
|
from typing import List
class Solution:
def kidsWithCandies(self, candies: List[int], extraCandies: int) -> List[bool]:
arr = []
for candy in candies:
increase_candy = candy + extraCandies
arr.append(increase_candy >= max(candies))
return arr
| true
| true
|
1c432b1c13b25e0bb055da76df5793b653390c8a
| 3,345
|
py
|
Python
|
setup.py
|
OceanPang/qdtrack
|
b905d2a599a87242d9cf3d01b1833eff155bf688
|
[
"Apache-2.0"
] | 241
|
2020-11-28T03:28:03.000Z
|
2022-03-31T13:27:01.000Z
|
setup.py
|
msg4rajesh/qdtrack
|
b28af06c7fdb6ce99b967302c0c7e9a557d508bf
|
[
"Apache-2.0"
] | 61
|
2020-12-11T20:04:18.000Z
|
2022-03-05T13:49:05.000Z
|
setup.py
|
msg4rajesh/qdtrack
|
b28af06c7fdb6ce99b967302c0c7e9a557d508bf
|
[
"Apache-2.0"
] | 37
|
2020-12-26T08:41:54.000Z
|
2022-03-29T21:52:44.000Z
|
import os
import subprocess
import time
from setuptools import find_packages, setup
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
version_file = 'qdtrack/version.py'
def get_git_hash():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
sha = out.strip().decode('ascii')
except OSError:
sha = 'unknown'
return sha
def get_hash():
if os.path.exists('.git'):
sha = get_git_hash()[:7]
elif os.path.exists(version_file):
try:
from qdtrack.version import __version__
sha = __version__.split('+')[-1]
except ImportError:
raise ImportError('Unable to get git version')
else:
sha = 'unknown'
return sha
def write_version_py():
content = """# GENERATED VERSION FILE
# TIME: {}
__version__ = '{}'
short_version = '{}'
version_info = ({})
"""
sha = get_hash()
with open('qdtrack/VERSION', 'r') as f:
SHORT_VERSION = f.read().strip()
VERSION_INFO = ', '.join(SHORT_VERSION.split('.'))
VERSION = SHORT_VERSION + '+' + sha
version_file_str = content.format(time.asctime(), VERSION, SHORT_VERSION,
VERSION_INFO)
with open(version_file, 'w') as f:
f.write(version_file_str)
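# For illustration, the generated qdtrack/version.py would contain something
# like (timestamp, version and hash are made up):
#
#   # GENERATED VERSION FILE
#   # TIME: Mon Jan  4 12:00:00 2021
#   __version__ = '0.1.0+1a2b3c4'
#   short_version = '0.1.0'
#   version_info = (0, 1, 0)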
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
def get_requirements(filename='requirements.txt'):
here = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(here, filename), 'r') as f:
requires = [line.replace('\n', '') for line in f.readlines()]
for i, req in enumerate(requires):
if req.startswith("git"):
pkg_name = req.split("/")[-1].split(".")[0]
req = pkg_name
requires[i] = req
return requires
if __name__ == '__main__':
write_version_py()
setup(
name='qdtrack',
version=get_version(),
description='A template for pytorch projects.',
long_description=readme(),
packages=find_packages(exclude=('configs', 'tools', 'demo')),
package_data={'qdtrack.ops': ['*/*.so']},
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
license='Apache License 2.0',
setup_requires=['pytest-runner', 'cython', 'numpy'],
tests_require=['pytest', 'xdoctest'],
install_requires=get_requirements(),
zip_safe=False)
| 28.589744
| 77
| 0.571001
|
import os
import subprocess
import time
from setuptools import find_packages, setup
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
version_file = 'qdtrack/version.py'
def get_git_hash():
def _minimal_ext_cmd(cmd):
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
sha = out.strip().decode('ascii')
except OSError:
sha = 'unknown'
return sha
def get_hash():
if os.path.exists('.git'):
sha = get_git_hash()[:7]
elif os.path.exists(version_file):
try:
from qdtrack.version import __version__
sha = __version__.split('+')[-1]
except ImportError:
raise ImportError('Unable to get git version')
else:
sha = 'unknown'
return sha
def write_version_py():
content = """# GENERATED VERSION FILE
# TIME: {}
__version__ = '{}'
short_version = '{}'
version_info = ({})
"""
sha = get_hash()
with open('qdtrack/VERSION', 'r') as f:
SHORT_VERSION = f.read().strip()
VERSION_INFO = ', '.join(SHORT_VERSION.split('.'))
VERSION = SHORT_VERSION + '+' + sha
version_file_str = content.format(time.asctime(), VERSION, SHORT_VERSION,
VERSION_INFO)
with open(version_file, 'w') as f:
f.write(version_file_str)
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
def get_requirements(filename='requirements.txt'):
here = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(here, filename), 'r') as f:
requires = [line.replace('\n', '') for line in f.readlines()]
for i, req in enumerate(requires):
if req.startswith("git"):
pkg_name = req.split("/")[-1].split(".")[0]
req = pkg_name
requires[i] = req
return requires
if __name__ == '__main__':
write_version_py()
setup(
name='qdtrack',
version=get_version(),
description='A template for pytorch projects.',
long_description=readme(),
packages=find_packages(exclude=('configs', 'tools', 'demo')),
package_data={'qdtrack.ops': ['*/*.so']},
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
license='Apache License 2.0',
setup_requires=['pytest-runner', 'cython', 'numpy'],
tests_require=['pytest', 'xdoctest'],
install_requires=get_requirements(),
zip_safe=False)
| true
| true
|
1c432b857ecf1b0513a984dd6a0888ac62e3d769
| 4,980
|
py
|
Python
|
model_zoo/official/cv/retinanet/eval.py
|
kungfu-team/mindspore-bert
|
71501cf52ae01db9d6a73fb64bcfe68a6509dc32
|
[
"Apache-2.0"
] | 2
|
2021-07-08T13:10:42.000Z
|
2021-11-08T02:48:57.000Z
|
model_zoo/official/cv/retinanet/eval.py
|
peixinhou/mindspore
|
fcb2ec2779b753e95c762cf292b23bd81d1f561b
|
[
"Apache-2.0"
] | null | null | null |
model_zoo/official/cv/retinanet/eval.py
|
peixinhou/mindspore
|
fcb2ec2779b753e95c762cf292b23bd81d1f561b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Evaluation for retinanet"""
import os
import argparse
import time
import numpy as np
from mindspore import context, Tensor
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from src.retinanet import retinanet50, resnet50, retinanetInferWithDecoder
from src.dataset import create_retinanet_dataset, data_to_mindrecord_byte_image, voc_data_to_mindrecord
from src.config import config
from src.coco_eval import metrics
from src.box_utils import default_boxes
def retinanet_eval(dataset_path, ckpt_path):
"""retinanet evaluation."""
batch_size = 1
ds = create_retinanet_dataset(dataset_path, batch_size=batch_size, repeat_num=1, is_training=False)
backbone = resnet50(config.num_classes)
net = retinanet50(backbone, config)
net = retinanetInferWithDecoder(net, Tensor(default_boxes), config)
print("Load Checkpoint!")
param_dict = load_checkpoint(ckpt_path)
net.init_parameters_data()
load_param_into_net(net, param_dict)
net.set_train(False)
i = batch_size
total = ds.get_dataset_size() * batch_size
start = time.time()
pred_data = []
print("\n========================================\n")
print("total images num: ", total)
print("Processing, please wait a moment.")
for data in ds.create_dict_iterator(output_numpy=True):
img_id = data['img_id']
img_np = data['image']
image_shape = data['image_shape']
output = net(Tensor(img_np))
for batch_idx in range(img_np.shape[0]):
pred_data.append({"boxes": output[0].asnumpy()[batch_idx],
"box_scores": output[1].asnumpy()[batch_idx],
"img_id": int(np.squeeze(img_id[batch_idx])),
"image_shape": image_shape[batch_idx]})
percent = round(i / total * 100., 2)
        print(f' {percent} [{i}/{total}]', end='\r')
i += batch_size
cost_time = int((time.time() - start) * 1000)
print(f' 100% [{total}/{total}] cost {cost_time} ms')
mAP = metrics(pred_data)
print("\n========================================\n")
print(f"mAP: {mAP}")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='retinanet evaluation')
parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
parser.add_argument("--dataset", type=str, default="coco", help="Dataset, default is coco.")
parser.add_argument("--run_platform", type=str, default="Ascend", choices=("Ascend"),
help="run platform, only support Ascend.")
args_opt = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.run_platform, device_id=args_opt.device_id)
prefix = "retinanet_eval.mindrecord"
mindrecord_dir = config.mindrecord_dir
mindrecord_file = os.path.join(mindrecord_dir, prefix + "0")
if args_opt.dataset == "voc":
config.coco_root = config.voc_root
if not os.path.exists(mindrecord_file):
if not os.path.isdir(mindrecord_dir):
os.makedirs(mindrecord_dir)
if args_opt.dataset == "coco":
if os.path.isdir(config.coco_root):
print("Create Mindrecord.")
data_to_mindrecord_byte_image("coco", False, prefix)
print("Create Mindrecord Done, at {}".format(mindrecord_dir))
else:
print("coco_root not exits.")
elif args_opt.dataset == "voc":
if os.path.isdir(config.voc_dir) and os.path.isdir(config.voc_root):
print("Create Mindrecord.")
voc_data_to_mindrecord(mindrecord_dir, False, prefix)
print("Create Mindrecord Done, at {}".format(mindrecord_dir))
else:
print("voc_root or voc_dir not exits.")
else:
if os.path.isdir(config.image_dir) and os.path.exists(config.anno_path):
print("Create Mindrecord.")
data_to_mindrecord_byte_image("other", False, prefix)
print("Create Mindrecord Done, at {}".format(mindrecord_dir))
else:
print("IMAGE_DIR or ANNO_PATH not exits.")
print("Start Eval!")
retinanet_eval(mindrecord_file, config.checkpoint_path)
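One subtlety worth illustrating from the argument parsing above: argparse's choices must be a real sequence, and in Python a parenthesized string is not a one-element tuple; only the trailing comma makes it one. Against a bare string, membership testing also matches substrings. A minimal sketch:

# ("Ascend") is just a parenthesized string; ("Ascend",) is a one-element tuple.
assert ("Ascend") == "Ascend"
assert ("Ascend",)[0] == "Ascend"
# Membership against a bare string matches substrings, so argparse would
# silently accept values such as "A" or "Asc":
assert "A" in "Ascend" and "Asc" in "Ascend"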
| 43.684211
| 115
| 0.644779
|
import os
import argparse
import time
import numpy as np
from mindspore import context, Tensor
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from src.retinanet import retinanet50, resnet50, retinanetInferWithDecoder
from src.dataset import create_retinanet_dataset, data_to_mindrecord_byte_image, voc_data_to_mindrecord
from src.config import config
from src.coco_eval import metrics
from src.box_utils import default_boxes
def retinanet_eval(dataset_path, ckpt_path):
batch_size = 1
ds = create_retinanet_dataset(dataset_path, batch_size=batch_size, repeat_num=1, is_training=False)
backbone = resnet50(config.num_classes)
net = retinanet50(backbone, config)
net = retinanetInferWithDecoder(net, Tensor(default_boxes), config)
print("Load Checkpoint!")
param_dict = load_checkpoint(ckpt_path)
net.init_parameters_data()
load_param_into_net(net, param_dict)
net.set_train(False)
i = batch_size
total = ds.get_dataset_size() * batch_size
start = time.time()
pred_data = []
print("\n========================================\n")
print("total images num: ", total)
print("Processing, please wait a moment.")
for data in ds.create_dict_iterator(output_numpy=True):
img_id = data['img_id']
img_np = data['image']
image_shape = data['image_shape']
output = net(Tensor(img_np))
for batch_idx in range(img_np.shape[0]):
pred_data.append({"boxes": output[0].asnumpy()[batch_idx],
"box_scores": output[1].asnumpy()[batch_idx],
"img_id": int(np.squeeze(img_id[batch_idx])),
"image_shape": image_shape[batch_idx]})
percent = round(i / total * 100., 2)
        print(f' {percent} [{i}/{total}]', end='\r')
i += batch_size
cost_time = int((time.time() - start) * 1000)
print(f' 100% [{total}/{total}] cost {cost_time} ms')
mAP = metrics(pred_data)
print("\n========================================\n")
print(f"mAP: {mAP}")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='retinanet evaluation')
parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
parser.add_argument("--dataset", type=str, default="coco", help="Dataset, default is coco.")
parser.add_argument("--run_platform", type=str, default="Ascend", choices=("Ascend"),
help="run platform, only support Ascend.")
args_opt = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.run_platform, device_id=args_opt.device_id)
prefix = "retinanet_eval.mindrecord"
mindrecord_dir = config.mindrecord_dir
mindrecord_file = os.path.join(mindrecord_dir, prefix + "0")
if args_opt.dataset == "voc":
config.coco_root = config.voc_root
if not os.path.exists(mindrecord_file):
if not os.path.isdir(mindrecord_dir):
os.makedirs(mindrecord_dir)
if args_opt.dataset == "coco":
if os.path.isdir(config.coco_root):
print("Create Mindrecord.")
data_to_mindrecord_byte_image("coco", False, prefix)
print("Create Mindrecord Done, at {}".format(mindrecord_dir))
else:
print("coco_root not exits.")
elif args_opt.dataset == "voc":
if os.path.isdir(config.voc_dir) and os.path.isdir(config.voc_root):
print("Create Mindrecord.")
voc_data_to_mindrecord(mindrecord_dir, False, prefix)
print("Create Mindrecord Done, at {}".format(mindrecord_dir))
else:
print("voc_root or voc_dir not exits.")
else:
if os.path.isdir(config.image_dir) and os.path.exists(config.anno_path):
print("Create Mindrecord.")
data_to_mindrecord_byte_image("other", False, prefix)
print("Create Mindrecord Done, at {}".format(mindrecord_dir))
else:
print("IMAGE_DIR or ANNO_PATH not exits.")
print("Start Eval!")
retinanet_eval(mindrecord_file, config.checkpoint_path)
| true
| true
|
1c432bcae48e4b7101e228590bdfc40cee2ef124
| 1,622
|
py
|
Python
|
CK_MainScript.py
|
KL-Turner/machL-Sleep-Scoring
|
48a43bba32ee265b48b3fda666a1a92a2fe93032
|
[
"MIT"
] | null | null | null |
CK_MainScript.py
|
KL-Turner/machL-Sleep-Scoring
|
48a43bba32ee265b48b3fda666a1a92a2fe93032
|
[
"MIT"
] | null | null | null |
CK_MainScript.py
|
KL-Turner/machL-Sleep-Scoring
|
48a43bba32ee265b48b3fda666a1a92a2fe93032
|
[
"MIT"
] | null | null | null |
"""
Written by Christina Echagarruga and Kevin L. Turner
Purpose: apply all the necessary pre-processing steps for the matlab -> python workflow to sleep score
Inputs: n matlab files with the extension PythonData.mat, and one file titled animalNotes_baselines.mat with the time indeces
and filenames for resting baseline calculation and subsequent normalization.
Outputs: two csv files, one with the processed data and one with the normalized data from respective resting baselines.
one csv file which is essentially the excel version of the animalNotes_baselines structure.
two subplot pdfs, one for the raw data and one for the normalized data.
Last Revised: April 2nd, 2019
"""
from PreProcData import ConvMAT2CSV
from PreProcData import CalcRestingBaselines
from PreProcData import NormalizeData
# edit the data and code directories below for your machine
rootDir = '/Users/kevinturner/Documents/Jupyter Sleep Scoring/'
codeDir = '/Users/kevinturner/Documents/Core-Analysis/Spyder/'
# convert the MATLAB file with all the raw data into a csv file. resample it down to 30 Hz and apply the necessary filters for the
# respective signals. create a subplot figure showing the raw data.
ConvMAT2CSV(rootDir, codeDir)
# use the start:end time indices for the baseline files to find the resting baseline for each parameter per day.
uniqueDayArray = CalcRestingBaselines(rootDir, codeDir)
# apply the baseline values for each unique day to each respective signal. create a subplot showing the normalized data.
# save a csv file with the normalized values
NormalizeData(rootDir, codeDir, uniqueDayArray)
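The two absolute paths above are machine-specific and must be edited per user; a minimal alternative sketch, assuming the same folder layout under the current user's home directory (pathlib is standard library):

from pathlib import Path

home = Path.home()
rootDir = f"{home}/Documents/Jupyter Sleep Scoring/"
codeDir = f"{home}/Documents/Core-Analysis/Spyder/"

The trailing slashes are kept to match the original strings, since the PreProcData helpers appear to be handed directory prefixes rather than joined paths.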
| 47.705882
| 130
| 0.795931
|
from PreProcData import ConvMAT2CSV
from PreProcData import CalcRestingBaselines
from PreProcData import NormalizeData
rootDir = '/Users/kevinturner/Documents/Jupyter Sleep Scoring/'
codeDir = '/Users/kevinturner/Documents/Core-Analysis/Spyder/'
ConvMAT2CSV(rootDir, codeDir)
uniqueDayArray = CalcRestingBaselines(rootDir, codeDir)
NormalizeData(rootDir, codeDir, uniqueDayArray)
| true
| true
|
1c432ce5d445e34617ca5e5e4d09085f17c8434a
| 5,251
|
py
|
Python
|
src/sagemaker/mxnet/model.py
|
evanfwelch/sagemaker-python-sdk
|
8b3d113a23c09995c6a6a5d12d4364e27bfd549d
|
[
"Apache-2.0"
] | null | null | null |
src/sagemaker/mxnet/model.py
|
evanfwelch/sagemaker-python-sdk
|
8b3d113a23c09995c6a6a5d12d4364e27bfd549d
|
[
"Apache-2.0"
] | 2
|
2018-04-09T17:53:10.000Z
|
2018-04-09T17:53:38.000Z
|
src/sagemaker/mxnet/model.py
|
evanfwelch/sagemaker-python-sdk
|
8b3d113a23c09995c6a6a5d12d4364e27bfd549d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import sagemaker
from sagemaker.fw_utils import create_image_uri, model_code_key_prefix
from sagemaker.model import FrameworkModel, MODEL_SERVER_WORKERS_PARAM_NAME
from sagemaker.mxnet.defaults import MXNET_VERSION
from sagemaker.predictor import RealTimePredictor, json_serializer, json_deserializer
class MXNetPredictor(RealTimePredictor):
"""A RealTimePredictor for inference against MXNet Endpoints.
This is able to serialize Python lists, dictionaries, and numpy arrays to multidimensional tensors for MXNet
inference."""
def __init__(self, endpoint_name, sagemaker_session=None):
"""Initialize an ``MXNetPredictor``.
Args:
endpoint_name (str): The name of the endpoint to perform inference on.
sagemaker_session (sagemaker.session.Session): Session object which manages interactions with
Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one
using the default AWS configuration chain.
"""
super(MXNetPredictor, self).__init__(endpoint_name, sagemaker_session, json_serializer, json_deserializer)
class MXNetModel(FrameworkModel):
"""An MXNet SageMaker ``Model`` that can be deployed to a SageMaker ``Endpoint``."""
__framework_name__ = 'mxnet'
def __init__(self, model_data, role, entry_point, image=None, py_version='py2', framework_version=MXNET_VERSION,
predictor_cls=MXNetPredictor, model_server_workers=None, **kwargs):
"""Initialize an MXNetModel.
Args:
model_data (str): The S3 location of a SageMaker model data ``.tar.gz`` file.
role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs
that create Amazon SageMaker endpoints use this role to access training data and model artifacts.
After the endpoint is created, the inference code might use the IAM role,
if it needs to access an AWS resource.
entry_point (str): Path (absolute or relative) to the Python source file which should be executed
as the entry point to model hosting. This should be compatible with either Python 2.7 or Python 3.5.
image (str): A Docker image URI (default: None). If not specified, a default image for MXNet will be used.
py_version (str): Python version you want to use for executing your model training code (default: 'py2').
framework_version (str): MXNet version you want to use for executing your model training code.
predictor_cls (callable[str, sagemaker.session.Session]): A function to call to create a predictor
with an endpoint name and SageMaker ``Session``. If specified, ``deploy()`` returns the result of
invoking this function on the created endpoint name.
model_server_workers (int): Optional. The number of worker processes used by the inference server.
If None, server will use one worker per vCPU.
**kwargs: Keyword arguments passed to the ``FrameworkModel`` initializer.
"""
super(MXNetModel, self).__init__(model_data, image, role, entry_point, predictor_cls=predictor_cls,
**kwargs)
self.py_version = py_version
self.framework_version = framework_version
self.model_server_workers = model_server_workers
def prepare_container_def(self, instance_type):
"""Return a container definition with framework configuration set in model environment variables.
Args:
instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'.
Returns:
dict[str, str]: A container definition object usable with the CreateModel API.
"""
deploy_image = self.image
if not deploy_image:
region_name = self.sagemaker_session.boto_session.region_name
deploy_image = create_image_uri(region_name, self.__framework_name__, instance_type,
self.framework_version, self.py_version)
deploy_key_prefix = model_code_key_prefix(self.key_prefix, self.name, deploy_image)
self._upload_code(deploy_key_prefix)
deploy_env = dict(self.env)
deploy_env.update(self._framework_env_vars())
if self.model_server_workers:
deploy_env[MODEL_SERVER_WORKERS_PARAM_NAME.upper()] = str(self.model_server_workers)
return sagemaker.container_def(deploy_image, self.model_data, deploy_env)
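A minimal end-to-end sketch of using this class (the S3 location, IAM role, and entry-point filename below are placeholders, not values taken from this file):

from sagemaker.mxnet import MXNetModel

# model_data, role, and entry_point are illustrative placeholders.
model = MXNetModel(model_data='s3://my-bucket/model.tar.gz',
                   role='arn:aws:iam::123456789012:role/SageMakerRole',
                   entry_point='inference.py')
predictor = model.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
# deploy() returns an MXNetPredictor, so requests and responses round-trip as JSON.

prepare_container_def() runs inside deploy(), which is where the image URI, code upload, and environment variables above come together.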
| 54.697917
| 118
| 0.703675
|
from __future__ import absolute_import
import sagemaker
from sagemaker.fw_utils import create_image_uri, model_code_key_prefix
from sagemaker.model import FrameworkModel, MODEL_SERVER_WORKERS_PARAM_NAME
from sagemaker.mxnet.defaults import MXNET_VERSION
from sagemaker.predictor import RealTimePredictor, json_serializer, json_deserializer
class MXNetPredictor(RealTimePredictor):
def __init__(self, endpoint_name, sagemaker_session=None):
super(MXNetPredictor, self).__init__(endpoint_name, sagemaker_session, json_serializer, json_deserializer)
class MXNetModel(FrameworkModel):
__framework_name__ = 'mxnet'
def __init__(self, model_data, role, entry_point, image=None, py_version='py2', framework_version=MXNET_VERSION,
predictor_cls=MXNetPredictor, model_server_workers=None, **kwargs):
super(MXNetModel, self).__init__(model_data, image, role, entry_point, predictor_cls=predictor_cls,
**kwargs)
self.py_version = py_version
self.framework_version = framework_version
self.model_server_workers = model_server_workers
def prepare_container_def(self, instance_type):
deploy_image = self.image
if not deploy_image:
region_name = self.sagemaker_session.boto_session.region_name
deploy_image = create_image_uri(region_name, self.__framework_name__, instance_type,
self.framework_version, self.py_version)
deploy_key_prefix = model_code_key_prefix(self.key_prefix, self.name, deploy_image)
self._upload_code(deploy_key_prefix)
deploy_env = dict(self.env)
deploy_env.update(self._framework_env_vars())
if self.model_server_workers:
deploy_env[MODEL_SERVER_WORKERS_PARAM_NAME.upper()] = str(self.model_server_workers)
return sagemaker.container_def(deploy_image, self.model_data, deploy_env)
| true
| true
|
1c432e7d125192df507522f510ae7b88db0c26f1
| 83
|
py
|
Python
|
import_coords/__main__.py
|
gwvsol/ImportingCSVtoPostgres
|
0d23418b5f7c2c981b020d7e3d5a76905ebf0d45
|
[
"MIT"
] | null | null | null |
import_coords/__main__.py
|
gwvsol/ImportingCSVtoPostgres
|
0d23418b5f7c2c981b020d7e3d5a76905ebf0d45
|
[
"MIT"
] | null | null | null |
import_coords/__main__.py
|
gwvsol/ImportingCSVtoPostgres
|
0d23418b5f7c2c981b020d7e3d5a76905ebf0d45
|
[
"MIT"
] | null | null | null |
from .import_coords import run_import
if __name__ == "__main__":
run_import()
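Because this module is the package's __main__.py, the guard above makes the package runnable with `python -m import_coords`; a minimal equivalent invocation, assuming the package is importable (module path inferred from the relative import above):

from import_coords.import_coords import run_import

run_import()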
| 16.6
| 37
| 0.73494
|
from .import_coords import run_import
if __name__ == "__main__":
run_import()
| true
| true
|