body
stringlengths 26
98.2k
| body_hash
int64 -9,222,864,604,528,158,000
9,221,803,474B
| docstring
stringlengths 1
16.8k
| path
stringlengths 5
230
| name
stringlengths 1
96
| repository_name
stringlengths 7
89
| lang
stringclasses 1
value | body_without_docstring
stringlengths 20
98.2k
|
|---|---|---|---|---|---|---|---|
def select_threshold(model=None, data=None, curve=None, FPR=None, FNR=None, thread_count=(- 1)):
'\n Selects a threshold for prediction.\n\n Parameters\n ----------\n model : catboost.CatBoost\n The trained model.\n\n data : catboost.Pool or list of catboost.Pool\n Set of samples to build ROC curve with.\n If set, curve parameter must not be set.\n\n curve : tuple of three arrays (fpr, tpr, thresholds)\n ROC curve points in format of get_roc_curve returned value.\n If set, data parameter must not be set.\n\n FPR : desired false-positive rate\n\n FNR : desired false-negative rate (only one of FPR and FNR should be chosen)\n\n thread_count : int (default=-1)\n Number of threads to work with.\n If -1, then the number of threads is set to the number of cores.\n\n Returns\n -------\n threshold : double\n '
if (data is not None):
if (curve is not None):
raise CatboostError('Only one of the parameters data and curve should be set.')
if (model is None):
raise CatboostError('model and data parameters should be set when curve parameter is None.')
if (type(data) == Pool):
data = [data]
if (not isinstance(data, list)):
raise CatboostError('data must be a catboost.Pool or list of pools.')
for pool in data:
if (not isinstance(pool, Pool)):
raise CatboostError('one of data pools is not catboost.Pool')
elif (curve is not None):
if ((not (isinstance(curve, list) or isinstance(curve, tuple))) or (len(curve) != 3)):
raise CatboostError('curve must be list or tuple of three arrays (fpr, tpr, thresholds).')
else:
raise CatboostError('One of the parameters data and curve should be set.')
return _select_threshold(model._object, data, curve, FPR, FNR, thread_count)
| 5,361,321,896,420,254,000
|
Selects a threshold for prediction.
Parameters
----------
model : catboost.CatBoost
The trained model.
data : catboost.Pool or list of catboost.Pool
Set of samples to build ROC curve with.
If set, curve parameter must not be set.
curve : tuple of three arrays (fpr, tpr, thresholds)
ROC curve points in format of get_roc_curve returned value.
If set, data parameter must not be set.
FPR : desired false-positive rate
FNR : desired false-negative rate (only one of FPR and FNR should be chosen)
thread_count : int (default=-1)
Number of threads to work with.
If -1, then the number of threads is set to the number of cores.
Returns
-------
threshold : double
|
catboost/python-package/catboost/utils.py
|
select_threshold
|
infected-mushroom/catboost
|
python
|
def select_threshold(model=None, data=None, curve=None, FPR=None, FNR=None, thread_count=(- 1)):
'\n Selects a threshold for prediction.\n\n Parameters\n ----------\n model : catboost.CatBoost\n The trained model.\n\n data : catboost.Pool or list of catboost.Pool\n Set of samples to build ROC curve with.\n If set, curve parameter must not be set.\n\n curve : tuple of three arrays (fpr, tpr, thresholds)\n ROC curve points in format of get_roc_curve returned value.\n If set, data parameter must not be set.\n\n FPR : desired false-positive rate\n\n FNR : desired false-negative rate (only one of FPR and FNR should be chosen)\n\n thread_count : int (default=-1)\n Number of threads to work with.\n If -1, then the number of threads is set to the number of cores.\n\n Returns\n -------\n threshold : double\n '
if (data is not None):
if (curve is not None):
raise CatboostError('Only one of the parameters data and curve should be set.')
if (model is None):
raise CatboostError('model and data parameters should be set when curve parameter is None.')
if (type(data) == Pool):
data = [data]
if (not isinstance(data, list)):
raise CatboostError('data must be a catboost.Pool or list of pools.')
for pool in data:
if (not isinstance(pool, Pool)):
raise CatboostError('one of data pools is not catboost.Pool')
elif (curve is not None):
if ((not (isinstance(curve, list) or isinstance(curve, tuple))) or (len(curve) != 3)):
raise CatboostError('curve must be list or tuple of three arrays (fpr, tpr, thresholds).')
else:
raise CatboostError('One of the parameters data and curve should be set.')
return _select_threshold(model._object, data, curve, FPR, FNR, thread_count)
|
def make_color_data(data_dict):
'\n make a dataset consisting of the i-band mag and the five colors\n Returns:\n --------\n input_data: (nd-array)\n array of imag and 5 colors\n '
input_data = data_dict['mag_i_lsst']
bands = ['u', 'g', 'r', 'i', 'z', 'y']
for i in range(5):
band1 = data_dict[f'mag_{bands[i]}_lsst']
band2 = data_dict[f'mag_{bands[(i + 1)]}_lsst']
input_data = np.vstack((input_data, (band1 - band2)))
return input_data.T
| 7,638,051,411,495,268,000
|
make a dataset consisting of the i-band mag and the five colors
Returns:
--------
input_data: (nd-array)
array of imag and 5 colors
|
rail/estimation/algos/sklearn_nn.py
|
make_color_data
|
pwhatfield/RAIL
|
python
|
def make_color_data(data_dict):
'\n make a dataset consisting of the i-band mag and the five colors\n Returns:\n --------\n input_data: (nd-array)\n array of imag and 5 colors\n '
input_data = data_dict['mag_i_lsst']
bands = ['u', 'g', 'r', 'i', 'z', 'y']
for i in range(5):
band1 = data_dict[f'mag_{bands[i]}_lsst']
band2 = data_dict[f'mag_{bands[(i + 1)]}_lsst']
input_data = np.vstack((input_data, (band1 - band2)))
return input_data.T
|
def __init__(self, base_config, config_dict):
'\n Parameters:\n -----------\n run_dict: dict\n dictionary of all variables read in from the run_params\n values in the yaml file\n '
super().__init__(base_config=base_config, config_dict=config_dict)
inputs = self.config_dict['run_params']
self.width = inputs['width']
self.zmin = inputs['zmin']
self.zmax = inputs['zmax']
self.nzbins = inputs['nzbins']
np.random.seed(71)
| 153,539,649,409,833,100
|
Parameters:
-----------
run_dict: dict
dictionary of all variables read in from the run_params
values in the yaml file
|
rail/estimation/algos/sklearn_nn.py
|
__init__
|
pwhatfield/RAIL
|
python
|
def __init__(self, base_config, config_dict):
'\n Parameters:\n -----------\n run_dict: dict\n dictionary of all variables read in from the run_params\n values in the yaml file\n '
super().__init__(base_config=base_config, config_dict=config_dict)
inputs = self.config_dict['run_params']
self.width = inputs['width']
self.zmin = inputs['zmin']
self.zmax = inputs['zmax']
self.nzbins = inputs['nzbins']
np.random.seed(71)
|
def inform(self):
'\n train the NN model\n '
speczs = self.training_data['redshift']
print('stacking some data...')
color_data = make_color_data(self.training_data)
input_data = regularize_data(color_data)
simplenn = sknn.MLPRegressor(hidden_layer_sizes=(12, 12), activation='tanh', solver='lbfgs')
simplenn.fit(input_data, speczs)
self.model = simplenn
| -8,780,587,404,442,871,000
|
train the NN model
|
rail/estimation/algos/sklearn_nn.py
|
inform
|
pwhatfield/RAIL
|
python
|
def inform(self):
'\n \n '
speczs = self.training_data['redshift']
print('stacking some data...')
color_data = make_color_data(self.training_data)
input_data = regularize_data(color_data)
simplenn = sknn.MLPRegressor(hidden_layer_sizes=(12, 12), activation='tanh', solver='lbfgs')
simplenn.fit(input_data, speczs)
self.model = simplenn
|
def get_agent_distribution_builder(distribution, python_version):
"\n Find agent distribution docker image for smoke testing.\n :param distribution: distribution name on which agent package should be installed.\n Possible values are in the 'ALL_DISTRIBUTION_NAMES' constant.\n :param python_version: Version of the python interpreter in the distribution.\n "
distribution = distribution.lower()
dockerfiles_directory_path = (Path(__file__).parent / 'distribution_dockerfiles')
fpm_builder_dockerfile_path = (dockerfiles_directory_path / 'Dockerfile.fpm_package_builder')
fpm_package_builder_dockerfile_content = fpm_builder_dockerfile_path.read_text()
if (distribution == AMAZONLINUX):
class AmazonLinuxSmokeImageBuilder(AgentImageBuilder):
PYTHON_VERSION = python_version
COPY_AGENT_SOURCE = True
IMAGE_TAG = 'scalyr_agent_smoke_{0}_{1}'.format(distribution, python_version)
@classmethod
def get_dockerfile_content(cls):
dockerfile_path = (dockerfiles_directory_path / 'Dockerfile.amazonlinux')
dockerfile_content = dockerfile_path.read_text()
return dockerfile_content.format(fpm_package_builder_dockerfile=fpm_package_builder_dockerfile_content, python_version=cls.PYTHON_VERSION)
return AmazonLinuxSmokeImageBuilder
elif (distribution == UBUNTU):
class _UbuntuSmokeImageBuilder(AgentImageBuilder):
PYTHON_VERSION = python_version
COPY_AGENT_SOURCE = True
IMAGE_TAG = 'scalyr_agent_smoke_{0}_{1}'.format(distribution, python_version)
@classmethod
def get_dockerfile_content(cls):
dockerfile_path = (dockerfiles_directory_path / 'Dockerfile.ubuntu')
dockerfile_content = dockerfile_path.read_text()
return dockerfile_content.format(fpm_package_builder_dockerfile=fpm_package_builder_dockerfile_content, python_package_name=('python' if (cls.PYTHON_VERSION == 'python2') else cls.PYTHON_VERSION), python_version=cls.PYTHON_VERSION)
return _UbuntuSmokeImageBuilder
else:
raise IOError('Can not find such distribution: {0}'.format(distribution))
| -431,017,511,871,805,100
|
Find agent distribution docker image for smoke testing.
:param distribution: distribution name on which agent package should be installed.
Possible values are in the 'ALL_DISTRIBUTION_NAMES' constant.
:param python_version: Version of the python interpreter in the distribution.
|
smoke_tests/tools/package/__init__.py
|
get_agent_distribution_builder
|
zak905/scalyr-agent-2
|
python
|
def get_agent_distribution_builder(distribution, python_version):
"\n Find agent distribution docker image for smoke testing.\n :param distribution: distribution name on which agent package should be installed.\n Possible values are in the 'ALL_DISTRIBUTION_NAMES' constant.\n :param python_version: Version of the python interpreter in the distribution.\n "
distribution = distribution.lower()
dockerfiles_directory_path = (Path(__file__).parent / 'distribution_dockerfiles')
fpm_builder_dockerfile_path = (dockerfiles_directory_path / 'Dockerfile.fpm_package_builder')
fpm_package_builder_dockerfile_content = fpm_builder_dockerfile_path.read_text()
if (distribution == AMAZONLINUX):
class AmazonLinuxSmokeImageBuilder(AgentImageBuilder):
PYTHON_VERSION = python_version
COPY_AGENT_SOURCE = True
IMAGE_TAG = 'scalyr_agent_smoke_{0}_{1}'.format(distribution, python_version)
@classmethod
def get_dockerfile_content(cls):
dockerfile_path = (dockerfiles_directory_path / 'Dockerfile.amazonlinux')
dockerfile_content = dockerfile_path.read_text()
return dockerfile_content.format(fpm_package_builder_dockerfile=fpm_package_builder_dockerfile_content, python_version=cls.PYTHON_VERSION)
return AmazonLinuxSmokeImageBuilder
elif (distribution == UBUNTU):
class _UbuntuSmokeImageBuilder(AgentImageBuilder):
PYTHON_VERSION = python_version
COPY_AGENT_SOURCE = True
IMAGE_TAG = 'scalyr_agent_smoke_{0}_{1}'.format(distribution, python_version)
@classmethod
def get_dockerfile_content(cls):
dockerfile_path = (dockerfiles_directory_path / 'Dockerfile.ubuntu')
dockerfile_content = dockerfile_path.read_text()
return dockerfile_content.format(fpm_package_builder_dockerfile=fpm_package_builder_dockerfile_content, python_package_name=('python' if (cls.PYTHON_VERSION == 'python2') else cls.PYTHON_VERSION), python_version=cls.PYTHON_VERSION)
return _UbuntuSmokeImageBuilder
else:
raise IOError('Can not find such distribution: {0}'.format(distribution))
|
def readVectorFromFile(UFile):
' \n\tArg: \n\ttauFile: The directory path of OpenFOAM vector file (e.g., velocity)\n\n\tRegurn: \n\tvector: Matrix of vector \n\t'
resMid = extractVector(UFile)
fout = open('Utemp', 'w')
glob_pattern = resMid.group()
glob_pattern = re.sub('\\(', '', glob_pattern)
glob_pattern = re.sub('\\)', '', glob_pattern)
fout.write(glob_pattern)
fout.close()
vector = np.loadtxt('Utemp')
return vector
| 7,155,401,464,782,885,000
|
Arg:
tauFile: The directory path of OpenFOAM vector file (e.g., velocity)
Regurn:
vector: Matrix of vector
|
demo0/foamFileOperation.py
|
readVectorFromFile
|
Jianxun-Wang/PICNNSR
|
python
|
def readVectorFromFile(UFile):
' \n\tArg: \n\ttauFile: The directory path of OpenFOAM vector file (e.g., velocity)\n\n\tRegurn: \n\tvector: Matrix of vector \n\t'
resMid = extractVector(UFile)
fout = open('Utemp', 'w')
glob_pattern = resMid.group()
glob_pattern = re.sub('\\(', , glob_pattern)
glob_pattern = re.sub('\\)', , glob_pattern)
fout.write(glob_pattern)
fout.close()
vector = np.loadtxt('Utemp')
return vector
|
def readScalarFromFile(fileName):
' \n\n\tArg: \n\tfileName: The file name of OpenFOAM scalar field\n\n\tRegurn: \n\ta vector of scalar field \n\t'
resMid = extractScalar(fileName)
fout = open('temp.txt', 'w')
glob_patternx = resMid.group()
glob_patternx = re.sub('\\(', '', glob_patternx)
glob_patternx = re.sub('\\)', '', glob_patternx)
fout.write(glob_patternx)
fout.close()
scalarVec = np.loadtxt('temp.txt')
return scalarVec
| -5,231,281,418,861,886,000
|
Arg:
fileName: The file name of OpenFOAM scalar field
Regurn:
a vector of scalar field
|
demo0/foamFileOperation.py
|
readScalarFromFile
|
Jianxun-Wang/PICNNSR
|
python
|
def readScalarFromFile(fileName):
' \n\n\tArg: \n\tfileName: The file name of OpenFOAM scalar field\n\n\tRegurn: \n\ta vector of scalar field \n\t'
resMid = extractScalar(fileName)
fout = open('temp.txt', 'w')
glob_patternx = resMid.group()
glob_patternx = re.sub('\\(', , glob_patternx)
glob_patternx = re.sub('\\)', , glob_patternx)
fout.write(glob_patternx)
fout.close()
scalarVec = np.loadtxt('temp.txt')
return scalarVec
|
def extractVector(vectorFile):
' Function is using regular expression select Vector value out\n\t\n\tArgs:\n\tUFile: The directory path of file: U\n\n\tReturns:\n\tresMid: the U as (Ux1,Uy1,Uz1);(Ux2,Uy2,Uz2);........\n\t'
fin = open(vectorFile, 'r')
line = fin.read()
fin.close()
patternMid = re.compile('\n\t(\n\t\\( # match(\n\t[\\+\\-]?[\\d]+([\\.][\\d]*)?([Ee][+-]?[\\d]+)? # match figures\n\t(\\ ) # match space\n\t[\\+\\-]?[\\d]+([\\.][\\d]*)?([Ee][+-]?[\\d]+)? # match figures\n\t(\\ ) # match space\n\t[\\+\\-]?[\\d]+([\\.][\\d]*)?([Ee][+-]?[\\d]+)? # match figures\n\t\\) # match )\n\t\\n # match next line\n\t)+ # search greedly\n\t', (re.DOTALL | re.VERBOSE))
resMid = patternMid.search(line)
return resMid
| -6,847,724,994,685,365,000
|
Function is using regular expression select Vector value out
Args:
UFile: The directory path of file: U
Returns:
resMid: the U as (Ux1,Uy1,Uz1);(Ux2,Uy2,Uz2);........
|
demo0/foamFileOperation.py
|
extractVector
|
Jianxun-Wang/PICNNSR
|
python
|
def extractVector(vectorFile):
' Function is using regular expression select Vector value out\n\t\n\tArgs:\n\tUFile: The directory path of file: U\n\n\tReturns:\n\tresMid: the U as (Ux1,Uy1,Uz1);(Ux2,Uy2,Uz2);........\n\t'
fin = open(vectorFile, 'r')
line = fin.read()
fin.close()
patternMid = re.compile('\n\t(\n\t\\( # match(\n\t[\\+\\-]?[\\d]+([\\.][\\d]*)?([Ee][+-]?[\\d]+)? # match figures\n\t(\\ ) # match space\n\t[\\+\\-]?[\\d]+([\\.][\\d]*)?([Ee][+-]?[\\d]+)? # match figures\n\t(\\ ) # match space\n\t[\\+\\-]?[\\d]+([\\.][\\d]*)?([Ee][+-]?[\\d]+)? # match figures\n\t\\) # match )\n\t\\n # match next line\n\t)+ # search greedly\n\t', (re.DOTALL | re.VERBOSE))
resMid = patternMid.search(line)
return resMid
|
def extractScalar(scalarFile):
' subFunction of readTurbStressFromFile\n\t\tUsing regular expression to select scalar value out \n\t\n\tArgs:\n\tscalarFile: The directory path of file of scalar\n\n\tReturns:\n\tresMid: scalar selected;\n\t\t\tyou need use resMid.group() to see the content.\n\t'
fin = open(scalarFile, 'r')
line = fin.read()
fin.close()
patternMid = re.compile('\n\t\t\\( # match"("\n\t\t\\n # match next line\n\t\t(\n\t\t[\\+\\-]?[\\d]+([\\.][\\d]*)?([Ee][+-]?[\\d]+)? # match figures\n\t\t\\n # match next line\n\t\t)+ # search greedly\n\t\t\\) # match")"\n\t', (re.DOTALL | re.VERBOSE))
resMid = patternMid.search(line)
return resMid
| 278,923,963,489,089,860
|
subFunction of readTurbStressFromFile
Using regular expression to select scalar value out
Args:
scalarFile: The directory path of file of scalar
Returns:
resMid: scalar selected;
you need use resMid.group() to see the content.
|
demo0/foamFileOperation.py
|
extractScalar
|
Jianxun-Wang/PICNNSR
|
python
|
def extractScalar(scalarFile):
' subFunction of readTurbStressFromFile\n\t\tUsing regular expression to select scalar value out \n\t\n\tArgs:\n\tscalarFile: The directory path of file of scalar\n\n\tReturns:\n\tresMid: scalar selected;\n\t\t\tyou need use resMid.group() to see the content.\n\t'
fin = open(scalarFile, 'r')
line = fin.read()
fin.close()
patternMid = re.compile('\n\t\t\\( # match"("\n\t\t\\n # match next line\n\t\t(\n\t\t[\\+\\-]?[\\d]+([\\.][\\d]*)?([Ee][+-]?[\\d]+)? # match figures\n\t\t\\n # match next line\n\t\t)+ # search greedly\n\t\t\\) # match")"\n\t', (re.DOTALL | re.VERBOSE))
resMid = patternMid.search(line)
return resMid
|
def evaluate(X: np.ndarray, A: float=7.0, B: float=0.1) -> np.ndarray:
"Non-monotonic Ishigami-Homma three parameter test function:\n\n `f(x) = \\sin(x_{1}) + A \\sin(x_{2})^2 + Bx^{4}_{3}\\sin(x_{1})`\n\n This test function is commonly used to benchmark global sensitivity \n methods as variance-based sensitivities of this function can be \n analytically determined.\n\n See listed references below.\n\n In [2], the expected first-order indices are:\n\n x1: 0.3139\n x2: 0.4424\n x3: 0.0\n\n when A = 7, B = 0.1 when conducting Sobol' analysis with the\n Saltelli sampling method with a sample size of 1000.\n\n\n Parameters\n ----------\n X : np.ndarray\n An `N*D` array holding values for each parameter, where `N` is the \n number of samples and `D` is the number of parameters \n (in this case, three).\n A : float\n Constant `A` parameter\n B : float\n Constant `B` parameter\n\n Returns\n -------\n Y : np.ndarray\n\n References\n ----------\n .. [1] Ishigami, T., Homma, T., 1990. \n An importance quantification technique in uncertainty analysis for \n computer models. \n Proceedings. First International Symposium on Uncertainty Modeling \n and Analysis. \n https://doi.org/10.1109/ISUMA.1990.151285\n\n .. [2] Saltelli, A., Ratto, M., Andres, T., Campolongo, F., Cariboni, J., \n Gatelli, D., Saisana, M., Tarantola, S., 2008. \n Global Sensitivity Analysis: The Primer. Wiley, West Sussex, U.K.\n https://dx.doi.org/10.1002/9780470725184\n "
Y = np.zeros(X.shape[0])
Y = ((np.sin(X[:, 0]) + (A * np.power(np.sin(X[:, 1]), 2))) + ((B * np.power(X[:, 2], 4)) * np.sin(X[:, 0])))
return Y
| 5,111,868,029,495,385,000
|
Non-monotonic Ishigami-Homma three parameter test function:
`f(x) = \sin(x_{1}) + A \sin(x_{2})^2 + Bx^{4}_{3}\sin(x_{1})`
This test function is commonly used to benchmark global sensitivity
methods as variance-based sensitivities of this function can be
analytically determined.
See listed references below.
In [2], the expected first-order indices are:
x1: 0.3139
x2: 0.4424
x3: 0.0
when A = 7, B = 0.1 when conducting Sobol' analysis with the
Saltelli sampling method with a sample size of 1000.
Parameters
----------
X : np.ndarray
An `N*D` array holding values for each parameter, where `N` is the
number of samples and `D` is the number of parameters
(in this case, three).
A : float
Constant `A` parameter
B : float
Constant `B` parameter
Returns
-------
Y : np.ndarray
References
----------
.. [1] Ishigami, T., Homma, T., 1990.
An importance quantification technique in uncertainty analysis for
computer models.
Proceedings. First International Symposium on Uncertainty Modeling
and Analysis.
https://doi.org/10.1109/ISUMA.1990.151285
.. [2] Saltelli, A., Ratto, M., Andres, T., Campolongo, F., Cariboni, J.,
Gatelli, D., Saisana, M., Tarantola, S., 2008.
Global Sensitivity Analysis: The Primer. Wiley, West Sussex, U.K.
https://dx.doi.org/10.1002/9780470725184
|
src/SALib/test_functions/Ishigami.py
|
evaluate
|
QianWanghhu/SALib
|
python
|
def evaluate(X: np.ndarray, A: float=7.0, B: float=0.1) -> np.ndarray:
"Non-monotonic Ishigami-Homma three parameter test function:\n\n `f(x) = \\sin(x_{1}) + A \\sin(x_{2})^2 + Bx^{4}_{3}\\sin(x_{1})`\n\n This test function is commonly used to benchmark global sensitivity \n methods as variance-based sensitivities of this function can be \n analytically determined.\n\n See listed references below.\n\n In [2], the expected first-order indices are:\n\n x1: 0.3139\n x2: 0.4424\n x3: 0.0\n\n when A = 7, B = 0.1 when conducting Sobol' analysis with the\n Saltelli sampling method with a sample size of 1000.\n\n\n Parameters\n ----------\n X : np.ndarray\n An `N*D` array holding values for each parameter, where `N` is the \n number of samples and `D` is the number of parameters \n (in this case, three).\n A : float\n Constant `A` parameter\n B : float\n Constant `B` parameter\n\n Returns\n -------\n Y : np.ndarray\n\n References\n ----------\n .. [1] Ishigami, T., Homma, T., 1990. \n An importance quantification technique in uncertainty analysis for \n computer models. \n Proceedings. First International Symposium on Uncertainty Modeling \n and Analysis. \n https://doi.org/10.1109/ISUMA.1990.151285\n\n .. [2] Saltelli, A., Ratto, M., Andres, T., Campolongo, F., Cariboni, J., \n Gatelli, D., Saisana, M., Tarantola, S., 2008. \n Global Sensitivity Analysis: The Primer. Wiley, West Sussex, U.K.\n https://dx.doi.org/10.1002/9780470725184\n "
Y = np.zeros(X.shape[0])
Y = ((np.sin(X[:, 0]) + (A * np.power(np.sin(X[:, 1]), 2))) + ((B * np.power(X[:, 2], 4)) * np.sin(X[:, 0])))
return Y
|
def _cell(self, x: torch.Tensor, i: torch.Tensor, states: Tuple[(torch.Tensor, torch.Tensor)]) -> Tuple[(torch.Tensor, torch.Tensor)]:
'Single time step logic of EA-LSTM cell'
(h_0, c_0) = states
gates = self.dynamic_gates(h_0, x)
(f, o, g) = gates.chunk(3, 1)
c_1 = ((torch.sigmoid(f) * c_0) + (i * torch.tanh(g)))
h_1 = (torch.sigmoid(o) * torch.tanh(c_1))
return (h_1, c_1)
| 7,109,717,501,171,930,000
|
Single time step logic of EA-LSTM cell
|
neuralhydrology/modelzoo/ealstm.py
|
_cell
|
visr/neuralhydrology
|
python
|
def _cell(self, x: torch.Tensor, i: torch.Tensor, states: Tuple[(torch.Tensor, torch.Tensor)]) -> Tuple[(torch.Tensor, torch.Tensor)]:
(h_0, c_0) = states
gates = self.dynamic_gates(h_0, x)
(f, o, g) = gates.chunk(3, 1)
c_1 = ((torch.sigmoid(f) * c_0) + (i * torch.tanh(g)))
h_1 = (torch.sigmoid(o) * torch.tanh(c_1))
return (h_1, c_1)
|
def forward(self, data: Dict[(str, torch.Tensor)]) -> Dict[(str, torch.Tensor)]:
'Perform a forward pass on the EA-LSTM model.\n\n Parameters\n ----------\n data : Dict[str, torch.Tensor]\n Dictionary, containing input features as key-value pairs.\n\n Returns\n -------\n Dict[str, torch.Tensor]\n Model outputs and intermediate states as a dictionary. \n - `y_hat`: model predictions of shape [batch size, sequence length, number of target variables].\n - `h_n`: hidden state at the last time step of the sequence of shape \n [batch size, sequence length, number of target variables].\n - `c_n`: cell state at the last time step of the sequence of shape \n [batch size, sequence length, number of target variables].\n '
x_d = data['x_d'].transpose(0, 1)
if (('x_s' in data) and ('x_one_hot' in data)):
x_s = torch.cat([data['x_s'], data['x_one_hot']], dim=(- 1))
elif ('x_s' in data):
x_s = data['x_s']
elif ('x_one_hot' in data):
x_s = data['x_one_hot']
else:
raise ValueError('Need x_s or x_one_hot in forward pass.')
h_t = x_d.data.new(x_d.shape[1], self._hidden_size).zero_()
c_t = x_d.data.new(x_d.shape[1], self._hidden_size).zero_()
(h_n, c_n) = ([], [])
i = torch.sigmoid(self.input_gate(x_s))
for x_dt in x_d:
(h_t, c_t) = self._cell(x_dt, i, (h_t, c_t))
h_n.append(h_t)
c_n.append(c_t)
h_n = torch.stack(h_n, 0).transpose(0, 1)
c_n = torch.stack(c_n, 0).transpose(0, 1)
pred = {'h_n': h_n, 'c_n': c_n}
pred.update(self.head(self.dropout(h_n)))
return pred
| -4,725,919,336,773,386,000
|
Perform a forward pass on the EA-LSTM model.
Parameters
----------
data : Dict[str, torch.Tensor]
Dictionary, containing input features as key-value pairs.
Returns
-------
Dict[str, torch.Tensor]
Model outputs and intermediate states as a dictionary.
- `y_hat`: model predictions of shape [batch size, sequence length, number of target variables].
- `h_n`: hidden state at the last time step of the sequence of shape
[batch size, sequence length, number of target variables].
- `c_n`: cell state at the last time step of the sequence of shape
[batch size, sequence length, number of target variables].
|
neuralhydrology/modelzoo/ealstm.py
|
forward
|
visr/neuralhydrology
|
python
|
def forward(self, data: Dict[(str, torch.Tensor)]) -> Dict[(str, torch.Tensor)]:
'Perform a forward pass on the EA-LSTM model.\n\n Parameters\n ----------\n data : Dict[str, torch.Tensor]\n Dictionary, containing input features as key-value pairs.\n\n Returns\n -------\n Dict[str, torch.Tensor]\n Model outputs and intermediate states as a dictionary. \n - `y_hat`: model predictions of shape [batch size, sequence length, number of target variables].\n - `h_n`: hidden state at the last time step of the sequence of shape \n [batch size, sequence length, number of target variables].\n - `c_n`: cell state at the last time step of the sequence of shape \n [batch size, sequence length, number of target variables].\n '
x_d = data['x_d'].transpose(0, 1)
if (('x_s' in data) and ('x_one_hot' in data)):
x_s = torch.cat([data['x_s'], data['x_one_hot']], dim=(- 1))
elif ('x_s' in data):
x_s = data['x_s']
elif ('x_one_hot' in data):
x_s = data['x_one_hot']
else:
raise ValueError('Need x_s or x_one_hot in forward pass.')
h_t = x_d.data.new(x_d.shape[1], self._hidden_size).zero_()
c_t = x_d.data.new(x_d.shape[1], self._hidden_size).zero_()
(h_n, c_n) = ([], [])
i = torch.sigmoid(self.input_gate(x_s))
for x_dt in x_d:
(h_t, c_t) = self._cell(x_dt, i, (h_t, c_t))
h_n.append(h_t)
c_n.append(c_t)
h_n = torch.stack(h_n, 0).transpose(0, 1)
c_n = torch.stack(c_n, 0).transpose(0, 1)
pred = {'h_n': h_n, 'c_n': c_n}
pred.update(self.head(self.dropout(h_n)))
return pred
|
def _reset_parameters(self):
'Special initialization of certain model weights.'
nn.init.orthogonal_(self.weight_ih.data)
weight_hh_data = torch.eye(self.cfg.hidden_size)
weight_hh_data = weight_hh_data.repeat(1, 3)
self.weight_hh.data = weight_hh_data
nn.init.constant_(self.bias.data, val=0)
if (self.cfg.initial_forget_bias is not None):
self.bias.data[:self.cfg.hidden_size] = self.cfg.initial_forget_bias
| 7,564,674,853,396,321,000
|
Special initialization of certain model weights.
|
neuralhydrology/modelzoo/ealstm.py
|
_reset_parameters
|
visr/neuralhydrology
|
python
|
def _reset_parameters(self):
nn.init.orthogonal_(self.weight_ih.data)
weight_hh_data = torch.eye(self.cfg.hidden_size)
weight_hh_data = weight_hh_data.repeat(1, 3)
self.weight_hh.data = weight_hh_data
nn.init.constant_(self.bias.data, val=0)
if (self.cfg.initial_forget_bias is not None):
self.bias.data[:self.cfg.hidden_size] = self.cfg.initial_forget_bias
|
def naive_cut_rod_recursive(n: int, prices: list):
'\n Solves the rod-cutting problem via naively without using the benefit of dynamic\n programming. The results is the same sub-problems are solved several times\n leading to an exponential runtime\n\n Runtime: O(2^n)\n\n Arguments\n -------\n n: int, the length of the rod\n prices: list, the prices for each piece of rod. ``p[i-i]`` is the\n price for a rod of length ``i``\n\n Returns\n -------\n The maximum revenue obtainable for a rod of length n given the list of prices\n for each piece.\n\n Examples\n --------\n >>> naive_cut_rod_recursive(4, [1, 5, 8, 9])\n 10\n >>> naive_cut_rod_recursive(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30])\n 30\n '
_enforce_args(n, prices)
if (n == 0):
return 0
max_revue = float('-inf')
for i in range(1, (n + 1)):
max_revue = max(max_revue, (prices[(i - 1)] + naive_cut_rod_recursive((n - i), prices)))
return max_revue
| 2,580,167,286,780,686,000
|
Solves the rod-cutting problem via naively without using the benefit of dynamic
programming. The results is the same sub-problems are solved several times
leading to an exponential runtime
Runtime: O(2^n)
Arguments
-------
n: int, the length of the rod
prices: list, the prices for each piece of rod. ``p[i-i]`` is the
price for a rod of length ``i``
Returns
-------
The maximum revenue obtainable for a rod of length n given the list of prices
for each piece.
Examples
--------
>>> naive_cut_rod_recursive(4, [1, 5, 8, 9])
10
>>> naive_cut_rod_recursive(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30])
30
|
dynamic_programming/rod_cutting.py
|
naive_cut_rod_recursive
|
AlgorithmAndLeetCode/TheAlgorithms-Python
|
python
|
def naive_cut_rod_recursive(n: int, prices: list):
'\n Solves the rod-cutting problem via naively without using the benefit of dynamic\n programming. The results is the same sub-problems are solved several times\n leading to an exponential runtime\n\n Runtime: O(2^n)\n\n Arguments\n -------\n n: int, the length of the rod\n prices: list, the prices for each piece of rod. ``p[i-i]`` is the\n price for a rod of length ``i``\n\n Returns\n -------\n The maximum revenue obtainable for a rod of length n given the list of prices\n for each piece.\n\n Examples\n --------\n >>> naive_cut_rod_recursive(4, [1, 5, 8, 9])\n 10\n >>> naive_cut_rod_recursive(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30])\n 30\n '
_enforce_args(n, prices)
if (n == 0):
return 0
max_revue = float('-inf')
for i in range(1, (n + 1)):
max_revue = max(max_revue, (prices[(i - 1)] + naive_cut_rod_recursive((n - i), prices)))
return max_revue
|
def top_down_cut_rod(n: int, prices: list):
"\n Constructs a top-down dynamic programming solution for the rod-cutting\n problem via memoization. This function serves as a wrapper for\n _top_down_cut_rod_recursive\n\n Runtime: O(n^2)\n\n Arguments\n --------\n n: int, the length of the rod\n prices: list, the prices for each piece of rod. ``p[i-i]`` is the\n price for a rod of length ``i``\n\n Note\n ----\n For convenience and because Python's lists using 0-indexing, length(max_rev) =\n n + 1, to accommodate for the revenue obtainable from a rod of length 0.\n\n Returns\n -------\n The maximum revenue obtainable for a rod of length n given the list of prices\n for each piece.\n\n Examples\n -------\n >>> top_down_cut_rod(4, [1, 5, 8, 9])\n 10\n >>> top_down_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30])\n 30\n "
_enforce_args(n, prices)
max_rev = [float('-inf') for _ in range((n + 1))]
return _top_down_cut_rod_recursive(n, prices, max_rev)
| 7,519,621,116,936,795,000
|
Constructs a top-down dynamic programming solution for the rod-cutting
problem via memoization. This function serves as a wrapper for
_top_down_cut_rod_recursive
Runtime: O(n^2)
Arguments
--------
n: int, the length of the rod
prices: list, the prices for each piece of rod. ``p[i-i]`` is the
price for a rod of length ``i``
Note
----
For convenience and because Python's lists using 0-indexing, length(max_rev) =
n + 1, to accommodate for the revenue obtainable from a rod of length 0.
Returns
-------
The maximum revenue obtainable for a rod of length n given the list of prices
for each piece.
Examples
-------
>>> top_down_cut_rod(4, [1, 5, 8, 9])
10
>>> top_down_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30])
30
|
dynamic_programming/rod_cutting.py
|
top_down_cut_rod
|
AlgorithmAndLeetCode/TheAlgorithms-Python
|
python
|
def top_down_cut_rod(n: int, prices: list):
"\n Constructs a top-down dynamic programming solution for the rod-cutting\n problem via memoization. This function serves as a wrapper for\n _top_down_cut_rod_recursive\n\n Runtime: O(n^2)\n\n Arguments\n --------\n n: int, the length of the rod\n prices: list, the prices for each piece of rod. ``p[i-i]`` is the\n price for a rod of length ``i``\n\n Note\n ----\n For convenience and because Python's lists using 0-indexing, length(max_rev) =\n n + 1, to accommodate for the revenue obtainable from a rod of length 0.\n\n Returns\n -------\n The maximum revenue obtainable for a rod of length n given the list of prices\n for each piece.\n\n Examples\n -------\n >>> top_down_cut_rod(4, [1, 5, 8, 9])\n 10\n >>> top_down_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30])\n 30\n "
_enforce_args(n, prices)
max_rev = [float('-inf') for _ in range((n + 1))]
return _top_down_cut_rod_recursive(n, prices, max_rev)
|
def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
'\n Constructs a top-down dynamic programming solution for the rod-cutting problem\n via memoization.\n\n Runtime: O(n^2)\n\n Arguments\n --------\n n: int, the length of the rod\n prices: list, the prices for each piece of rod. ``p[i-i]`` is the\n price for a rod of length ``i``\n max_rev: list, the computed maximum revenue for a piece of rod.\n ``max_rev[i]`` is the maximum revenue obtainable for a rod of length ``i``\n\n Returns\n -------\n The maximum revenue obtainable for a rod of length n given the list of prices\n for each piece.\n '
if (max_rev[n] >= 0):
return max_rev[n]
elif (n == 0):
return 0
else:
max_revenue = float('-inf')
for i in range(1, (n + 1)):
max_revenue = max(max_revenue, (prices[(i - 1)] + _top_down_cut_rod_recursive((n - i), prices, max_rev)))
max_rev[n] = max_revenue
return max_rev[n]
| 3,821,172,561,322,750,000
|
Constructs a top-down dynamic programming solution for the rod-cutting problem
via memoization.
Runtime: O(n^2)
Arguments
--------
n: int, the length of the rod
prices: list, the prices for each piece of rod. ``p[i-i]`` is the
price for a rod of length ``i``
max_rev: list, the computed maximum revenue for a piece of rod.
``max_rev[i]`` is the maximum revenue obtainable for a rod of length ``i``
Returns
-------
The maximum revenue obtainable for a rod of length n given the list of prices
for each piece.
|
dynamic_programming/rod_cutting.py
|
_top_down_cut_rod_recursive
|
AlgorithmAndLeetCode/TheAlgorithms-Python
|
python
|
def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
'\n Constructs a top-down dynamic programming solution for the rod-cutting problem\n via memoization.\n\n Runtime: O(n^2)\n\n Arguments\n --------\n n: int, the length of the rod\n prices: list, the prices for each piece of rod. ``p[i-i]`` is the\n price for a rod of length ``i``\n max_rev: list, the computed maximum revenue for a piece of rod.\n ``max_rev[i]`` is the maximum revenue obtainable for a rod of length ``i``\n\n Returns\n -------\n The maximum revenue obtainable for a rod of length n given the list of prices\n for each piece.\n '
if (max_rev[n] >= 0):
return max_rev[n]
elif (n == 0):
return 0
else:
max_revenue = float('-inf')
for i in range(1, (n + 1)):
max_revenue = max(max_revenue, (prices[(i - 1)] + _top_down_cut_rod_recursive((n - i), prices, max_rev)))
max_rev[n] = max_revenue
return max_rev[n]
|
def bottom_up_cut_rod(n: int, prices: list):
'\n Constructs a bottom-up dynamic programming solution for the rod-cutting problem\n\n Runtime: O(n^2)\n\n Arguments\n ----------\n n: int, the maximum length of the rod.\n prices: list, the prices for each piece of rod. ``p[i-i]`` is the\n price for a rod of length ``i``\n\n Returns\n -------\n The maximum revenue obtainable from cutting a rod of length n given\n the prices for each piece of rod p.\n\n Examples\n -------\n >>> bottom_up_cut_rod(4, [1, 5, 8, 9])\n 10\n >>> bottom_up_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30])\n 30\n '
_enforce_args(n, prices)
max_rev = [float('-inf') for _ in range((n + 1))]
max_rev[0] = 0
for i in range(1, (n + 1)):
max_revenue_i = max_rev[i]
for j in range(1, (i + 1)):
max_revenue_i = max(max_revenue_i, (prices[(j - 1)] + max_rev[(i - j)]))
max_rev[i] = max_revenue_i
return max_rev[n]
| 3,542,587,413,765,805,000
|
Constructs a bottom-up dynamic programming solution for the rod-cutting problem
Runtime: O(n^2)
Arguments
----------
n: int, the maximum length of the rod.
prices: list, the prices for each piece of rod. ``p[i-i]`` is the
price for a rod of length ``i``
Returns
-------
The maximum revenue obtainable from cutting a rod of length n given
the prices for each piece of rod p.
Examples
-------
>>> bottom_up_cut_rod(4, [1, 5, 8, 9])
10
>>> bottom_up_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30])
30
|
dynamic_programming/rod_cutting.py
|
bottom_up_cut_rod
|
AlgorithmAndLeetCode/TheAlgorithms-Python
|
python
|
def bottom_up_cut_rod(n: int, prices: list):
'\n Constructs a bottom-up dynamic programming solution for the rod-cutting problem\n\n Runtime: O(n^2)\n\n Arguments\n ----------\n n: int, the maximum length of the rod.\n prices: list, the prices for each piece of rod. ``p[i-i]`` is the\n price for a rod of length ``i``\n\n Returns\n -------\n The maximum revenue obtainable from cutting a rod of length n given\n the prices for each piece of rod p.\n\n Examples\n -------\n >>> bottom_up_cut_rod(4, [1, 5, 8, 9])\n 10\n >>> bottom_up_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30])\n 30\n '
_enforce_args(n, prices)
max_rev = [float('-inf') for _ in range((n + 1))]
max_rev[0] = 0
for i in range(1, (n + 1)):
max_revenue_i = max_rev[i]
for j in range(1, (i + 1)):
max_revenue_i = max(max_revenue_i, (prices[(j - 1)] + max_rev[(i - j)]))
max_rev[i] = max_revenue_i
return max_rev[n]
|
def _enforce_args(n: int, prices: list):
'\n Basic checks on the arguments to the rod-cutting algorithms\n\n n: int, the length of the rod\n prices: list, the price list for each piece of rod.\n\n Throws ValueError:\n\n if n is negative or there are fewer items in the price list than the length of\n the rod\n '
if (n < 0):
raise ValueError(f'n must be greater than or equal to 0. Got n = {n}')
if (n > len(prices)):
raise ValueError(f'Each integral piece of rod must have a corresponding price. Got n = {n} but length of prices = {len(prices)}')
| -643,413,616,167,749,200
|
Basic checks on the arguments to the rod-cutting algorithms
n: int, the length of the rod
prices: list, the price list for each piece of rod.
Throws ValueError:
if n is negative or there are fewer items in the price list than the length of
the rod
|
dynamic_programming/rod_cutting.py
|
_enforce_args
|
AlgorithmAndLeetCode/TheAlgorithms-Python
|
python
|
def _enforce_args(n: int, prices: list):
'\n Basic checks on the arguments to the rod-cutting algorithms\n\n n: int, the length of the rod\n prices: list, the price list for each piece of rod.\n\n Throws ValueError:\n\n if n is negative or there are fewer items in the price list than the length of\n the rod\n '
if (n < 0):
raise ValueError(f'n must be greater than or equal to 0. Got n = {n}')
if (n > len(prices)):
raise ValueError(f'Each integral piece of rod must have a corresponding price. Got n = {n} but length of prices = {len(prices)}')
|
def convert_gis_to_geodata(net, node_geodata=True, branch_geodata=True):
'\n Extracts information on bus and line geodata from the geometries of a geopandas geodataframe.\n\n :param net: The net for which to convert the geodata\n :type net: pandapowerNet\n :param node_geodata: flag if to extract x and y values for bus geodata\n :type node_geodata: bool, default True\n :param branch_geodata: flag if to extract coordinates values for line geodata\n :type branch_geodata: bool, default True\n :return: No output.\n '
if node_geodata:
_transform_node_geometry_to_geodata(net.junction_geodata)
if branch_geodata:
_transform_branch_geometry_to_coords(net.pipe_geodata)
| 540,785,274,749,335,400
|
Extracts information on bus and line geodata from the geometries of a geopandas geodataframe.
:param net: The net for which to convert the geodata
:type net: pandapowerNet
:param node_geodata: flag if to extract x and y values for bus geodata
:type node_geodata: bool, default True
:param branch_geodata: flag if to extract coordinates values for line geodata
:type branch_geodata: bool, default True
:return: No output.
|
pandapipes/plotting/geo.py
|
convert_gis_to_geodata
|
Fraank-dash/pandapipes
|
python
|
def convert_gis_to_geodata(net, node_geodata=True, branch_geodata=True):
'\n Extracts information on bus and line geodata from the geometries of a geopandas geodataframe.\n\n :param net: The net for which to convert the geodata\n :type net: pandapowerNet\n :param node_geodata: flag if to extract x and y values for bus geodata\n :type node_geodata: bool, default True\n :param branch_geodata: flag if to extract coordinates values for line geodata\n :type branch_geodata: bool, default True\n :return: No output.\n '
if node_geodata:
_transform_node_geometry_to_geodata(net.junction_geodata)
if branch_geodata:
_transform_branch_geometry_to_coords(net.pipe_geodata)
|
def convert_geodata_to_gis(net, epsg=31467, node_geodata=True, branch_geodata=True):
'\n Transforms the bus and line geodata of a net into a geopandaas geodataframe with the respective\n geometries.\n\n :param net: The net for which to convert the geodata\n :type net: pandapowerNet\n :param epsg: current epsg projection\n :type epsg: int, default 4326 (= WGS84)\n :param node_geodata: flag if to transform the bus geodata table\n :type node_geodata: bool, default True\n :param branch_geodata: flag if to transform the line geodata table\n :type branch_geodata: bool, default True\n :return: No output.\n '
if node_geodata:
net['bus_geodata'] = _node_geometries_from_geodata(net['bus_geodata'], epsg)
if branch_geodata:
net['line_geodata'] = _branch_geometries_from_geodata(net['line_geodata'], epsg)
net['gis_epsg_code'] = epsg
| 1,760,503,386,158,919,700
|
Transforms the bus and line geodata of a net into a geopandaas geodataframe with the respective
geometries.
:param net: The net for which to convert the geodata
:type net: pandapowerNet
:param epsg: current epsg projection
:type epsg: int, default 4326 (= WGS84)
:param node_geodata: flag if to transform the bus geodata table
:type node_geodata: bool, default True
:param branch_geodata: flag if to transform the line geodata table
:type branch_geodata: bool, default True
:return: No output.
|
pandapipes/plotting/geo.py
|
convert_geodata_to_gis
|
Fraank-dash/pandapipes
|
python
|
def convert_geodata_to_gis(net, epsg=31467, node_geodata=True, branch_geodata=True):
'\n Transforms the bus and line geodata of a net into a geopandaas geodataframe with the respective\n geometries.\n\n :param net: The net for which to convert the geodata\n :type net: pandapowerNet\n :param epsg: current epsg projection\n :type epsg: int, default 4326 (= WGS84)\n :param node_geodata: flag if to transform the bus geodata table\n :type node_geodata: bool, default True\n :param branch_geodata: flag if to transform the line geodata table\n :type branch_geodata: bool, default True\n :return: No output.\n '
if node_geodata:
net['bus_geodata'] = _node_geometries_from_geodata(net['bus_geodata'], epsg)
if branch_geodata:
net['line_geodata'] = _branch_geometries_from_geodata(net['line_geodata'], epsg)
net['gis_epsg_code'] = epsg
|
def convert_epsg_bus_geodata(net, epsg_in=4326, epsg_out=31467):
'\n Converts bus geodata in net from epsg_in to epsg_out\n\n :param net: The pandapower network\n :type net: pandapowerNet\n :param epsg_in: current epsg projection\n :type epsg_in: int, default 4326 (= WGS84)\n :param epsg_out: epsg projection to be transformed to\n :type epsg_out: int, default 31467 (= Gauss-Krüger Zone 3)\n :return: net - the given pandapower network (no copy!)\n '
(net['bus_geodata'].loc[:, 'x'], net['bus_geodata'].loc[:, 'y']) = _convert_xy_epsg(net['bus_geodata'].loc[:, 'x'], net['bus_geodata'].loc[:, 'y'], epsg_in, epsg_out)
return net
| 6,620,877,165,603,603,000
|
Converts bus geodata in net from epsg_in to epsg_out
:param net: The pandapower network
:type net: pandapowerNet
:param epsg_in: current epsg projection
:type epsg_in: int, default 4326 (= WGS84)
:param epsg_out: epsg projection to be transformed to
:type epsg_out: int, default 31467 (= Gauss-Krüger Zone 3)
:return: net - the given pandapower network (no copy!)
|
pandapipes/plotting/geo.py
|
convert_epsg_bus_geodata
|
Fraank-dash/pandapipes
|
python
|
def convert_epsg_bus_geodata(net, epsg_in=4326, epsg_out=31467):
'\n Converts bus geodata in net from epsg_in to epsg_out\n\n :param net: The pandapower network\n :type net: pandapowerNet\n :param epsg_in: current epsg projection\n :type epsg_in: int, default 4326 (= WGS84)\n :param epsg_out: epsg projection to be transformed to\n :type epsg_out: int, default 31467 (= Gauss-Krüger Zone 3)\n :return: net - the given pandapower network (no copy!)\n '
(net['bus_geodata'].loc[:, 'x'], net['bus_geodata'].loc[:, 'y']) = _convert_xy_epsg(net['bus_geodata'].loc[:, 'x'], net['bus_geodata'].loc[:, 'y'], epsg_in, epsg_out)
return net
|
@csrf_exempt
def addChangeHostInfo(request):
'\n 新增主机\n 修改主机\n '
v_hostId = request.POST.get('host_id')
v_businessName = request.POST.get('business_name')
v_serviceEnv = request.POST.get('service_env')
v_hostName = request.POST.get('host_name')
v_intranetIpAddr = request.POST.get('intranet_ipaddr')
v_publicIpAddr = request.POST.get('public_ipaddr')
v_sshPort = request.POST.get('ssh_port')
v_hostType = request.POST.get('host_type')
v_hostRole = request.POST.get('host_role')
v_hostDesc = request.POST.get('host_desc')
print(v_hostId, v_businessName, v_serviceEnv, v_hostName, v_intranetIpAddr, v_publicIpAddr, v_sshPort, v_hostType, v_hostRole, v_hostDesc)
if ((v_hostId == '') or (v_hostId is None)):
try:
hostObj = host(businessName=v_businessName, serviceEnv=v_serviceEnv, hostName=v_hostName, intranetIpAddr=v_intranetIpAddr, publicIpAddr=v_publicIpAddr, sshPort=v_sshPort, hostType=v_hostType, hostRole=v_hostRole, hostDesc=v_hostDesc)
hostObj.save()
result = {'status': 1, 'msg': '保存成功!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
result = {'status': 2, 'msg': ('保存失败!' + str(e)), 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
else:
try:
hostObj = host.objects.filter(id=v_hostId)
hostObj.update(businessName=v_businessName, serviceEnv=v_serviceEnv, hostName=v_hostName, intranetIpAddr=v_intranetIpAddr, publicIpAddr=v_publicIpAddr, sshPort=v_sshPort, hostType=v_hostType, hostRole=v_hostRole, hostDesc=v_hostDesc)
result = {'status': 1, 'msg': '修改成功!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
result = {'status': 2, 'msg': ('修改失败!' + str(e)), 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
| 2,577,743,367,787,272,700
|
新增主机
修改主机
|
cmdb/views_ajax.py
|
addChangeHostInfo
|
bopopescu/dbsupport
|
python
|
@csrf_exempt
def addChangeHostInfo(request):
'\n 新增主机\n 修改主机\n '
v_hostId = request.POST.get('host_id')
v_businessName = request.POST.get('business_name')
v_serviceEnv = request.POST.get('service_env')
v_hostName = request.POST.get('host_name')
v_intranetIpAddr = request.POST.get('intranet_ipaddr')
v_publicIpAddr = request.POST.get('public_ipaddr')
v_sshPort = request.POST.get('ssh_port')
v_hostType = request.POST.get('host_type')
v_hostRole = request.POST.get('host_role')
v_hostDesc = request.POST.get('host_desc')
print(v_hostId, v_businessName, v_serviceEnv, v_hostName, v_intranetIpAddr, v_publicIpAddr, v_sshPort, v_hostType, v_hostRole, v_hostDesc)
if ((v_hostId == ) or (v_hostId is None)):
try:
hostObj = host(businessName=v_businessName, serviceEnv=v_serviceEnv, hostName=v_hostName, intranetIpAddr=v_intranetIpAddr, publicIpAddr=v_publicIpAddr, sshPort=v_sshPort, hostType=v_hostType, hostRole=v_hostRole, hostDesc=v_hostDesc)
hostObj.save()
result = {'status': 1, 'msg': '保存成功!', 'data': }
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
result = {'status': 2, 'msg': ('保存失败!' + str(e)), 'data': }
return HttpResponse(json.dumps(result), content_type='application/json')
else:
try:
hostObj = host.objects.filter(id=v_hostId)
hostObj.update(businessName=v_businessName, serviceEnv=v_serviceEnv, hostName=v_hostName, intranetIpAddr=v_intranetIpAddr, publicIpAddr=v_publicIpAddr, sshPort=v_sshPort, hostType=v_hostType, hostRole=v_hostRole, hostDesc=v_hostDesc)
result = {'status': 1, 'msg': '修改成功!', 'data': }
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
result = {'status': 2, 'msg': ('修改失败!' + str(e)), 'data': }
return HttpResponse(json.dumps(result), content_type='application/json')
|
@csrf_exempt
def addChangeHostUserInfo(request):
'\n 新增主机用户\n 修改主机用户\n '
v_hostUserId = request.POST.get('host_user_id')
v_hostId = request.POST.get('host_id')
v_hostUser = request.POST.get('host_user')
v_hostPasswd = request.POST.get('host_passwd')
v_userDesc = request.POST.get('user_desc')
print(v_hostUserId, v_hostId, v_hostUser, v_hostPasswd, v_userDesc)
if ((v_hostUserId == '') or (v_hostUserId is None)):
try:
hostObj = host.objects.get(id=v_hostId)
hostUserObj = hostUser(hostUser=v_hostUser, hostPasswd=v_hostPasswd, userDesc=v_userDesc, host=hostObj)
hostUserObj.save()
result = {'status': 1, 'msg': '保存成功!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
logger.error(str(e))
result = {'status': 2, 'msg': '保存失败!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
else:
try:
hostUserObj = hostUser.objects.filter(id=v_hostUserId)
hostUserObj.update(hostUser=v_hostUser, hostPasswd=v_hostPasswd, userDesc=v_userDesc)
result = {'status': 1, 'msg': '修改成功!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
logger.error(str(e))
result = {'status': 2, 'msg': '修改失败!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
| 84,332,024,188,917,280
|
新增主机用户
修改主机用户
|
cmdb/views_ajax.py
|
addChangeHostUserInfo
|
bopopescu/dbsupport
|
python
|
@csrf_exempt
def addChangeHostUserInfo(request):
'\n 新增主机用户\n 修改主机用户\n '
v_hostUserId = request.POST.get('host_user_id')
v_hostId = request.POST.get('host_id')
v_hostUser = request.POST.get('host_user')
v_hostPasswd = request.POST.get('host_passwd')
v_userDesc = request.POST.get('user_desc')
print(v_hostUserId, v_hostId, v_hostUser, v_hostPasswd, v_userDesc)
if ((v_hostUserId == ) or (v_hostUserId is None)):
try:
hostObj = host.objects.get(id=v_hostId)
hostUserObj = hostUser(hostUser=v_hostUser, hostPasswd=v_hostPasswd, userDesc=v_userDesc, host=hostObj)
hostUserObj.save()
result = {'status': 1, 'msg': '保存成功!', 'data': }
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
logger.error(str(e))
result = {'status': 2, 'msg': '保存失败!', 'data': }
return HttpResponse(json.dumps(result), content_type='application/json')
else:
try:
hostUserObj = hostUser.objects.filter(id=v_hostUserId)
hostUserObj.update(hostUser=v_hostUser, hostPasswd=v_hostPasswd, userDesc=v_userDesc)
result = {'status': 1, 'msg': '修改成功!', 'data': }
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
logger.error(str(e))
result = {'status': 2, 'msg': '修改失败!', 'data': }
return HttpResponse(json.dumps(result), content_type='application/json')
|
@csrf_exempt
def addChangeDbGroupInfo(request):
'\n 新增数据库组\n 修改数据库组\n '
v_groupId = request.POST.get('group_id')
v_businessName = request.POST.get('business_name')
v_groupName = request.POST.get('group_name')
v_groupStatus = request.POST.get('group_status')
v_groupDesc = request.POST.get('group_desc')
v_groupEnv = request.POST.get('group_env')
print(v_groupId, v_businessName, v_groupName, v_groupEnv, v_groupStatus, v_groupDesc)
logger.info('保存或修改数据库组信息,接收前端参数:', v_groupId, v_businessName, v_groupName, v_groupEnv, v_groupStatus, v_groupDesc)
if ((v_groupId == '') or (v_groupId is None)):
try:
dbGroupObj = dbGroup(businessName=v_businessName, groupName=v_groupName, groupEnv=v_groupEnv, groupStatus=v_groupStatus, groupDesc=v_groupDesc)
dbGroupObj.save()
result = {'status': 1, 'msg': '保存成功!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
logger.error(str(e))
result = {'status': 2, 'msg': '保存失败!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
else:
try:
dbGroupObj = dbGroup.objects.filter(id=v_groupId)
dbGroupObj.update(businessName=v_businessName, groupName=v_groupName, groupEnv=v_groupEnv, groupStatus=v_groupStatus, groupDesc=v_groupDesc)
result = {'status': 1, 'msg': '修改成功!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
logger.error(str(e))
result = {'status': 2, 'msg': '修改失败!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
| 7,565,920,559,405,733,000
|
新增数据库组
修改数据库组
|
cmdb/views_ajax.py
|
addChangeDbGroupInfo
|
bopopescu/dbsupport
|
python
|
@csrf_exempt
def addChangeDbGroupInfo(request):
'\n 新增数据库组\n 修改数据库组\n '
v_groupId = request.POST.get('group_id')
v_businessName = request.POST.get('business_name')
v_groupName = request.POST.get('group_name')
v_groupStatus = request.POST.get('group_status')
v_groupDesc = request.POST.get('group_desc')
v_groupEnv = request.POST.get('group_env')
print(v_groupId, v_businessName, v_groupName, v_groupEnv, v_groupStatus, v_groupDesc)
logger.info('保存或修改数据库组信息,接收前端参数:', v_groupId, v_businessName, v_groupName, v_groupEnv, v_groupStatus, v_groupDesc)
if ((v_groupId == ) or (v_groupId is None)):
try:
dbGroupObj = dbGroup(businessName=v_businessName, groupName=v_groupName, groupEnv=v_groupEnv, groupStatus=v_groupStatus, groupDesc=v_groupDesc)
dbGroupObj.save()
result = {'status': 1, 'msg': '保存成功!', 'data': }
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
logger.error(str(e))
result = {'status': 2, 'msg': '保存失败!', 'data': }
return HttpResponse(json.dumps(result), content_type='application/json')
else:
try:
dbGroupObj = dbGroup.objects.filter(id=v_groupId)
dbGroupObj.update(businessName=v_businessName, groupName=v_groupName, groupEnv=v_groupEnv, groupStatus=v_groupStatus, groupDesc=v_groupDesc)
result = {'status': 1, 'msg': '修改成功!', 'data': }
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
logger.error(str(e))
result = {'status': 2, 'msg': '修改失败!', 'data': }
return HttpResponse(json.dumps(result), content_type='application/json')
|
@csrf_exempt
def addChangeDbInstanceInfo(request):
'\n 新增数据库实例\n 修改数据库实例\n '
v_instanceId = request.POST.get('instance_id')
v_groupId = request.POST.get('group_id')
v_host_id = request.POST.get('host_id')
v_instanceName = request.POST.get('instance_env')
v_instanceType = request.POST.get('instance_type')
v_portNum = request.POST.get('port_num')
v_instanceRole = request.POST.get('instance_role')
v_instanceStatus = request.POST.get('instance_status')
v_instanceDesc = request.POST.get('instance_desc')
print(v_instanceId, v_groupId, v_host_id, v_instanceName, v_instanceType, v_portNum, v_instanceRole, v_instanceStatus, v_instanceDesc)
logger.info('保存或修改数据库实例信息,接收前端参数:', v_instanceId, v_groupId, v_host_id, v_instanceName, v_instanceType, v_portNum, v_instanceRole, v_instanceStatus, v_instanceDesc)
if ((v_instanceId == '') or (v_instanceId is None)):
try:
dbGroupObj = dbGroup.objects.get(id=v_groupId)
hostObj = host.objects.get(id=v_host_id)
print(hostObj)
dbInstanceObj = dbInstance(groupName=dbGroupObj, host=hostObj, instanceName=v_instanceName, instanceType=v_instanceType, portNum=v_portNum, instanceRole=v_instanceRole, instanceStatus=v_instanceStatus, instanceDesc=v_instanceDesc)
dbInstanceObj.save()
result = {'status': 1, 'msg': '保存成功!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
print(e)
logger.error(str(e))
result = {'status': 2, 'msg': '保存失败!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
else:
try:
dbGroupObj = dbGroup.objects.get(id=v_groupId)
hostObj = host.objects.get(id=v_host_id)
dbInstanceObj = dbInstance.objects.filter(id=v_instanceId)
dbInstanceObj.update(groupName=dbGroupObj, host=hostObj, instanceName=v_instanceName, instanceType=v_instanceType, portNum=v_portNum, instanceRole=v_instanceRole, instanceStatus=v_instanceStatus, instanceDesc=v_instanceDesc)
result = {'status': 1, 'msg': '修改成功!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
logger.error(str(e))
result = {'status': 2, 'msg': '修改失败!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
| -1,685,775,773,127,927,800
|
新增数据库实例
修改数据库实例
|
cmdb/views_ajax.py
|
addChangeDbInstanceInfo
|
bopopescu/dbsupport
|
python
|
@csrf_exempt
def addChangeDbInstanceInfo(request):
'\n 新增数据库实例\n 修改数据库实例\n '
v_instanceId = request.POST.get('instance_id')
v_groupId = request.POST.get('group_id')
v_host_id = request.POST.get('host_id')
v_instanceName = request.POST.get('instance_env')
v_instanceType = request.POST.get('instance_type')
v_portNum = request.POST.get('port_num')
v_instanceRole = request.POST.get('instance_role')
v_instanceStatus = request.POST.get('instance_status')
v_instanceDesc = request.POST.get('instance_desc')
print(v_instanceId, v_groupId, v_host_id, v_instanceName, v_instanceType, v_portNum, v_instanceRole, v_instanceStatus, v_instanceDesc)
logger.info('保存或修改数据库实例信息,接收前端参数:', v_instanceId, v_groupId, v_host_id, v_instanceName, v_instanceType, v_portNum, v_instanceRole, v_instanceStatus, v_instanceDesc)
if ((v_instanceId == ) or (v_instanceId is None)):
try:
dbGroupObj = dbGroup.objects.get(id=v_groupId)
hostObj = host.objects.get(id=v_host_id)
print(hostObj)
dbInstanceObj = dbInstance(groupName=dbGroupObj, host=hostObj, instanceName=v_instanceName, instanceType=v_instanceType, portNum=v_portNum, instanceRole=v_instanceRole, instanceStatus=v_instanceStatus, instanceDesc=v_instanceDesc)
dbInstanceObj.save()
result = {'status': 1, 'msg': '保存成功!', 'data': }
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
print(e)
logger.error(str(e))
result = {'status': 2, 'msg': '保存失败!', 'data': }
return HttpResponse(json.dumps(result), content_type='application/json')
else:
try:
dbGroupObj = dbGroup.objects.get(id=v_groupId)
hostObj = host.objects.get(id=v_host_id)
dbInstanceObj = dbInstance.objects.filter(id=v_instanceId)
dbInstanceObj.update(groupName=dbGroupObj, host=hostObj, instanceName=v_instanceName, instanceType=v_instanceType, portNum=v_portNum, instanceRole=v_instanceRole, instanceStatus=v_instanceStatus, instanceDesc=v_instanceDesc)
result = {'status': 1, 'msg': '修改成功!', 'data': }
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
logger.error(str(e))
result = {'status': 2, 'msg': '修改失败!', 'data': }
return HttpResponse(json.dumps(result), content_type='application/json')
|
def _get_size(self, tokenL):
' return the size of the next object.'
if (tokenL == 15):
m = (ord(self._fp.read(1)[0]) & 3)
s = (1 << m)
f = ('>' + _BINARY_FORMAT[s])
return struct.unpack(f, self._fp.read(s))[0]
return tokenL
| 1,455,404,657,340,850,700
|
return the size of the next object.
|
Scripts/plist.py
|
_get_size
|
640921008/gibMacOS
|
python
|
def _get_size(self, tokenL):
' '
if (tokenL == 15):
m = (ord(self._fp.read(1)[0]) & 3)
s = (1 << m)
f = ('>' + _BINARY_FORMAT[s])
return struct.unpack(f, self._fp.read(s))[0]
return tokenL
|
def _read_object(self, ref):
'\n read the object by reference.\n May recursively read sub-objects (content of an array/dict/set)\n '
result = self._objects[ref]
if (result is not _undefined):
return result
offset = self._object_offsets[ref]
self._fp.seek(offset)
token = ord(self._fp.read(1)[0])
(tokenH, tokenL) = ((token & 240), (token & 15))
if (token == 0):
result = None
elif (token == 8):
result = False
elif (token == 9):
result = True
elif (token == 15):
result = b''
elif (tokenH == 16):
result = 0
for k in xrange(((2 << tokenL) - 1)):
result = ((result << 8) + ord(self._fp.read(1)))
elif (token == 34):
result = struct.unpack('>f', self._fp.read(4))[0]
elif (token == 35):
result = struct.unpack('>d', self._fp.read(8))[0]
elif (token == 51):
f = struct.unpack('>d', self._fp.read(8))[0]
result = (datetime.datetime(2001, 1, 1) + datetime.timedelta(seconds=f))
elif (tokenH == 64):
s = self._get_size(tokenL)
if self._use_builtin_types:
result = self._fp.read(s)
else:
result = plistlib.Data(self._fp.read(s))
elif (tokenH == 80):
s = self._get_size(tokenL)
result = self._fp.read(s).decode('ascii')
result = result
elif (tokenH == 96):
s = self._get_size(tokenL)
result = self._fp.read((s * 2)).decode('utf-16be')
elif (tokenH == 160):
s = self._get_size(tokenL)
obj_refs = self._read_refs(s)
result = []
self._objects[ref] = result
result.extend((self._read_object(x) for x in obj_refs))
elif (tokenH == 208):
s = self._get_size(tokenL)
key_refs = self._read_refs(s)
obj_refs = self._read_refs(s)
result = self._dict_type()
self._objects[ref] = result
for (k, o) in zip(key_refs, obj_refs):
key = self._read_object(k)
if isinstance(key, plistlib.Data):
key = key.data
result[key] = self._read_object(o)
else:
raise InvalidFileException()
self._objects[ref] = result
return result
| 5,374,556,891,309,387,000
|
read the object by reference.
May recursively read sub-objects (content of an array/dict/set)
|
Scripts/plist.py
|
_read_object
|
640921008/gibMacOS
|
python
|
def _read_object(self, ref):
'\n read the object by reference.\n May recursively read sub-objects (content of an array/dict/set)\n '
result = self._objects[ref]
if (result is not _undefined):
return result
offset = self._object_offsets[ref]
self._fp.seek(offset)
token = ord(self._fp.read(1)[0])
(tokenH, tokenL) = ((token & 240), (token & 15))
if (token == 0):
result = None
elif (token == 8):
result = False
elif (token == 9):
result = True
elif (token == 15):
result = b
elif (tokenH == 16):
result = 0
for k in xrange(((2 << tokenL) - 1)):
result = ((result << 8) + ord(self._fp.read(1)))
elif (token == 34):
result = struct.unpack('>f', self._fp.read(4))[0]
elif (token == 35):
result = struct.unpack('>d', self._fp.read(8))[0]
elif (token == 51):
f = struct.unpack('>d', self._fp.read(8))[0]
result = (datetime.datetime(2001, 1, 1) + datetime.timedelta(seconds=f))
elif (tokenH == 64):
s = self._get_size(tokenL)
if self._use_builtin_types:
result = self._fp.read(s)
else:
result = plistlib.Data(self._fp.read(s))
elif (tokenH == 80):
s = self._get_size(tokenL)
result = self._fp.read(s).decode('ascii')
result = result
elif (tokenH == 96):
s = self._get_size(tokenL)
result = self._fp.read((s * 2)).decode('utf-16be')
elif (tokenH == 160):
s = self._get_size(tokenL)
obj_refs = self._read_refs(s)
result = []
self._objects[ref] = result
result.extend((self._read_object(x) for x in obj_refs))
elif (tokenH == 208):
s = self._get_size(tokenL)
key_refs = self._read_refs(s)
obj_refs = self._read_refs(s)
result = self._dict_type()
self._objects[ref] = result
for (k, o) in zip(key_refs, obj_refs):
key = self._read_object(k)
if isinstance(key, plistlib.Data):
key = key.data
result[key] = self._read_object(o)
else:
raise InvalidFileException()
self._objects[ref] = result
return result
|
def querybuild(cls, **kwargs):
"\n Instantiates and returns a QueryBuilder instance.\n\n The QueryBuilder's path has one vertice so far, namely this class.\n Additional parameters (e.g. filters or a label),\n can be passes as keyword arguments.\n\n :param label: Label to give\n :param filters: filters to apply\n :param project: projections\n :returns: a QueryBuilder instance.\n "
from aiida.orm import QueryBuilder
query_builder = QueryBuilder()
filters = kwargs.pop('filters', {})
query_builder.append(cls, filters=filters, **kwargs)
return query_builder
| 6,510,601,466,728,924,000
|
Instantiates and returns a QueryBuilder instance.
The QueryBuilder's path has one vertice so far, namely this class.
Additional parameters (e.g. filters or a label),
can be passes as keyword arguments.
:param label: Label to give
:param filters: filters to apply
:param project: projections
:returns: a QueryBuilder instance.
|
aiida_vasp/utils/aiida_utils.py
|
querybuild
|
kavanase/aiida-vasp
|
python
|
def querybuild(cls, **kwargs):
"\n Instantiates and returns a QueryBuilder instance.\n\n The QueryBuilder's path has one vertice so far, namely this class.\n Additional parameters (e.g. filters or a label),\n can be passes as keyword arguments.\n\n :param label: Label to give\n :param filters: filters to apply\n :param project: projections\n :returns: a QueryBuilder instance.\n "
from aiida.orm import QueryBuilder
query_builder = QueryBuilder()
filters = kwargs.pop('filters', {})
query_builder.append(cls, filters=filters, **kwargs)
return query_builder
|
@with_dbenv()
def get_data_class(data_type):
'Provide access to the orm.data classes with deferred dbenv loading.'
from aiida.plugins import DataFactory
from aiida.common.exceptions import MissingEntryPointError
data_cls = None
try:
data_cls = DataFactory(data_type)
except MissingEntryPointError as err:
raise err
return data_cls
| -8,014,482,675,913,156,000
|
Provide access to the orm.data classes with deferred dbenv loading.
|
aiida_vasp/utils/aiida_utils.py
|
get_data_class
|
kavanase/aiida-vasp
|
python
|
@with_dbenv()
def get_data_class(data_type):
from aiida.plugins import DataFactory
from aiida.common.exceptions import MissingEntryPointError
data_cls = None
try:
data_cls = DataFactory(data_type)
except MissingEntryPointError as err:
raise err
return data_cls
|
def get_current_user():
'Get current user.'
current_user = User.objects.get_default()
return current_user
| 306,002,596,648,056,500
|
Get current user.
|
aiida_vasp/utils/aiida_utils.py
|
get_current_user
|
kavanase/aiida-vasp
|
python
|
def get_current_user():
current_user = User.objects.get_default()
return current_user
|
def copy_parameter(old_parameter):
'Assemble a new Dict.'
return get_data_node('dict', dict=old_parameter.get_dict())
| -5,951,678,597,882,443,000
|
Assemble a new Dict.
|
aiida_vasp/utils/aiida_utils.py
|
copy_parameter
|
kavanase/aiida-vasp
|
python
|
def copy_parameter(old_parameter):
return get_data_node('dict', dict=old_parameter.get_dict())
|
def displace_position(structure, displacement, entry):
'Displace a position in the StructureData.'
sites = structure.sites
positions = []
for site in sites:
positions.append(site.position)
new_position = (np.asarray(positions[(entry - 1)]) + displacement)
new_position = new_position.tolist()
positions[(entry - 1)] = tuple(new_position)
structure.reset_sites_positions(positions)
| -5,977,429,901,558,767,000
|
Displace a position in the StructureData.
|
aiida_vasp/utils/aiida_utils.py
|
displace_position
|
kavanase/aiida-vasp
|
python
|
def displace_position(structure, displacement, entry):
sites = structure.sites
positions = []
for site in sites:
positions.append(site.position)
new_position = (np.asarray(positions[(entry - 1)]) + displacement)
new_position = new_position.tolist()
positions[(entry - 1)] = tuple(new_position)
structure.reset_sites_positions(positions)
|
def compress_cell(structure, volume_change):
'Apply compression or tensile forces to the unit cell.'
cell = structure.cell
new_cell = (np.array(cell) * volume_change)
structure.reset_cell(new_cell.tolist())
| -589,114,200,885,345,800
|
Apply compression or tensile forces to the unit cell.
|
aiida_vasp/utils/aiida_utils.py
|
compress_cell
|
kavanase/aiida-vasp
|
python
|
def compress_cell(structure, volume_change):
cell = structure.cell
new_cell = (np.array(cell) * volume_change)
structure.reset_cell(new_cell.tolist())
|
def cmp_load_verdi_data():
'Load the verdi data click command group for any version since 0.11.'
verdi_data = None
import_errors = []
try:
from aiida.cmdline.commands import data_cmd as verdi_data
except ImportError as err:
import_errors.append(err)
if (not verdi_data):
try:
from aiida.cmdline.commands import verdi_data
except ImportError as err:
import_errors.append(err)
if (not verdi_data):
try:
from aiida.cmdline.commands.cmd_data import verdi_data
except ImportError as err:
import_errors.append(err)
if (not verdi_data):
err_messages = '\n'.join([' * {}'.format(err) for err in import_errors])
raise ImportError(('The verdi data base command group could not be found:\n' + err_messages))
return verdi_data
| -1,284,042,890,687,403,800
|
Load the verdi data click command group for any version since 0.11.
|
aiida_vasp/utils/aiida_utils.py
|
cmp_load_verdi_data
|
kavanase/aiida-vasp
|
python
|
def cmp_load_verdi_data():
verdi_data = None
import_errors = []
try:
from aiida.cmdline.commands import data_cmd as verdi_data
except ImportError as err:
import_errors.append(err)
if (not verdi_data):
try:
from aiida.cmdline.commands import verdi_data
except ImportError as err:
import_errors.append(err)
if (not verdi_data):
try:
from aiida.cmdline.commands.cmd_data import verdi_data
except ImportError as err:
import_errors.append(err)
if (not verdi_data):
err_messages = '\n'.join([' * {}'.format(err) for err in import_errors])
raise ImportError(('The verdi data base command group could not be found:\n' + err_messages))
return verdi_data
|
def create_authinfo(computer, store=False):
'Allow the current user to use the given computer.'
from aiida.orm import AuthInfo
authinfo = AuthInfo(computer=computer, user=get_current_user())
if store:
authinfo.store()
return authinfo
| -1,969,563,057,198,590,200
|
Allow the current user to use the given computer.
|
aiida_vasp/utils/aiida_utils.py
|
create_authinfo
|
kavanase/aiida-vasp
|
python
|
def create_authinfo(computer, store=False):
from aiida.orm import AuthInfo
authinfo = AuthInfo(computer=computer, user=get_current_user())
if store:
authinfo.store()
return authinfo
|
def cmp_get_authinfo(computer):
'Get an existing authinfo or None for the given computer and current user.'
return computer.get_authinfo(get_current_user())
| -3,414,521,374,861,117,400
|
Get an existing authinfo or None for the given computer and current user.
|
aiida_vasp/utils/aiida_utils.py
|
cmp_get_authinfo
|
kavanase/aiida-vasp
|
python
|
def cmp_get_authinfo(computer):
return computer.get_authinfo(get_current_user())
|
@paddle.no_grad()
def get_offset(self, anchors, featmap_size, stride):
'\n Args:\n anchors: [M,5] xc,yc,w,h,angle\n featmap_size: (feat_h, feat_w)\n stride: 8\n Returns:\n\n '
anchors = paddle.reshape(anchors, [(- 1), 5])
dtype = anchors.dtype
feat_h = featmap_size[0]
feat_w = featmap_size[1]
pad = ((self.kernel_size - 1) // 2)
idx = paddle.arange((- pad), (pad + 1), dtype=dtype)
(yy, xx) = paddle.meshgrid(idx, idx)
xx = paddle.reshape(xx, [(- 1)])
yy = paddle.reshape(yy, [(- 1)])
xc = paddle.arange(0, feat_w, dtype=dtype)
yc = paddle.arange(0, feat_h, dtype=dtype)
(yc, xc) = paddle.meshgrid(yc, xc)
xc = paddle.reshape(xc, [(- 1), 1])
yc = paddle.reshape(yc, [(- 1), 1])
x_conv = (xc + xx)
y_conv = (yc + yy)
x_ctr = anchors[:, 0]
y_ctr = anchors[:, 1]
w = anchors[:, 2]
h = anchors[:, 3]
a = anchors[:, 4]
x_ctr = paddle.reshape(x_ctr, [(- 1), 1])
y_ctr = paddle.reshape(y_ctr, [(- 1), 1])
w = paddle.reshape(w, [(- 1), 1])
h = paddle.reshape(h, [(- 1), 1])
a = paddle.reshape(a, [(- 1), 1])
x_ctr = (x_ctr / stride)
y_ctr = (y_ctr / stride)
w_s = (w / stride)
h_s = (h / stride)
(cos, sin) = (paddle.cos(a), paddle.sin(a))
(dw, dh) = ((w_s / self.kernel_size), (h_s / self.kernel_size))
(x, y) = ((dw * xx), (dh * yy))
xr = ((cos * x) - (sin * y))
yr = ((sin * x) + (cos * y))
(x_anchor, y_anchor) = ((xr + x_ctr), (yr + y_ctr))
offset_x = (x_anchor - x_conv)
offset_y = (y_anchor - y_conv)
offset = paddle.stack([offset_y, offset_x], axis=(- 1))
offset = paddle.reshape(offset, [(feat_h * feat_w), ((self.kernel_size * self.kernel_size) * 2)])
offset = paddle.transpose(offset, [1, 0])
offset = paddle.reshape(offset, [1, ((self.kernel_size * self.kernel_size) * 2), feat_h, feat_w])
return offset
| 5,071,276,570,165,933,000
|
Args:
anchors: [M,5] xc,yc,w,h,angle
featmap_size: (feat_h, feat_w)
stride: 8
Returns:
|
ppdet/modeling/heads/s2anet_head.py
|
get_offset
|
1190202328/PaddleDetection
|
python
|
@paddle.no_grad()
def get_offset(self, anchors, featmap_size, stride):
'\n Args:\n anchors: [M,5] xc,yc,w,h,angle\n featmap_size: (feat_h, feat_w)\n stride: 8\n Returns:\n\n '
anchors = paddle.reshape(anchors, [(- 1), 5])
dtype = anchors.dtype
feat_h = featmap_size[0]
feat_w = featmap_size[1]
pad = ((self.kernel_size - 1) // 2)
idx = paddle.arange((- pad), (pad + 1), dtype=dtype)
(yy, xx) = paddle.meshgrid(idx, idx)
xx = paddle.reshape(xx, [(- 1)])
yy = paddle.reshape(yy, [(- 1)])
xc = paddle.arange(0, feat_w, dtype=dtype)
yc = paddle.arange(0, feat_h, dtype=dtype)
(yc, xc) = paddle.meshgrid(yc, xc)
xc = paddle.reshape(xc, [(- 1), 1])
yc = paddle.reshape(yc, [(- 1), 1])
x_conv = (xc + xx)
y_conv = (yc + yy)
x_ctr = anchors[:, 0]
y_ctr = anchors[:, 1]
w = anchors[:, 2]
h = anchors[:, 3]
a = anchors[:, 4]
x_ctr = paddle.reshape(x_ctr, [(- 1), 1])
y_ctr = paddle.reshape(y_ctr, [(- 1), 1])
w = paddle.reshape(w, [(- 1), 1])
h = paddle.reshape(h, [(- 1), 1])
a = paddle.reshape(a, [(- 1), 1])
x_ctr = (x_ctr / stride)
y_ctr = (y_ctr / stride)
w_s = (w / stride)
h_s = (h / stride)
(cos, sin) = (paddle.cos(a), paddle.sin(a))
(dw, dh) = ((w_s / self.kernel_size), (h_s / self.kernel_size))
(x, y) = ((dw * xx), (dh * yy))
xr = ((cos * x) - (sin * y))
yr = ((sin * x) + (cos * y))
(x_anchor, y_anchor) = ((xr + x_ctr), (yr + y_ctr))
offset_x = (x_anchor - x_conv)
offset_y = (y_anchor - y_conv)
offset = paddle.stack([offset_y, offset_x], axis=(- 1))
offset = paddle.reshape(offset, [(feat_h * feat_w), ((self.kernel_size * self.kernel_size) * 2)])
offset = paddle.transpose(offset, [1, 0])
offset = paddle.reshape(offset, [1, ((self.kernel_size * self.kernel_size) * 2), feat_h, feat_w])
return offset
|
def smooth_l1_loss(self, pred, label, delta=(1.0 / 9.0)):
'\n Args:\n pred: pred score\n label: label\n delta: delta\n Returns: loss\n '
assert ((pred.shape == label.shape) and (label.numel() > 0))
assert (delta > 0)
diff = paddle.abs((pred - label))
loss = paddle.where((diff < delta), (((0.5 * diff) * diff) / delta), (diff - (0.5 * delta)))
return loss
| 886,629,821,782,605,200
|
Args:
pred: pred score
label: label
delta: delta
Returns: loss
|
ppdet/modeling/heads/s2anet_head.py
|
smooth_l1_loss
|
1190202328/PaddleDetection
|
python
|
def smooth_l1_loss(self, pred, label, delta=(1.0 / 9.0)):
'\n Args:\n pred: pred score\n label: label\n delta: delta\n Returns: loss\n '
assert ((pred.shape == label.shape) and (label.numel() > 0))
assert (delta > 0)
diff = paddle.abs((pred - label))
loss = paddle.where((diff < delta), (((0.5 * diff) * diff) / delta), (diff - (0.5 * delta)))
return loss
|
def rect2rbox(self, bboxes):
'\n :param bboxes: shape (n, 4) (xmin, ymin, xmax, ymax)\n :return: dbboxes: shape (n, 5) (x_ctr, y_ctr, w, h, angle)\n '
bboxes = paddle.reshape(bboxes, [(- 1), 4])
num_boxes = paddle.shape(bboxes)[0]
x_ctr = ((bboxes[:, 2] + bboxes[:, 0]) / 2.0)
y_ctr = ((bboxes[:, 3] + bboxes[:, 1]) / 2.0)
edges1 = paddle.abs((bboxes[:, 2] - bboxes[:, 0]))
edges2 = paddle.abs((bboxes[:, 3] - bboxes[:, 1]))
rbox_w = paddle.maximum(edges1, edges2)
rbox_h = paddle.minimum(edges1, edges2)
inds = (edges1 < edges2)
inds = paddle.cast(inds, 'int32')
rboxes_angle = ((inds * np.pi) / 2.0)
rboxes = paddle.stack((x_ctr, y_ctr, rbox_w, rbox_h, rboxes_angle), axis=1)
return rboxes
| 8,184,311,432,423,110,000
|
:param bboxes: shape (n, 4) (xmin, ymin, xmax, ymax)
:return: dbboxes: shape (n, 5) (x_ctr, y_ctr, w, h, angle)
|
ppdet/modeling/heads/s2anet_head.py
|
rect2rbox
|
1190202328/PaddleDetection
|
python
|
def rect2rbox(self, bboxes):
'\n :param bboxes: shape (n, 4) (xmin, ymin, xmax, ymax)\n :return: dbboxes: shape (n, 5) (x_ctr, y_ctr, w, h, angle)\n '
bboxes = paddle.reshape(bboxes, [(- 1), 4])
num_boxes = paddle.shape(bboxes)[0]
x_ctr = ((bboxes[:, 2] + bboxes[:, 0]) / 2.0)
y_ctr = ((bboxes[:, 3] + bboxes[:, 1]) / 2.0)
edges1 = paddle.abs((bboxes[:, 2] - bboxes[:, 0]))
edges2 = paddle.abs((bboxes[:, 3] - bboxes[:, 1]))
rbox_w = paddle.maximum(edges1, edges2)
rbox_h = paddle.minimum(edges1, edges2)
inds = (edges1 < edges2)
inds = paddle.cast(inds, 'int32')
rboxes_angle = ((inds * np.pi) / 2.0)
rboxes = paddle.stack((x_ctr, y_ctr, rbox_w, rbox_h, rboxes_angle), axis=1)
return rboxes
|
def delta2rbox(self, rrois, deltas, wh_ratio_clip=1e-06):
'\n :param rrois: (cx, cy, w, h, theta)\n :param deltas: (dx, dy, dw, dh, dtheta)\n :param means: means of anchor\n :param stds: stds of anchor\n :param wh_ratio_clip: clip threshold of wh_ratio\n :return:\n '
deltas = paddle.reshape(deltas, [(- 1), 5])
rrois = paddle.reshape(rrois, [(- 1), 5])
denorm_deltas = paddle.add(paddle.multiply(deltas, self.stds), self.means)
dx = denorm_deltas[:, 0]
dy = denorm_deltas[:, 1]
dw = denorm_deltas[:, 2]
dh = denorm_deltas[:, 3]
dangle = denorm_deltas[:, 4]
max_ratio = np.abs(np.log(wh_ratio_clip))
dw = paddle.clip(dw, min=(- max_ratio), max=max_ratio)
dh = paddle.clip(dh, min=(- max_ratio), max=max_ratio)
rroi_x = rrois[:, 0]
rroi_y = rrois[:, 1]
rroi_w = rrois[:, 2]
rroi_h = rrois[:, 3]
rroi_angle = rrois[:, 4]
gx = ((((dx * rroi_w) * paddle.cos(rroi_angle)) - ((dy * rroi_h) * paddle.sin(rroi_angle))) + rroi_x)
gy = ((((dx * rroi_w) * paddle.sin(rroi_angle)) + ((dy * rroi_h) * paddle.cos(rroi_angle))) + rroi_y)
gw = (rroi_w * dw.exp())
gh = (rroi_h * dh.exp())
ga = ((np.pi * dangle) + rroi_angle)
ga = (((ga + (np.pi / 4)) % np.pi) - (np.pi / 4))
ga = paddle.to_tensor(ga)
gw = paddle.to_tensor(gw, dtype='float32')
gh = paddle.to_tensor(gh, dtype='float32')
bboxes = paddle.stack([gx, gy, gw, gh, ga], axis=(- 1))
return bboxes
| 7,948,377,434,776,396,000
|
:param rrois: (cx, cy, w, h, theta)
:param deltas: (dx, dy, dw, dh, dtheta)
:param means: means of anchor
:param stds: stds of anchor
:param wh_ratio_clip: clip threshold of wh_ratio
:return:
|
ppdet/modeling/heads/s2anet_head.py
|
delta2rbox
|
1190202328/PaddleDetection
|
python
|
def delta2rbox(self, rrois, deltas, wh_ratio_clip=1e-06):
'\n :param rrois: (cx, cy, w, h, theta)\n :param deltas: (dx, dy, dw, dh, dtheta)\n :param means: means of anchor\n :param stds: stds of anchor\n :param wh_ratio_clip: clip threshold of wh_ratio\n :return:\n '
deltas = paddle.reshape(deltas, [(- 1), 5])
rrois = paddle.reshape(rrois, [(- 1), 5])
denorm_deltas = paddle.add(paddle.multiply(deltas, self.stds), self.means)
dx = denorm_deltas[:, 0]
dy = denorm_deltas[:, 1]
dw = denorm_deltas[:, 2]
dh = denorm_deltas[:, 3]
dangle = denorm_deltas[:, 4]
max_ratio = np.abs(np.log(wh_ratio_clip))
dw = paddle.clip(dw, min=(- max_ratio), max=max_ratio)
dh = paddle.clip(dh, min=(- max_ratio), max=max_ratio)
rroi_x = rrois[:, 0]
rroi_y = rrois[:, 1]
rroi_w = rrois[:, 2]
rroi_h = rrois[:, 3]
rroi_angle = rrois[:, 4]
gx = ((((dx * rroi_w) * paddle.cos(rroi_angle)) - ((dy * rroi_h) * paddle.sin(rroi_angle))) + rroi_x)
gy = ((((dx * rroi_w) * paddle.sin(rroi_angle)) + ((dy * rroi_h) * paddle.cos(rroi_angle))) + rroi_y)
gw = (rroi_w * dw.exp())
gh = (rroi_h * dh.exp())
ga = ((np.pi * dangle) + rroi_angle)
ga = (((ga + (np.pi / 4)) % np.pi) - (np.pi / 4))
ga = paddle.to_tensor(ga)
gw = paddle.to_tensor(gw, dtype='float32')
gh = paddle.to_tensor(gh, dtype='float32')
bboxes = paddle.stack([gx, gy, gw, gh, ga], axis=(- 1))
return bboxes
|
def bbox_decode(self, bbox_preds, anchors):
'decode bbox from deltas\n Args:\n bbox_preds: [N,H,W,5]\n anchors: [H*W,5]\n return:\n bboxes: [N,H,W,5]\n '
(num_imgs, H, W, _) = bbox_preds.shape
bbox_delta = paddle.reshape(bbox_preds, [(- 1), 5])
bboxes = self.delta2rbox(anchors, bbox_delta)
return bboxes
| -8,842,847,705,775,022,000
|
decode bbox from deltas
Args:
bbox_preds: [N,H,W,5]
anchors: [H*W,5]
return:
bboxes: [N,H,W,5]
|
ppdet/modeling/heads/s2anet_head.py
|
bbox_decode
|
1190202328/PaddleDetection
|
python
|
def bbox_decode(self, bbox_preds, anchors):
'decode bbox from deltas\n Args:\n bbox_preds: [N,H,W,5]\n anchors: [H*W,5]\n return:\n bboxes: [N,H,W,5]\n '
(num_imgs, H, W, _) = bbox_preds.shape
bbox_delta = paddle.reshape(bbox_preds, [(- 1), 5])
bboxes = self.delta2rbox(anchors, bbox_delta)
return bboxes
|
def get_properties(self):
'\n construct 3 dictionaries:\n - joint index to joint name x2 (1 for revolute, 1 for fixed joints)\n - link name to link index dictionary\n '
rev_joint_index_name_dic = {}
fixed_joint_index_name_dic = {}
prismatic_joint_index_name_dic = {}
link_names_to_ids_dic = {}
for joint_index in range(0, self.pb.getNumJoints(self.robot)):
info = self.pb.getJointInfo(self.robot, joint_index)
link_names_to_ids_dic[info[12].decode('utf-8')] = joint_index
if (info[2] == self.pb.JOINT_REVOLUTE):
rev_joint_index_name_dic[joint_index] = info[1].decode('utf-8')
elif (info[2] == self.pb.JOINT_FIXED):
fixed_joint_index_name_dic[joint_index] = info[1].decode('utf-8')
elif (info[2] == self.pb.JOINT_PRISMATIC):
prismatic_joint_index_name_dic[joint_index] = info[1].decode('utf-8')
return (rev_joint_index_name_dic, prismatic_joint_index_name_dic, fixed_joint_index_name_dic, link_names_to_ids_dic)
| -2,535,933,811,214,409,700
|
construct 3 dictionaries:
- joint index to joint name x2 (1 for revolute, 1 for fixed joints)
- link name to link index dictionary
|
pybullet_ros/pybullet_ros_wrapper.py
|
get_properties
|
packbionics/pybullet_ros
|
python
|
def get_properties(self):
'\n construct 3 dictionaries:\n - joint index to joint name x2 (1 for revolute, 1 for fixed joints)\n - link name to link index dictionary\n '
rev_joint_index_name_dic = {}
fixed_joint_index_name_dic = {}
prismatic_joint_index_name_dic = {}
link_names_to_ids_dic = {}
for joint_index in range(0, self.pb.getNumJoints(self.robot)):
info = self.pb.getJointInfo(self.robot, joint_index)
link_names_to_ids_dic[info[12].decode('utf-8')] = joint_index
if (info[2] == self.pb.JOINT_REVOLUTE):
rev_joint_index_name_dic[joint_index] = info[1].decode('utf-8')
elif (info[2] == self.pb.JOINT_FIXED):
fixed_joint_index_name_dic[joint_index] = info[1].decode('utf-8')
elif (info[2] == self.pb.JOINT_PRISMATIC):
prismatic_joint_index_name_dic[joint_index] = info[1].decode('utf-8')
return (rev_joint_index_name_dic, prismatic_joint_index_name_dic, fixed_joint_index_name_dic, link_names_to_ids_dic)
|
def handle_reset_simulation(self, req):
'Callback to handle the service offered by this node to reset the simulation'
self.get_logger().info('reseting simulation now')
self.pb.resetSimulation()
return Empty()
| -6,883,740,475,398,433,000
|
Callback to handle the service offered by this node to reset the simulation
|
pybullet_ros/pybullet_ros_wrapper.py
|
handle_reset_simulation
|
packbionics/pybullet_ros
|
python
|
def handle_reset_simulation(self, req):
self.get_logger().info('reseting simulation now')
self.pb.resetSimulation()
return Empty()
|
def start_gui(self, gui=True):
'start physics engine (client) with or without gui'
if gui:
self.get_logger().info('Running pybullet with gui')
self.get_logger().info('-------------------------')
gui_options = self.get_parameter('gui_options').value
return self.pb.connect(self.pb.GUI, options=gui_options)
else:
self.get_logger().info('Running pybullet without gui')
self.get_logger().info('-------------------------')
return self.pb.connect(self.pb.DIRECT)
| -1,342,515,236,442,768,000
|
start physics engine (client) with or without gui
|
pybullet_ros/pybullet_ros_wrapper.py
|
start_gui
|
packbionics/pybullet_ros
|
python
|
def start_gui(self, gui=True):
if gui:
self.get_logger().info('Running pybullet with gui')
self.get_logger().info('-------------------------')
gui_options = self.get_parameter('gui_options').value
return self.pb.connect(self.pb.GUI, options=gui_options)
else:
self.get_logger().info('Running pybullet without gui')
self.get_logger().info('-------------------------')
return self.pb.connect(self.pb.DIRECT)
|
def init_pybullet_robot(self):
'load robot URDF model, set gravity, ground plane and environment'
urdf_path = self.get_parameter('robot_urdf_path').value
if (urdf_path == None):
self.get_logger().warn('mandatory param robot_urdf_path not set, will exit now')
rclpy.shutdown()
if (not os.path.isfile(urdf_path)):
self.get_logger().error(('param robot_urdf_path is set, but file does not exist : ' + urdf_path))
rclpy.shutdown()
return None
if ('xacro' in urdf_path):
urdf_path_without_xacro = (urdf_path[0:urdf_path.find('.xacro')] + urdf_path[(urdf_path.find('.xacro') + len('.xacro')):])
os.system(f'xacro {urdf_path} -o {urdf_path_without_xacro}')
urdf_path = urdf_path_without_xacro
robot_pose_x = self.get_parameter('robot_pose_x').value
robot_pose_y = self.get_parameter('robot_pose_y').value
robot_pose_z = self.get_parameter('robot_pose_z').value
robot_pose_yaw = self.get_parameter('robot_pose_yaw').value
robot_spawn_orientation = self.pb.getQuaternionFromEuler([0.0, 0.0, robot_pose_yaw])
fixed_base = self.get_parameter('fixed_base').value
if self.get_parameter('use_inertia_from_file').value:
urdf_flags = (self.pb.URDF_USE_INERTIA_FROM_FILE | self.pb.URDF_USE_SELF_COLLISION)
else:
urdf_flags = self.pb.URDF_USE_SELF_COLLISION
self.get_logger().info('loading environment')
self.environment.load_environment()
self.pb.setRealTimeSimulation(0)
self.get_logger().info(('loading urdf model: ' + urdf_path))
return self.pb.loadURDF(urdf_path, basePosition=[robot_pose_x, robot_pose_y, robot_pose_z], baseOrientation=robot_spawn_orientation, useFixedBase=fixed_base, flags=urdf_flags)
| -4,932,683,326,942,651,000
|
load robot URDF model, set gravity, ground plane and environment
|
pybullet_ros/pybullet_ros_wrapper.py
|
init_pybullet_robot
|
packbionics/pybullet_ros
|
python
|
def init_pybullet_robot(self):
urdf_path = self.get_parameter('robot_urdf_path').value
if (urdf_path == None):
self.get_logger().warn('mandatory param robot_urdf_path not set, will exit now')
rclpy.shutdown()
if (not os.path.isfile(urdf_path)):
self.get_logger().error(('param robot_urdf_path is set, but file does not exist : ' + urdf_path))
rclpy.shutdown()
return None
if ('xacro' in urdf_path):
urdf_path_without_xacro = (urdf_path[0:urdf_path.find('.xacro')] + urdf_path[(urdf_path.find('.xacro') + len('.xacro')):])
os.system(f'xacro {urdf_path} -o {urdf_path_without_xacro}')
urdf_path = urdf_path_without_xacro
robot_pose_x = self.get_parameter('robot_pose_x').value
robot_pose_y = self.get_parameter('robot_pose_y').value
robot_pose_z = self.get_parameter('robot_pose_z').value
robot_pose_yaw = self.get_parameter('robot_pose_yaw').value
robot_spawn_orientation = self.pb.getQuaternionFromEuler([0.0, 0.0, robot_pose_yaw])
fixed_base = self.get_parameter('fixed_base').value
if self.get_parameter('use_inertia_from_file').value:
urdf_flags = (self.pb.URDF_USE_INERTIA_FROM_FILE | self.pb.URDF_USE_SELF_COLLISION)
else:
urdf_flags = self.pb.URDF_USE_SELF_COLLISION
self.get_logger().info('loading environment')
self.environment.load_environment()
self.pb.setRealTimeSimulation(0)
self.get_logger().info(('loading urdf model: ' + urdf_path))
return self.pb.loadURDF(urdf_path, basePosition=[robot_pose_x, robot_pose_y, robot_pose_z], baseOrientation=robot_spawn_orientation, useFixedBase=fixed_base, flags=urdf_flags)
|
def handle_reset_simulation(self, req):
'Callback to handle the service offered by this node to reset the simulation'
self.get_logger().info('reseting simulation now')
self.pause_simulation = True
self.pb.resetSimulation()
self.init_pybullet_robot()
self.pause_simulation = False
return []
| -1,467,310,471,600,018,400
|
Callback to handle the service offered by this node to reset the simulation
|
pybullet_ros/pybullet_ros_wrapper.py
|
handle_reset_simulation
|
packbionics/pybullet_ros
|
python
|
def handle_reset_simulation(self, req):
self.get_logger().info('reseting simulation now')
self.pause_simulation = True
self.pb.resetSimulation()
self.init_pybullet_robot()
self.pause_simulation = False
return []
|
def handle_pause_physics(self, req):
'pause simulation, raise flag to prevent pybullet to execute self.pb.stepSimulation()'
self.get_logger().info('pausing simulation')
self.pause_simulation = False
return []
| 6,490,197,465,917,417,000
|
pause simulation, raise flag to prevent pybullet to execute self.pb.stepSimulation()
|
pybullet_ros/pybullet_ros_wrapper.py
|
handle_pause_physics
|
packbionics/pybullet_ros
|
python
|
def handle_pause_physics(self, req):
self.get_logger().info('pausing simulation')
self.pause_simulation = False
return []
|
def handle_unpause_physics(self, req):
'unpause simulation, lower flag to allow pybullet to execute self.pb.stepSimulation()'
self.get_logger().info('unpausing simulation')
self.pause_simulation = True
return []
| 276,483,701,356,102,600
|
unpause simulation, lower flag to allow pybullet to execute self.pb.stepSimulation()
|
pybullet_ros/pybullet_ros_wrapper.py
|
handle_unpause_physics
|
packbionics/pybullet_ros
|
python
|
def handle_unpause_physics(self, req):
self.get_logger().info('unpausing simulation')
self.pause_simulation = True
return []
|
def __init__(__self__, resource_name, opts=None, bucket=None, filter=None, name=None, storage_class_analysis=None, __props__=None, __name__=None, __opts__=None):
'\n Provides a S3 bucket [analytics configuration](https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) resource.\n\n ## Example Usage\n ### Add analytics configuration for entire S3 bucket and export results to a second S3 bucket\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n example = aws.s3.Bucket("example")\n analytics = aws.s3.Bucket("analytics")\n example_entire_bucket = aws.s3.AnalyticsConfiguration("example-entire-bucket",\n bucket=example.bucket,\n storage_class_analysis={\n "dataExport": {\n "destination": {\n "s3BucketDestination": {\n "bucketArn": analytics.arn,\n },\n },\n },\n })\n ```\n ### Add analytics configuration with S3 bucket object filter\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n example = aws.s3.Bucket("example")\n example_filtered = aws.s3.AnalyticsConfiguration("example-filtered",\n bucket=example.bucket,\n filter={\n "prefix": "documents/",\n "tags": {\n "priority": "high",\n "class": "blue",\n },\n })\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] bucket: The name of the bucket this analytics configuration is associated with.\n :param pulumi.Input[dict] filter: Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below).\n :param pulumi.Input[str] name: Unique identifier of the analytics configuration for the bucket.\n :param pulumi.Input[dict] storage_class_analysis: Configuration for the analytics data export (documented below).\n\n The **filter** object supports the following:\n\n * `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.\n * `tags` (`pulumi.Input[dict]`) - Set of object tags for filtering.\n\n The **storage_class_analysis** object supports the following:\n\n * `dataExport` (`pulumi.Input[dict]`) - Data export configuration (documented below).\n * `destination` (`pulumi.Input[dict]`) - Specifies the destination for the exported analytics data (documented below).\n * `s3BucketDestination` (`pulumi.Input[dict]`) - Analytics data export currently only supports an S3 bucket destination (documented below).\n * `bucketAccountId` (`pulumi.Input[str]`) - The account ID that owns the destination bucket.\n * `bucketArn` (`pulumi.Input[str]`) - The ARN of the destination bucket.\n * `format` (`pulumi.Input[str]`) - The output format of exported analytics data. Allowed values: `CSV`. Default value: `CSV`.\n * `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.\n\n * `outputSchemaVersion` (`pulumi.Input[str]`) - The schema version of exported analytics data. Allowed values: `V_1`. Default value: `V_1`.\n '
if (__name__ is not None):
warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning)
resource_name = __name__
if (__opts__ is not None):
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if (opts is None):
opts = pulumi.ResourceOptions()
if (not isinstance(opts, pulumi.ResourceOptions)):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if (opts.version is None):
opts.version = utilities.get_version()
if (opts.id is None):
if (__props__ is not None):
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if (bucket is None):
raise TypeError("Missing required property 'bucket'")
__props__['bucket'] = bucket
__props__['filter'] = filter
__props__['name'] = name
__props__['storage_class_analysis'] = storage_class_analysis
super(AnalyticsConfiguration, __self__).__init__('aws:s3/analyticsConfiguration:AnalyticsConfiguration', resource_name, __props__, opts)
| -7,051,861,357,956,645,000
|
Provides a S3 bucket [analytics configuration](https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) resource.
## Example Usage
### Add analytics configuration for entire S3 bucket and export results to a second S3 bucket
```python
import pulumi
import pulumi_aws as aws
example = aws.s3.Bucket("example")
analytics = aws.s3.Bucket("analytics")
example_entire_bucket = aws.s3.AnalyticsConfiguration("example-entire-bucket",
bucket=example.bucket,
storage_class_analysis={
"dataExport": {
"destination": {
"s3BucketDestination": {
"bucketArn": analytics.arn,
},
},
},
})
```
### Add analytics configuration with S3 bucket object filter
```python
import pulumi
import pulumi_aws as aws
example = aws.s3.Bucket("example")
example_filtered = aws.s3.AnalyticsConfiguration("example-filtered",
bucket=example.bucket,
filter={
"prefix": "documents/",
"tags": {
"priority": "high",
"class": "blue",
},
})
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] bucket: The name of the bucket this analytics configuration is associated with.
:param pulumi.Input[dict] filter: Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below).
:param pulumi.Input[str] name: Unique identifier of the analytics configuration for the bucket.
:param pulumi.Input[dict] storage_class_analysis: Configuration for the analytics data export (documented below).
The **filter** object supports the following:
* `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.
* `tags` (`pulumi.Input[dict]`) - Set of object tags for filtering.
The **storage_class_analysis** object supports the following:
* `dataExport` (`pulumi.Input[dict]`) - Data export configuration (documented below).
* `destination` (`pulumi.Input[dict]`) - Specifies the destination for the exported analytics data (documented below).
* `s3BucketDestination` (`pulumi.Input[dict]`) - Analytics data export currently only supports an S3 bucket destination (documented below).
* `bucketAccountId` (`pulumi.Input[str]`) - The account ID that owns the destination bucket.
* `bucketArn` (`pulumi.Input[str]`) - The ARN of the destination bucket.
* `format` (`pulumi.Input[str]`) - The output format of exported analytics data. Allowed values: `CSV`. Default value: `CSV`.
* `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.
* `outputSchemaVersion` (`pulumi.Input[str]`) - The schema version of exported analytics data. Allowed values: `V_1`. Default value: `V_1`.
|
sdk/python/pulumi_aws/s3/analytics_configuration.py
|
__init__
|
michael-golden/pulumi-aws
|
python
|
def __init__(__self__, resource_name, opts=None, bucket=None, filter=None, name=None, storage_class_analysis=None, __props__=None, __name__=None, __opts__=None):
'\n Provides a S3 bucket [analytics configuration](https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) resource.\n\n ## Example Usage\n ### Add analytics configuration for entire S3 bucket and export results to a second S3 bucket\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n example = aws.s3.Bucket("example")\n analytics = aws.s3.Bucket("analytics")\n example_entire_bucket = aws.s3.AnalyticsConfiguration("example-entire-bucket",\n bucket=example.bucket,\n storage_class_analysis={\n "dataExport": {\n "destination": {\n "s3BucketDestination": {\n "bucketArn": analytics.arn,\n },\n },\n },\n })\n ```\n ### Add analytics configuration with S3 bucket object filter\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n example = aws.s3.Bucket("example")\n example_filtered = aws.s3.AnalyticsConfiguration("example-filtered",\n bucket=example.bucket,\n filter={\n "prefix": "documents/",\n "tags": {\n "priority": "high",\n "class": "blue",\n },\n })\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] bucket: The name of the bucket this analytics configuration is associated with.\n :param pulumi.Input[dict] filter: Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below).\n :param pulumi.Input[str] name: Unique identifier of the analytics configuration for the bucket.\n :param pulumi.Input[dict] storage_class_analysis: Configuration for the analytics data export (documented below).\n\n The **filter** object supports the following:\n\n * `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.\n * `tags` (`pulumi.Input[dict]`) - Set of object tags for filtering.\n\n The **storage_class_analysis** object supports the following:\n\n * `dataExport` (`pulumi.Input[dict]`) - Data export configuration (documented below).\n * `destination` (`pulumi.Input[dict]`) - Specifies the destination for the exported analytics data (documented below).\n * `s3BucketDestination` (`pulumi.Input[dict]`) - Analytics data export currently only supports an S3 bucket destination (documented below).\n * `bucketAccountId` (`pulumi.Input[str]`) - The account ID that owns the destination bucket.\n * `bucketArn` (`pulumi.Input[str]`) - The ARN of the destination bucket.\n * `format` (`pulumi.Input[str]`) - The output format of exported analytics data. Allowed values: `CSV`. Default value: `CSV`.\n * `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.\n\n * `outputSchemaVersion` (`pulumi.Input[str]`) - The schema version of exported analytics data. Allowed values: `V_1`. Default value: `V_1`.\n '
if (__name__ is not None):
warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning)
resource_name = __name__
if (__opts__ is not None):
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if (opts is None):
opts = pulumi.ResourceOptions()
if (not isinstance(opts, pulumi.ResourceOptions)):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if (opts.version is None):
opts.version = utilities.get_version()
if (opts.id is None):
if (__props__ is not None):
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if (bucket is None):
raise TypeError("Missing required property 'bucket'")
__props__['bucket'] = bucket
__props__['filter'] = filter
__props__['name'] = name
__props__['storage_class_analysis'] = storage_class_analysis
super(AnalyticsConfiguration, __self__).__init__('aws:s3/analyticsConfiguration:AnalyticsConfiguration', resource_name, __props__, opts)
|
@staticmethod
def get(resource_name, id, opts=None, bucket=None, filter=None, name=None, storage_class_analysis=None):
"\n Get an existing AnalyticsConfiguration resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param str id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] bucket: The name of the bucket this analytics configuration is associated with.\n :param pulumi.Input[dict] filter: Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below).\n :param pulumi.Input[str] name: Unique identifier of the analytics configuration for the bucket.\n :param pulumi.Input[dict] storage_class_analysis: Configuration for the analytics data export (documented below).\n\n The **filter** object supports the following:\n\n * `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.\n * `tags` (`pulumi.Input[dict]`) - Set of object tags for filtering.\n\n The **storage_class_analysis** object supports the following:\n\n * `dataExport` (`pulumi.Input[dict]`) - Data export configuration (documented below).\n * `destination` (`pulumi.Input[dict]`) - Specifies the destination for the exported analytics data (documented below).\n * `s3BucketDestination` (`pulumi.Input[dict]`) - Analytics data export currently only supports an S3 bucket destination (documented below).\n * `bucketAccountId` (`pulumi.Input[str]`) - The account ID that owns the destination bucket.\n * `bucketArn` (`pulumi.Input[str]`) - The ARN of the destination bucket.\n * `format` (`pulumi.Input[str]`) - The output format of exported analytics data. Allowed values: `CSV`. Default value: `CSV`.\n * `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.\n\n * `outputSchemaVersion` (`pulumi.Input[str]`) - The schema version of exported analytics data. Allowed values: `V_1`. Default value: `V_1`.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__['bucket'] = bucket
__props__['filter'] = filter
__props__['name'] = name
__props__['storage_class_analysis'] = storage_class_analysis
return AnalyticsConfiguration(resource_name, opts=opts, __props__=__props__)
| -381,802,085,651,716,030
|
Get an existing AnalyticsConfiguration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] bucket: The name of the bucket this analytics configuration is associated with.
:param pulumi.Input[dict] filter: Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below).
:param pulumi.Input[str] name: Unique identifier of the analytics configuration for the bucket.
:param pulumi.Input[dict] storage_class_analysis: Configuration for the analytics data export (documented below).
The **filter** object supports the following:
* `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.
* `tags` (`pulumi.Input[dict]`) - Set of object tags for filtering.
The **storage_class_analysis** object supports the following:
* `dataExport` (`pulumi.Input[dict]`) - Data export configuration (documented below).
* `destination` (`pulumi.Input[dict]`) - Specifies the destination for the exported analytics data (documented below).
* `s3BucketDestination` (`pulumi.Input[dict]`) - Analytics data export currently only supports an S3 bucket destination (documented below).
* `bucketAccountId` (`pulumi.Input[str]`) - The account ID that owns the destination bucket.
* `bucketArn` (`pulumi.Input[str]`) - The ARN of the destination bucket.
* `format` (`pulumi.Input[str]`) - The output format of exported analytics data. Allowed values: `CSV`. Default value: `CSV`.
* `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.
* `outputSchemaVersion` (`pulumi.Input[str]`) - The schema version of exported analytics data. Allowed values: `V_1`. Default value: `V_1`.
|
sdk/python/pulumi_aws/s3/analytics_configuration.py
|
get
|
michael-golden/pulumi-aws
|
python
|
@staticmethod
def get(resource_name, id, opts=None, bucket=None, filter=None, name=None, storage_class_analysis=None):
"\n Get an existing AnalyticsConfiguration resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param str id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] bucket: The name of the bucket this analytics configuration is associated with.\n :param pulumi.Input[dict] filter: Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below).\n :param pulumi.Input[str] name: Unique identifier of the analytics configuration for the bucket.\n :param pulumi.Input[dict] storage_class_analysis: Configuration for the analytics data export (documented below).\n\n The **filter** object supports the following:\n\n * `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.\n * `tags` (`pulumi.Input[dict]`) - Set of object tags for filtering.\n\n The **storage_class_analysis** object supports the following:\n\n * `dataExport` (`pulumi.Input[dict]`) - Data export configuration (documented below).\n * `destination` (`pulumi.Input[dict]`) - Specifies the destination for the exported analytics data (documented below).\n * `s3BucketDestination` (`pulumi.Input[dict]`) - Analytics data export currently only supports an S3 bucket destination (documented below).\n * `bucketAccountId` (`pulumi.Input[str]`) - The account ID that owns the destination bucket.\n * `bucketArn` (`pulumi.Input[str]`) - The ARN of the destination bucket.\n * `format` (`pulumi.Input[str]`) - The output format of exported analytics data. Allowed values: `CSV`. Default value: `CSV`.\n * `prefix` (`pulumi.Input[str]`) - Object prefix for filtering.\n\n * `outputSchemaVersion` (`pulumi.Input[str]`) - The schema version of exported analytics data. Allowed values: `V_1`. Default value: `V_1`.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__['bucket'] = bucket
__props__['filter'] = filter
__props__['name'] = name
__props__['storage_class_analysis'] = storage_class_analysis
return AnalyticsConfiguration(resource_name, opts=opts, __props__=__props__)
|
def test_create_file(self):
'Test the creation of a simple XlsxWriter file.'
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'scatter', 'subtype': 'straight'})
chart.axis_ids = [54010624, 45705856]
data = [[1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15]]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'categories': '=Sheet1!$A$1:$A$5', 'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'categories': '=Sheet1!$A$1:$A$5', 'values': '=Sheet1!$C$1:$C$5'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| -8,052,261,018,325,395,000
|
Test the creation of a simple XlsxWriter file.
|
xlsxwriter/test/comparison/test_chart_scatter03.py
|
test_create_file
|
CrackerCat/XlsxWriter
|
python
|
def test_create_file(self):
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'scatter', 'subtype': 'straight'})
chart.axis_ids = [54010624, 45705856]
data = [[1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15]]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'categories': '=Sheet1!$A$1:$A$5', 'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'categories': '=Sheet1!$A$1:$A$5', 'values': '=Sheet1!$C$1:$C$5'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
def zipf_distribution(nbr_symbols, alpha):
"Helper function: Create a Zipf distribution.\n\n Args:\n nbr_symbols: number of symbols to use in the distribution.\n alpha: float, Zipf's Law Distribution parameter. Default = 1.5.\n Usually for modelling natural text distribution is in\n the range [1.1-1.6].\n\n Returns:\n distr_map: list of float, Zipf's distribution over nbr_symbols.\n\n "
tmp = np.power(np.arange(1, (nbr_symbols + 1)), (- alpha))
zeta = np.r_[(0.0, np.cumsum(tmp))]
return [(x / zeta[(- 1)]) for x in zeta]
| 5,605,568,572,597,886,000
|
Helper function: Create a Zipf distribution.
Args:
nbr_symbols: number of symbols to use in the distribution.
alpha: float, Zipf's Law Distribution parameter. Default = 1.5.
Usually for modelling natural text distribution is in
the range [1.1-1.6].
Returns:
distr_map: list of float, Zipf's distribution over nbr_symbols.
|
tensor2tensor/data_generators/algorithmic.py
|
zipf_distribution
|
PedroLelis/tensor2tensor
|
python
|
def zipf_distribution(nbr_symbols, alpha):
"Helper function: Create a Zipf distribution.\n\n Args:\n nbr_symbols: number of symbols to use in the distribution.\n alpha: float, Zipf's Law Distribution parameter. Default = 1.5.\n Usually for modelling natural text distribution is in\n the range [1.1-1.6].\n\n Returns:\n distr_map: list of float, Zipf's distribution over nbr_symbols.\n\n "
tmp = np.power(np.arange(1, (nbr_symbols + 1)), (- alpha))
zeta = np.r_[(0.0, np.cumsum(tmp))]
return [(x / zeta[(- 1)]) for x in zeta]
|
def zipf_random_sample(distr_map, sample_len):
"Helper function: Generate a random Zipf sample of given length.\n\n Args:\n distr_map: list of float, Zipf's distribution over nbr_symbols.\n sample_len: integer, length of sequence to generate.\n\n Returns:\n sample: list of integer, Zipf's random sample over nbr_symbols.\n\n "
u = np.random.random(sample_len)
return list(np.searchsorted(distr_map, u))
| 3,990,493,769,506,299,400
|
Helper function: Generate a random Zipf sample of given length.
Args:
distr_map: list of float, Zipf's distribution over nbr_symbols.
sample_len: integer, length of sequence to generate.
Returns:
sample: list of integer, Zipf's random sample over nbr_symbols.
|
tensor2tensor/data_generators/algorithmic.py
|
zipf_random_sample
|
PedroLelis/tensor2tensor
|
python
|
def zipf_random_sample(distr_map, sample_len):
"Helper function: Generate a random Zipf sample of given length.\n\n Args:\n distr_map: list of float, Zipf's distribution over nbr_symbols.\n sample_len: integer, length of sequence to generate.\n\n Returns:\n sample: list of integer, Zipf's random sample over nbr_symbols.\n\n "
u = np.random.random(sample_len)
return list(np.searchsorted(distr_map, u))
|
def reverse_generator_nlplike(nbr_symbols, max_length, nbr_cases, scale_std_dev=100, alpha=1.5):
'Generator for the reversing nlp-like task on sequences of symbols.\n\n The length of the sequence is drawn from a Gaussian(Normal) distribution\n at random from [1, max_length] and with std deviation of 1%,\n then symbols are drawn from Zipf\'s law at random from [0, nbr_symbols) until\n nbr_cases sequences have been produced.\n\n Args:\n nbr_symbols: integer, number of symbols.\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n scale_std_dev: float, Normal distribution\'s standard deviation scale factor\n used to draw the length of sequence. Default = 1% of the max_length.\n alpha: float, Zipf\'s Law Distribution parameter. Default = 1.5.\n Usually for modelling natural text distribution is in\n the range [1.1-1.6].\n\n Yields:\n A dictionary {"inputs": input-list, "targets": target-list} where\n target-list is input-list reversed.\n '
std_dev = (max_length / scale_std_dev)
distr_map = zipf_distribution(nbr_symbols, alpha)
for _ in range(nbr_cases):
l = int((abs(np.random.normal(loc=(max_length / 2), scale=std_dev)) + 1))
inputs = zipf_random_sample(distr_map, l)
(yield {'inputs': inputs, 'targets': list(reversed(inputs))})
| 6,382,139,318,736,053,000
|
Generator for the reversing nlp-like task on sequences of symbols.
The length of the sequence is drawn from a Gaussian(Normal) distribution
at random from [1, max_length] and with std deviation of 1%,
then symbols are drawn from Zipf's law at random from [0, nbr_symbols) until
nbr_cases sequences have been produced.
Args:
nbr_symbols: integer, number of symbols.
max_length: integer, maximum length of sequences to generate.
nbr_cases: the number of cases to generate.
scale_std_dev: float, Normal distribution's standard deviation scale factor
used to draw the length of sequence. Default = 1% of the max_length.
alpha: float, Zipf's Law Distribution parameter. Default = 1.5.
Usually for modelling natural text distribution is in
the range [1.1-1.6].
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
target-list is input-list reversed.
|
tensor2tensor/data_generators/algorithmic.py
|
reverse_generator_nlplike
|
PedroLelis/tensor2tensor
|
python
|
def reverse_generator_nlplike(nbr_symbols, max_length, nbr_cases, scale_std_dev=100, alpha=1.5):
'Generator for the reversing nlp-like task on sequences of symbols.\n\n The length of the sequence is drawn from a Gaussian(Normal) distribution\n at random from [1, max_length] and with std deviation of 1%,\n then symbols are drawn from Zipf\'s law at random from [0, nbr_symbols) until\n nbr_cases sequences have been produced.\n\n Args:\n nbr_symbols: integer, number of symbols.\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n scale_std_dev: float, Normal distribution\'s standard deviation scale factor\n used to draw the length of sequence. Default = 1% of the max_length.\n alpha: float, Zipf\'s Law Distribution parameter. Default = 1.5.\n Usually for modelling natural text distribution is in\n the range [1.1-1.6].\n\n Yields:\n A dictionary {"inputs": input-list, "targets": target-list} where\n target-list is input-list reversed.\n '
std_dev = (max_length / scale_std_dev)
distr_map = zipf_distribution(nbr_symbols, alpha)
for _ in range(nbr_cases):
l = int((abs(np.random.normal(loc=(max_length / 2), scale=std_dev)) + 1))
inputs = zipf_random_sample(distr_map, l)
(yield {'inputs': inputs, 'targets': list(reversed(inputs))})
|
def lower_endian_to_number(l, base):
'Helper function: convert a list of digits in the given base to a number.'
return sum([(d * (base ** i)) for (i, d) in enumerate(l)])
| 2,888,343,823,394,923,000
|
Helper function: convert a list of digits in the given base to a number.
|
tensor2tensor/data_generators/algorithmic.py
|
lower_endian_to_number
|
PedroLelis/tensor2tensor
|
python
|
def lower_endian_to_number(l, base):
return sum([(d * (base ** i)) for (i, d) in enumerate(l)])
|
def number_to_lower_endian(n, base):
'Helper function: convert a number to a list of digits in the given base.'
if (n < base):
return [n]
return ([(n % base)] + number_to_lower_endian((n // base), base))
| -7,649,382,115,913,318,000
|
Helper function: convert a number to a list of digits in the given base.
|
tensor2tensor/data_generators/algorithmic.py
|
number_to_lower_endian
|
PedroLelis/tensor2tensor
|
python
|
def number_to_lower_endian(n, base):
if (n < base):
return [n]
return ([(n % base)] + number_to_lower_endian((n // base), base))
|
def random_number_lower_endian(length, base):
'Helper function: generate a random number as a lower-endian digits list.'
if (length == 1):
return [np.random.randint(base)]
prefix = [np.random.randint(base) for _ in range((length - 1))]
return (prefix + [(np.random.randint((base - 1)) + 1)])
| -609,732,790,837,893,400
|
Helper function: generate a random number as a lower-endian digits list.
|
tensor2tensor/data_generators/algorithmic.py
|
random_number_lower_endian
|
PedroLelis/tensor2tensor
|
python
|
def random_number_lower_endian(length, base):
if (length == 1):
return [np.random.randint(base)]
prefix = [np.random.randint(base) for _ in range((length - 1))]
return (prefix + [(np.random.randint((base - 1)) + 1)])
|
def generator(self, nbr_symbols, max_length, nbr_cases):
'Generates the data.'
raise NotImplementedError()
| 101,036,179,159,610,560
|
Generates the data.
|
tensor2tensor/data_generators/algorithmic.py
|
generator
|
PedroLelis/tensor2tensor
|
python
|
def generator(self, nbr_symbols, max_length, nbr_cases):
raise NotImplementedError()
|
def generator(self, nbr_symbols, max_length, nbr_cases):
'Generator for the identity (copy) task on sequences of symbols.\n\n The length of the sequence is drawn uniformly at random from [1, max_length]\n and then symbols are drawn uniformly at random from [0, nbr_symbols) until\n nbr_cases sequences have been produced.\n\n Args:\n nbr_symbols: number of symbols to use in each sequence.\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n\n Yields:\n A dictionary {"inputs": input-list, "targets": target-list} where\n input-list and target-list are the same.\n '
for _ in range(nbr_cases):
l = (np.random.randint(max_length) + 1)
inputs = [np.random.randint(nbr_symbols) for _ in range(l)]
(yield {'inputs': inputs, 'targets': inputs})
| 1,047,746,998,304,470,700
|
Generator for the identity (copy) task on sequences of symbols.
The length of the sequence is drawn uniformly at random from [1, max_length]
and then symbols are drawn uniformly at random from [0, nbr_symbols) until
nbr_cases sequences have been produced.
Args:
nbr_symbols: number of symbols to use in each sequence.
max_length: integer, maximum length of sequences to generate.
nbr_cases: the number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list and target-list are the same.
|
tensor2tensor/data_generators/algorithmic.py
|
generator
|
PedroLelis/tensor2tensor
|
python
|
def generator(self, nbr_symbols, max_length, nbr_cases):
'Generator for the identity (copy) task on sequences of symbols.\n\n The length of the sequence is drawn uniformly at random from [1, max_length]\n and then symbols are drawn uniformly at random from [0, nbr_symbols) until\n nbr_cases sequences have been produced.\n\n Args:\n nbr_symbols: number of symbols to use in each sequence.\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n\n Yields:\n A dictionary {"inputs": input-list, "targets": target-list} where\n input-list and target-list are the same.\n '
for _ in range(nbr_cases):
l = (np.random.randint(max_length) + 1)
inputs = [np.random.randint(nbr_symbols) for _ in range(l)]
(yield {'inputs': inputs, 'targets': inputs})
|
def generator(self, nbr_symbols, max_length, nbr_cases):
'Generator for the shift task on sequences of symbols.\n\n The length of the sequence is drawn uniformly at random from [1, max_length]\n and then symbols are drawn uniformly at random from [0, nbr_symbols - shift]\n until nbr_cases sequences have been produced (output[i] = input[i] + shift).\n\n Args:\n nbr_symbols: number of symbols to use in each sequence (input + output).\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n\n Yields:\n A dictionary {"inputs": input-list, "targets": target-list} where\n target-list[i] = input-list[i] + shift.\n '
shift = 10
for _ in range(nbr_cases):
l = (np.random.randint(max_length) + 1)
inputs = [np.random.randint((nbr_symbols - shift)) for _ in range(l)]
(yield {'inputs': inputs, 'targets': [(i + shift) for i in inputs]})
| 8,281,185,637,235,074,000
|
Generator for the shift task on sequences of symbols.
The length of the sequence is drawn uniformly at random from [1, max_length]
and then symbols are drawn uniformly at random from [0, nbr_symbols - shift]
until nbr_cases sequences have been produced (output[i] = input[i] + shift).
Args:
nbr_symbols: number of symbols to use in each sequence (input + output).
max_length: integer, maximum length of sequences to generate.
nbr_cases: the number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
target-list[i] = input-list[i] + shift.
|
tensor2tensor/data_generators/algorithmic.py
|
generator
|
PedroLelis/tensor2tensor
|
python
|
def generator(self, nbr_symbols, max_length, nbr_cases):
'Generator for the shift task on sequences of symbols.\n\n The length of the sequence is drawn uniformly at random from [1, max_length]\n and then symbols are drawn uniformly at random from [0, nbr_symbols - shift]\n until nbr_cases sequences have been produced (output[i] = input[i] + shift).\n\n Args:\n nbr_symbols: number of symbols to use in each sequence (input + output).\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n\n Yields:\n A dictionary {"inputs": input-list, "targets": target-list} where\n target-list[i] = input-list[i] + shift.\n '
shift = 10
for _ in range(nbr_cases):
l = (np.random.randint(max_length) + 1)
inputs = [np.random.randint((nbr_symbols - shift)) for _ in range(l)]
(yield {'inputs': inputs, 'targets': [(i + shift) for i in inputs]})
|
def generator(self, nbr_symbols, max_length, nbr_cases):
'Generator for the reversing task on sequences of symbols.\n\n The length of the sequence is drawn uniformly at random from [1, max_length]\n and then symbols are drawn uniformly at random from [0, nbr_symbols) until\n nbr_cases sequences have been produced.\n\n Args:\n nbr_symbols: number of symbols to use in each sequence.\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n\n Yields:\n A dictionary {"inputs": input-list, "targets": target-list} where\n target-list is input-list reversed.\n '
for _ in range(nbr_cases):
l = (np.random.randint(max_length) + 1)
inputs = [np.random.randint(nbr_symbols) for _ in range(l)]
(yield {'inputs': inputs, 'targets': list(reversed(inputs))})
| -171,245,036,602,414,620
|
Generator for the reversing task on sequences of symbols.
The length of the sequence is drawn uniformly at random from [1, max_length]
and then symbols are drawn uniformly at random from [0, nbr_symbols) until
nbr_cases sequences have been produced.
Args:
nbr_symbols: number of symbols to use in each sequence.
max_length: integer, maximum length of sequences to generate.
nbr_cases: the number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
target-list is input-list reversed.
|
tensor2tensor/data_generators/algorithmic.py
|
generator
|
PedroLelis/tensor2tensor
|
python
|
def generator(self, nbr_symbols, max_length, nbr_cases):
'Generator for the reversing task on sequences of symbols.\n\n The length of the sequence is drawn uniformly at random from [1, max_length]\n and then symbols are drawn uniformly at random from [0, nbr_symbols) until\n nbr_cases sequences have been produced.\n\n Args:\n nbr_symbols: number of symbols to use in each sequence.\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n\n Yields:\n A dictionary {"inputs": input-list, "targets": target-list} where\n target-list is input-list reversed.\n '
for _ in range(nbr_cases):
l = (np.random.randint(max_length) + 1)
inputs = [np.random.randint(nbr_symbols) for _ in range(l)]
(yield {'inputs': inputs, 'targets': list(reversed(inputs))})
|
def generator(self, base, max_length, nbr_cases):
'Generator for the addition task.\n\n The length of each number is drawn uniformly at random in [1, max_length/2]\n and then digits are drawn uniformly at random. The numbers are added and\n separated by [base] in the input. Stops at nbr_cases.\n\n Args:\n base: in which base are the numbers.\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n\n Yields:\n A dictionary {"inputs": input-list, "targets": target-list} where\n input-list are the 2 numbers and target-list is the result of adding them.\n\n Raises:\n ValueError: if max_length is lower than 3.\n '
if (max_length < 3):
raise ValueError('Maximum length must be at least 3.')
for _ in range(nbr_cases):
l1 = (np.random.randint((max_length // 2)) + 1)
l2 = (np.random.randint(((max_length - l1) - 1)) + 1)
n1 = random_number_lower_endian(l1, base)
n2 = random_number_lower_endian(l2, base)
result = (lower_endian_to_number(n1, base) + lower_endian_to_number(n2, base))
inputs = ((n1 + [base]) + n2)
targets = number_to_lower_endian(result, base)
(yield {'inputs': inputs, 'targets': targets})
| -496,073,463,817,623,360
|
Generator for the addition task.
The length of each number is drawn uniformly at random in [1, max_length/2]
and then digits are drawn uniformly at random. The numbers are added and
separated by [base] in the input. Stops at nbr_cases.
Args:
base: in which base are the numbers.
max_length: integer, maximum length of sequences to generate.
nbr_cases: the number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the 2 numbers and target-list is the result of adding them.
Raises:
ValueError: if max_length is lower than 3.
|
tensor2tensor/data_generators/algorithmic.py
|
generator
|
PedroLelis/tensor2tensor
|
python
|
def generator(self, base, max_length, nbr_cases):
'Generator for the addition task.\n\n The length of each number is drawn uniformly at random in [1, max_length/2]\n and then digits are drawn uniformly at random. The numbers are added and\n separated by [base] in the input. Stops at nbr_cases.\n\n Args:\n base: in which base are the numbers.\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n\n Yields:\n A dictionary {"inputs": input-list, "targets": target-list} where\n input-list are the 2 numbers and target-list is the result of adding them.\n\n Raises:\n ValueError: if max_length is lower than 3.\n '
if (max_length < 3):
raise ValueError('Maximum length must be at least 3.')
for _ in range(nbr_cases):
l1 = (np.random.randint((max_length // 2)) + 1)
l2 = (np.random.randint(((max_length - l1) - 1)) + 1)
n1 = random_number_lower_endian(l1, base)
n2 = random_number_lower_endian(l2, base)
result = (lower_endian_to_number(n1, base) + lower_endian_to_number(n2, base))
inputs = ((n1 + [base]) + n2)
targets = number_to_lower_endian(result, base)
(yield {'inputs': inputs, 'targets': targets})
|
def generator(self, base, max_length, nbr_cases):
'Generator for the multiplication task.\n\n The length of each number is drawn uniformly at random in [1, max_length/2]\n and then digits are drawn uniformly at random. The numbers are multiplied\n and separated by [base] in the input. Stops at nbr_cases.\n\n Args:\n base: in which base are the numbers.\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n\n Yields:\n A dictionary {"inputs": input-list, "targets": target-list} where\n input-list are the 2 numbers and target-list is the result of multiplying\n them.\n\n Raises:\n ValueError: if max_length is lower than 3.\n '
if (max_length < 3):
raise ValueError('Maximum length must be at least 3.')
for _ in range(nbr_cases):
l1 = (np.random.randint((max_length // 2)) + 1)
l2 = (np.random.randint(((max_length - l1) - 1)) + 1)
n1 = random_number_lower_endian(l1, base)
n2 = random_number_lower_endian(l2, base)
result = (lower_endian_to_number(n1, base) * lower_endian_to_number(n2, base))
inputs = ((n1 + [base]) + n2)
targets = number_to_lower_endian(result, base)
(yield {'inputs': inputs, 'targets': targets})
| 3,938,567,779,248,577,500
|
Generator for the multiplication task.
The length of each number is drawn uniformly at random in [1, max_length/2]
and then digits are drawn uniformly at random. The numbers are multiplied
and separated by [base] in the input. Stops at nbr_cases.
Args:
base: in which base are the numbers.
max_length: integer, maximum length of sequences to generate.
nbr_cases: the number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
input-list are the 2 numbers and target-list is the result of multiplying
them.
Raises:
ValueError: if max_length is lower than 3.
|
tensor2tensor/data_generators/algorithmic.py
|
generator
|
PedroLelis/tensor2tensor
|
python
|
def generator(self, base, max_length, nbr_cases):
'Generator for the multiplication task.\n\n The length of each number is drawn uniformly at random in [1, max_length/2]\n and then digits are drawn uniformly at random. The numbers are multiplied\n and separated by [base] in the input. Stops at nbr_cases.\n\n Args:\n base: in which base are the numbers.\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n\n Yields:\n A dictionary {"inputs": input-list, "targets": target-list} where\n input-list are the 2 numbers and target-list is the result of multiplying\n them.\n\n Raises:\n ValueError: if max_length is lower than 3.\n '
if (max_length < 3):
raise ValueError('Maximum length must be at least 3.')
for _ in range(nbr_cases):
l1 = (np.random.randint((max_length // 2)) + 1)
l2 = (np.random.randint(((max_length - l1) - 1)) + 1)
n1 = random_number_lower_endian(l1, base)
n2 = random_number_lower_endian(l2, base)
result = (lower_endian_to_number(n1, base) * lower_endian_to_number(n2, base))
inputs = ((n1 + [base]) + n2)
targets = number_to_lower_endian(result, base)
(yield {'inputs': inputs, 'targets': targets})
|
@property
def unique(self):
'Unique numbers wo/ replacement or w/ replacement in sorting task.'
return False
| -7,437,857,752,923,323,000
|
Unique numbers wo/ replacement or w/ replacement in sorting task.
|
tensor2tensor/data_generators/algorithmic.py
|
unique
|
PedroLelis/tensor2tensor
|
python
|
@property
def unique(self):
return False
|
def generator(self, nbr_symbols, max_length, nbr_cases):
'Generating for sorting task on sequence of symbols.\n\n The length of the sequence is drawn uniformly at random from [1, max_length]\n and then symbols are drawn (uniquely w/ or w/o replacement) uniformly at\n random from [0, nbr_symbols) until nbr_cases sequences have been produced.\n\n Args:\n nbr_symbols: number of symbols to use in each sequence.\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n\n Yields:\n A dictionary {"inputs": input-list, "targets": target-list} where\n target-list is input-list sorted.\n '
for _ in range(nbr_cases):
length = (np.random.randint(max_length) + 1)
if self.unique:
inputs = np.arange(nbr_symbols)
np.random.shuffle(inputs)
inputs = inputs[:length]
inputs = list(inputs)
else:
inputs = list(np.random.randint(nbr_symbols, size=length))
targets = list(sorted(inputs))
(yield {'inputs': inputs, 'targets': targets})
| 540,110,827,905,498,500
|
Generating for sorting task on sequence of symbols.
The length of the sequence is drawn uniformly at random from [1, max_length]
and then symbols are drawn (uniquely w/ or w/o replacement) uniformly at
random from [0, nbr_symbols) until nbr_cases sequences have been produced.
Args:
nbr_symbols: number of symbols to use in each sequence.
max_length: integer, maximum length of sequences to generate.
nbr_cases: the number of cases to generate.
Yields:
A dictionary {"inputs": input-list, "targets": target-list} where
target-list is input-list sorted.
|
tensor2tensor/data_generators/algorithmic.py
|
generator
|
PedroLelis/tensor2tensor
|
python
|
def generator(self, nbr_symbols, max_length, nbr_cases):
'Generating for sorting task on sequence of symbols.\n\n The length of the sequence is drawn uniformly at random from [1, max_length]\n and then symbols are drawn (uniquely w/ or w/o replacement) uniformly at\n random from [0, nbr_symbols) until nbr_cases sequences have been produced.\n\n Args:\n nbr_symbols: number of symbols to use in each sequence.\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n\n Yields:\n A dictionary {"inputs": input-list, "targets": target-list} where\n target-list is input-list sorted.\n '
for _ in range(nbr_cases):
length = (np.random.randint(max_length) + 1)
if self.unique:
inputs = np.arange(nbr_symbols)
np.random.shuffle(inputs)
inputs = inputs[:length]
inputs = list(inputs)
else:
inputs = list(np.random.randint(nbr_symbols, size=length))
targets = list(sorted(inputs))
(yield {'inputs': inputs, 'targets': targets})
|
def generate_data(self, data_dir, tmp_dir, task_id=(- 1)):
'Ganerate data for this problem.'
del tmp_dir, task_id
identity_problem = AlgorithmicIdentityBinary40()
utils.generate_files(identity_problem.generator(self.num_symbols, 40, 100000), self.training_filepaths(data_dir, 1, shuffled=True), 100)
utils.generate_files(identity_problem.generator(self.num_symbols, 400, 10000), self.dev_filepaths(data_dir, 1, shuffled=True), 100)
| 5,385,898,703,597,336,000
|
Ganerate data for this problem.
|
tensor2tensor/data_generators/algorithmic.py
|
generate_data
|
PedroLelis/tensor2tensor
|
python
|
def generate_data(self, data_dir, tmp_dir, task_id=(- 1)):
del tmp_dir, task_id
identity_problem = AlgorithmicIdentityBinary40()
utils.generate_files(identity_problem.generator(self.num_symbols, 40, 100000), self.training_filepaths(data_dir, 1, shuffled=True), 100)
utils.generate_files(identity_problem.generator(self.num_symbols, 400, 10000), self.dev_filepaths(data_dir, 1, shuffled=True), 100)
|
@classmethod
def setup_for_test(cls):
'Setup directories and files required to run the problem.'
tmp_dir = tf.test.get_temp_dir()
shutil.rmtree(tmp_dir)
os.mkdir(tmp_dir)
cls.data_dir = tmp_dir
cls().generate_data(TinyAlgo.data_dir, None)
| 6,394,519,504,221,772,000
|
Setup directories and files required to run the problem.
|
tensor2tensor/data_generators/algorithmic.py
|
setup_for_test
|
PedroLelis/tensor2tensor
|
python
|
@classmethod
def setup_for_test(cls):
tmp_dir = tf.test.get_temp_dir()
shutil.rmtree(tmp_dir)
os.mkdir(tmp_dir)
cls.data_dir = tmp_dir
cls().generate_data(TinyAlgo.data_dir, None)
|
def generator_eos(nbr_symbols, max_length, nbr_cases):
'Shift by NUM_RESERVED_IDS and append EOS token.'
for case in self.generator(nbr_symbols, max_length, nbr_cases):
new_case = {}
for feature in case:
new_case[feature] = ([(i + text_encoder.NUM_RESERVED_TOKENS) for i in case[feature]] + [text_encoder.EOS_ID])
(yield new_case)
| 3,016,446,733,464,996,000
|
Shift by NUM_RESERVED_IDS and append EOS token.
|
tensor2tensor/data_generators/algorithmic.py
|
generator_eos
|
PedroLelis/tensor2tensor
|
python
|
def generator_eos(nbr_symbols, max_length, nbr_cases):
for case in self.generator(nbr_symbols, max_length, nbr_cases):
new_case = {}
for feature in case:
new_case[feature] = ([(i + text_encoder.NUM_RESERVED_TOKENS) for i in case[feature]] + [text_encoder.EOS_ID])
(yield new_case)
|
@app.errorhandler(404)
def page_not_found(e):
'Return a custom 404 error.'
return ('Sorry, nothing at this URL.', 404)
| 8,151,873,939,979,943,000
|
Return a custom 404 error.
|
main.py
|
page_not_found
|
rekab/papt
|
python
|
@app.errorhandler(404)
def page_not_found(e):
return ('Sorry, nothing at this URL.', 404)
|
def generate_data(rollouts, data_dir, noise_type):
' Generates data '
assert exists(data_dir), 'The data directory does not exist...'
env = gym.make('CarRacing-v0')
seq_len = 1000
for i in range(rollouts):
env.reset()
env.env.viewer.window.dispatch_events()
if (noise_type == 'white'):
a_rollout = [env.action_space.sample() for _ in range(seq_len)]
elif (noise_type == 'brown'):
a_rollout = sample_continuous_policy(env.action_space, seq_len, (1.0 / 50))
s_rollout = []
r_rollout = []
d_rollout = []
t = 0
while True:
action = a_rollout[t]
t += 1
(s, r, done, _) = env.step(action)
env.env.viewer.window.dispatch_events()
s_rollout += [s]
r_rollout += [r]
d_rollout += [done]
if done:
print('> End of rollout {}, {} frames...'.format(i, len(s_rollout)))
np.savez(join(data_dir, 'rollout_{}'.format(i)), observations=np.array(s_rollout), rewards=np.array(r_rollout), actions=np.array(a_rollout), terminals=np.array(d_rollout))
break
| -2,347,582,097,888,886,000
|
Generates data
|
data/carracing.py
|
generate_data
|
susanwe/world-models
|
python
|
def generate_data(rollouts, data_dir, noise_type):
' '
assert exists(data_dir), 'The data directory does not exist...'
env = gym.make('CarRacing-v0')
seq_len = 1000
for i in range(rollouts):
env.reset()
env.env.viewer.window.dispatch_events()
if (noise_type == 'white'):
a_rollout = [env.action_space.sample() for _ in range(seq_len)]
elif (noise_type == 'brown'):
a_rollout = sample_continuous_policy(env.action_space, seq_len, (1.0 / 50))
s_rollout = []
r_rollout = []
d_rollout = []
t = 0
while True:
action = a_rollout[t]
t += 1
(s, r, done, _) = env.step(action)
env.env.viewer.window.dispatch_events()
s_rollout += [s]
r_rollout += [r]
d_rollout += [done]
if done:
print('> End of rollout {}, {} frames...'.format(i, len(s_rollout)))
np.savez(join(data_dir, 'rollout_{}'.format(i)), observations=np.array(s_rollout), rewards=np.array(r_rollout), actions=np.array(a_rollout), terminals=np.array(d_rollout))
break
|
def __init__(__self__, *, accelerator_count: Optional[pulumi.Input[int]]=None, accelerator_type: Optional[pulumi.Input[str]]=None):
'\n A specification of the type and number of accelerator cards attached to the instance.\n :param pulumi.Input[int] accelerator_count: The number of the guest accelerator cards exposed to this instance.\n :param pulumi.Input[str] accelerator_type: Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types.\n '
if (accelerator_count is not None):
pulumi.set(__self__, 'accelerator_count', accelerator_count)
if (accelerator_type is not None):
pulumi.set(__self__, 'accelerator_type', accelerator_type)
| -8,764,338,972,364,156,000
|
A specification of the type and number of accelerator cards attached to the instance.
:param pulumi.Input[int] accelerator_count: The number of the guest accelerator cards exposed to this instance.
:param pulumi.Input[str] accelerator_type: Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
__init__
|
AaronFriel/pulumi-google-native
|
python
|
def __init__(__self__, *, accelerator_count: Optional[pulumi.Input[int]]=None, accelerator_type: Optional[pulumi.Input[str]]=None):
'\n A specification of the type and number of accelerator cards attached to the instance.\n :param pulumi.Input[int] accelerator_count: The number of the guest accelerator cards exposed to this instance.\n :param pulumi.Input[str] accelerator_type: Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types.\n '
if (accelerator_count is not None):
pulumi.set(__self__, 'accelerator_count', accelerator_count)
if (accelerator_type is not None):
pulumi.set(__self__, 'accelerator_type', accelerator_type)
|
@property
@pulumi.getter(name='acceleratorCount')
def accelerator_count(self) -> Optional[pulumi.Input[int]]:
'\n The number of the guest accelerator cards exposed to this instance.\n '
return pulumi.get(self, 'accelerator_count')
| -1,876,897,111,891,348,000
|
The number of the guest accelerator cards exposed to this instance.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
accelerator_count
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='acceleratorCount')
def accelerator_count(self) -> Optional[pulumi.Input[int]]:
'\n \n '
return pulumi.get(self, 'accelerator_count')
|
@property
@pulumi.getter(name='acceleratorType')
def accelerator_type(self) -> Optional[pulumi.Input[str]]:
'\n Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types.\n '
return pulumi.get(self, 'accelerator_type')
| 7,495,562,579,804,966,000
|
Full or partial URL of the accelerator type resource to attach to this instance. For example: projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100 If you are creating an instance template, specify only the accelerator name. See GPUs on Compute Engine for a full list of accelerator types.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
accelerator_type
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='acceleratorType')
def accelerator_type(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'accelerator_type')
|
def __init__(__self__, *, external_ipv6: Optional[pulumi.Input[str]]=None, external_ipv6_prefix_length: Optional[pulumi.Input[int]]=None, name: Optional[pulumi.Input[str]]=None, nat_ip: Optional[pulumi.Input[str]]=None, network_tier: Optional[pulumi.Input['AccessConfigNetworkTier']]=None, public_ptr_domain_name: Optional[pulumi.Input[str]]=None, set_public_dns: Optional[pulumi.Input[bool]]=None, set_public_ptr: Optional[pulumi.Input[bool]]=None, type: Optional[pulumi.Input['AccessConfigType']]=None):
"\n An access configuration attached to an instance's network interface. Only one access config per instance is supported.\n :param pulumi.Input[str] external_ipv6: The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically.\n :param pulumi.Input[int] external_ipv6_prefix_length: The prefix length of the external IPv6 range.\n :param pulumi.Input[str] name: The name of this access configuration. The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access.\n :param pulumi.Input[str] nat_ip: An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.\n :param pulumi.Input['AccessConfigNetworkTier'] network_tier: This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP.\n :param pulumi.Input[str] public_ptr_domain_name: The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range.\n :param pulumi.Input[bool] set_public_dns: Specifies whether a public DNS 'A' record should be created for the external IP address of this access configuration.\n :param pulumi.Input[bool] set_public_ptr: Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated.\n :param pulumi.Input['AccessConfigType'] type: The type of configuration. The default and only option is ONE_TO_ONE_NAT.\n "
if (external_ipv6 is not None):
pulumi.set(__self__, 'external_ipv6', external_ipv6)
if (external_ipv6_prefix_length is not None):
pulumi.set(__self__, 'external_ipv6_prefix_length', external_ipv6_prefix_length)
if (name is not None):
pulumi.set(__self__, 'name', name)
if (nat_ip is not None):
pulumi.set(__self__, 'nat_ip', nat_ip)
if (network_tier is not None):
pulumi.set(__self__, 'network_tier', network_tier)
if (public_ptr_domain_name is not None):
pulumi.set(__self__, 'public_ptr_domain_name', public_ptr_domain_name)
if (set_public_dns is not None):
pulumi.set(__self__, 'set_public_dns', set_public_dns)
if (set_public_ptr is not None):
pulumi.set(__self__, 'set_public_ptr', set_public_ptr)
if (type is not None):
pulumi.set(__self__, 'type', type)
| -9,004,404,242,649,221,000
|
An access configuration attached to an instance's network interface. Only one access config per instance is supported.
:param pulumi.Input[str] external_ipv6: The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically.
:param pulumi.Input[int] external_ipv6_prefix_length: The prefix length of the external IPv6 range.
:param pulumi.Input[str] name: The name of this access configuration. The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access.
:param pulumi.Input[str] nat_ip: An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.
:param pulumi.Input['AccessConfigNetworkTier'] network_tier: This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP.
:param pulumi.Input[str] public_ptr_domain_name: The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range.
:param pulumi.Input[bool] set_public_dns: Specifies whether a public DNS 'A' record should be created for the external IP address of this access configuration.
:param pulumi.Input[bool] set_public_ptr: Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated.
:param pulumi.Input['AccessConfigType'] type: The type of configuration. The default and only option is ONE_TO_ONE_NAT.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
__init__
|
AaronFriel/pulumi-google-native
|
python
|
def __init__(__self__, *, external_ipv6: Optional[pulumi.Input[str]]=None, external_ipv6_prefix_length: Optional[pulumi.Input[int]]=None, name: Optional[pulumi.Input[str]]=None, nat_ip: Optional[pulumi.Input[str]]=None, network_tier: Optional[pulumi.Input['AccessConfigNetworkTier']]=None, public_ptr_domain_name: Optional[pulumi.Input[str]]=None, set_public_dns: Optional[pulumi.Input[bool]]=None, set_public_ptr: Optional[pulumi.Input[bool]]=None, type: Optional[pulumi.Input['AccessConfigType']]=None):
"\n An access configuration attached to an instance's network interface. Only one access config per instance is supported.\n :param pulumi.Input[str] external_ipv6: The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically.\n :param pulumi.Input[int] external_ipv6_prefix_length: The prefix length of the external IPv6 range.\n :param pulumi.Input[str] name: The name of this access configuration. The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access.\n :param pulumi.Input[str] nat_ip: An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.\n :param pulumi.Input['AccessConfigNetworkTier'] network_tier: This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP.\n :param pulumi.Input[str] public_ptr_domain_name: The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range.\n :param pulumi.Input[bool] set_public_dns: Specifies whether a public DNS 'A' record should be created for the external IP address of this access configuration.\n :param pulumi.Input[bool] set_public_ptr: Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated.\n :param pulumi.Input['AccessConfigType'] type: The type of configuration. The default and only option is ONE_TO_ONE_NAT.\n "
if (external_ipv6 is not None):
pulumi.set(__self__, 'external_ipv6', external_ipv6)
if (external_ipv6_prefix_length is not None):
pulumi.set(__self__, 'external_ipv6_prefix_length', external_ipv6_prefix_length)
if (name is not None):
pulumi.set(__self__, 'name', name)
if (nat_ip is not None):
pulumi.set(__self__, 'nat_ip', nat_ip)
if (network_tier is not None):
pulumi.set(__self__, 'network_tier', network_tier)
if (public_ptr_domain_name is not None):
pulumi.set(__self__, 'public_ptr_domain_name', public_ptr_domain_name)
if (set_public_dns is not None):
pulumi.set(__self__, 'set_public_dns', set_public_dns)
if (set_public_ptr is not None):
pulumi.set(__self__, 'set_public_ptr', set_public_ptr)
if (type is not None):
pulumi.set(__self__, 'type', type)
|
@property
@pulumi.getter(name='externalIpv6')
def external_ipv6(self) -> Optional[pulumi.Input[str]]:
'\n The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically.\n '
return pulumi.get(self, 'external_ipv6')
| -2,103,181,351,474,099,700
|
The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
external_ipv6
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='externalIpv6')
def external_ipv6(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'external_ipv6')
|
@property
@pulumi.getter(name='externalIpv6PrefixLength')
def external_ipv6_prefix_length(self) -> Optional[pulumi.Input[int]]:
'\n The prefix length of the external IPv6 range.\n '
return pulumi.get(self, 'external_ipv6_prefix_length')
| -3,134,954,939,221,654,500
|
The prefix length of the external IPv6 range.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
external_ipv6_prefix_length
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='externalIpv6PrefixLength')
def external_ipv6_prefix_length(self) -> Optional[pulumi.Input[int]]:
'\n \n '
return pulumi.get(self, 'external_ipv6_prefix_length')
|
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
'\n The name of this access configuration. The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access.\n '
return pulumi.get(self, 'name')
| 5,549,982,464,012,904,000
|
The name of this access configuration. The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
name
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'name')
|
@property
@pulumi.getter(name='natIP')
def nat_ip(self) -> Optional[pulumi.Input[str]]:
'\n An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.\n '
return pulumi.get(self, 'nat_ip')
| -7,582,645,951,335,333,000
|
An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
nat_ip
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='natIP')
def nat_ip(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'nat_ip')
|
@property
@pulumi.getter(name='networkTier')
def network_tier(self) -> Optional[pulumi.Input['AccessConfigNetworkTier']]:
'\n This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP.\n '
return pulumi.get(self, 'network_tier')
| 4,396,969,283,455,139,300
|
This signifies the networking tier used for configuring this access configuration and can only take the following values: PREMIUM, STANDARD. If an AccessConfig is specified without a valid external IP address, an ephemeral IP will be created with this networkTier. If an AccessConfig with a valid external IP address is specified, it must match that of the networkTier associated with the Address resource owning that IP.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
network_tier
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='networkTier')
def network_tier(self) -> Optional[pulumi.Input['AccessConfigNetworkTier']]:
'\n \n '
return pulumi.get(self, 'network_tier')
|
@property
@pulumi.getter(name='publicPtrDomainName')
def public_ptr_domain_name(self) -> Optional[pulumi.Input[str]]:
'\n The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range.\n '
return pulumi.get(self, 'public_ptr_domain_name')
| 5,687,630,777,437,308,000
|
The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
public_ptr_domain_name
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='publicPtrDomainName')
def public_ptr_domain_name(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'public_ptr_domain_name')
|
@property
@pulumi.getter(name='setPublicDns')
def set_public_dns(self) -> Optional[pulumi.Input[bool]]:
"\n Specifies whether a public DNS 'A' record should be created for the external IP address of this access configuration.\n "
return pulumi.get(self, 'set_public_dns')
| -2,032,867,425,056,029,200
|
Specifies whether a public DNS 'A' record should be created for the external IP address of this access configuration.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
set_public_dns
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='setPublicDns')
def set_public_dns(self) -> Optional[pulumi.Input[bool]]:
"\n \n "
return pulumi.get(self, 'set_public_dns')
|
@property
@pulumi.getter(name='setPublicPtr')
def set_public_ptr(self) -> Optional[pulumi.Input[bool]]:
"\n Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated.\n "
return pulumi.get(self, 'set_public_ptr')
| -5,875,192,349,570,517,000
|
Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
set_public_ptr
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='setPublicPtr')
def set_public_ptr(self) -> Optional[pulumi.Input[bool]]:
"\n \n "
return pulumi.get(self, 'set_public_ptr')
|
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input['AccessConfigType']]:
'\n The type of configuration. The default and only option is ONE_TO_ONE_NAT.\n '
return pulumi.get(self, 'type')
| -2,253,677,793,493,363,500
|
The type of configuration. The default and only option is ONE_TO_ONE_NAT.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
type
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input['AccessConfigType']]:
'\n \n '
return pulumi.get(self, 'type')
|
def __init__(__self__, *, enable_nested_virtualization: Optional[pulumi.Input[bool]]=None, enable_uefi_networking: Optional[pulumi.Input[bool]]=None, numa_node_count: Optional[pulumi.Input[int]]=None, threads_per_core: Optional[pulumi.Input[int]]=None, visible_core_count: Optional[pulumi.Input[int]]=None):
"\n Specifies options for controlling advanced machine features. Options that would traditionally be configured in a BIOS belong here. Features that require operating system support may have corresponding entries in the GuestOsFeatures of an Image (e.g., whether or not the OS in the Image supports nested virtualization being enabled or disabled).\n :param pulumi.Input[bool] enable_nested_virtualization: Whether to enable nested virtualization or not (default is false).\n :param pulumi.Input[bool] enable_uefi_networking: Whether to enable UEFI networking for instance creation.\n :param pulumi.Input[int] numa_node_count: The number of vNUMA nodes.\n :param pulumi.Input[int] threads_per_core: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.\n :param pulumi.Input[int] visible_core_count: The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width.\n "
if (enable_nested_virtualization is not None):
pulumi.set(__self__, 'enable_nested_virtualization', enable_nested_virtualization)
if (enable_uefi_networking is not None):
pulumi.set(__self__, 'enable_uefi_networking', enable_uefi_networking)
if (numa_node_count is not None):
pulumi.set(__self__, 'numa_node_count', numa_node_count)
if (threads_per_core is not None):
pulumi.set(__self__, 'threads_per_core', threads_per_core)
if (visible_core_count is not None):
pulumi.set(__self__, 'visible_core_count', visible_core_count)
| -2,016,403,648,159,950,600
|
Specifies options for controlling advanced machine features. Options that would traditionally be configured in a BIOS belong here. Features that require operating system support may have corresponding entries in the GuestOsFeatures of an Image (e.g., whether or not the OS in the Image supports nested virtualization being enabled or disabled).
:param pulumi.Input[bool] enable_nested_virtualization: Whether to enable nested virtualization or not (default is false).
:param pulumi.Input[bool] enable_uefi_networking: Whether to enable UEFI networking for instance creation.
:param pulumi.Input[int] numa_node_count: The number of vNUMA nodes.
:param pulumi.Input[int] threads_per_core: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
:param pulumi.Input[int] visible_core_count: The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
__init__
|
AaronFriel/pulumi-google-native
|
python
|
def __init__(__self__, *, enable_nested_virtualization: Optional[pulumi.Input[bool]]=None, enable_uefi_networking: Optional[pulumi.Input[bool]]=None, numa_node_count: Optional[pulumi.Input[int]]=None, threads_per_core: Optional[pulumi.Input[int]]=None, visible_core_count: Optional[pulumi.Input[int]]=None):
"\n Specifies options for controlling advanced machine features. Options that would traditionally be configured in a BIOS belong here. Features that require operating system support may have corresponding entries in the GuestOsFeatures of an Image (e.g., whether or not the OS in the Image supports nested virtualization being enabled or disabled).\n :param pulumi.Input[bool] enable_nested_virtualization: Whether to enable nested virtualization or not (default is false).\n :param pulumi.Input[bool] enable_uefi_networking: Whether to enable UEFI networking for instance creation.\n :param pulumi.Input[int] numa_node_count: The number of vNUMA nodes.\n :param pulumi.Input[int] threads_per_core: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.\n :param pulumi.Input[int] visible_core_count: The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width.\n "
if (enable_nested_virtualization is not None):
pulumi.set(__self__, 'enable_nested_virtualization', enable_nested_virtualization)
if (enable_uefi_networking is not None):
pulumi.set(__self__, 'enable_uefi_networking', enable_uefi_networking)
if (numa_node_count is not None):
pulumi.set(__self__, 'numa_node_count', numa_node_count)
if (threads_per_core is not None):
pulumi.set(__self__, 'threads_per_core', threads_per_core)
if (visible_core_count is not None):
pulumi.set(__self__, 'visible_core_count', visible_core_count)
|
@property
@pulumi.getter(name='enableNestedVirtualization')
def enable_nested_virtualization(self) -> Optional[pulumi.Input[bool]]:
'\n Whether to enable nested virtualization or not (default is false).\n '
return pulumi.get(self, 'enable_nested_virtualization')
| 2,855,175,695,610,519,600
|
Whether to enable nested virtualization or not (default is false).
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
enable_nested_virtualization
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='enableNestedVirtualization')
def enable_nested_virtualization(self) -> Optional[pulumi.Input[bool]]:
'\n \n '
return pulumi.get(self, 'enable_nested_virtualization')
|
@property
@pulumi.getter(name='enableUefiNetworking')
def enable_uefi_networking(self) -> Optional[pulumi.Input[bool]]:
'\n Whether to enable UEFI networking for instance creation.\n '
return pulumi.get(self, 'enable_uefi_networking')
| 8,976,873,232,754,444,000
|
Whether to enable UEFI networking for instance creation.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
enable_uefi_networking
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='enableUefiNetworking')
def enable_uefi_networking(self) -> Optional[pulumi.Input[bool]]:
'\n \n '
return pulumi.get(self, 'enable_uefi_networking')
|
@property
@pulumi.getter(name='numaNodeCount')
def numa_node_count(self) -> Optional[pulumi.Input[int]]:
'\n The number of vNUMA nodes.\n '
return pulumi.get(self, 'numa_node_count')
| 219,648,407,840,407,680
|
The number of vNUMA nodes.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
numa_node_count
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='numaNodeCount')
def numa_node_count(self) -> Optional[pulumi.Input[int]]:
'\n \n '
return pulumi.get(self, 'numa_node_count')
|
@property
@pulumi.getter(name='threadsPerCore')
def threads_per_core(self) -> Optional[pulumi.Input[int]]:
'\n The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.\n '
return pulumi.get(self, 'threads_per_core')
| -1,823,393,903,935,214,300
|
The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
threads_per_core
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='threadsPerCore')
def threads_per_core(self) -> Optional[pulumi.Input[int]]:
'\n \n '
return pulumi.get(self, 'threads_per_core')
|
@property
@pulumi.getter(name='visibleCoreCount')
def visible_core_count(self) -> Optional[pulumi.Input[int]]:
"\n The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width.\n "
return pulumi.get(self, 'visible_core_count')
| 5,973,611,084,607,250,000
|
The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
visible_core_count
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='visibleCoreCount')
def visible_core_count(self) -> Optional[pulumi.Input[int]]:
"\n \n "
return pulumi.get(self, 'visible_core_count')
|
def __init__(__self__, *, ip_cidr_range: Optional[pulumi.Input[str]]=None, subnetwork_range_name: Optional[pulumi.Input[str]]=None):
"\n An alias IP range attached to an instance's network interface.\n :param pulumi.Input[str] ip_cidr_range: The IP alias ranges to allocate for this interface. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. This range may be a single IP address (such as 10.2.3.4), a netmask (such as /24) or a CIDR-formatted string (such as 10.1.2.0/24).\n :param pulumi.Input[str] subnetwork_range_name: The name of a subnetwork secondary IP range from which to allocate an IP alias range. If not specified, the primary range of the subnetwork is used.\n "
if (ip_cidr_range is not None):
pulumi.set(__self__, 'ip_cidr_range', ip_cidr_range)
if (subnetwork_range_name is not None):
pulumi.set(__self__, 'subnetwork_range_name', subnetwork_range_name)
| -545,162,838,509,802,430
|
An alias IP range attached to an instance's network interface.
:param pulumi.Input[str] ip_cidr_range: The IP alias ranges to allocate for this interface. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. This range may be a single IP address (such as 10.2.3.4), a netmask (such as /24) or a CIDR-formatted string (such as 10.1.2.0/24).
:param pulumi.Input[str] subnetwork_range_name: The name of a subnetwork secondary IP range from which to allocate an IP alias range. If not specified, the primary range of the subnetwork is used.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
__init__
|
AaronFriel/pulumi-google-native
|
python
|
def __init__(__self__, *, ip_cidr_range: Optional[pulumi.Input[str]]=None, subnetwork_range_name: Optional[pulumi.Input[str]]=None):
"\n An alias IP range attached to an instance's network interface.\n :param pulumi.Input[str] ip_cidr_range: The IP alias ranges to allocate for this interface. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. This range may be a single IP address (such as 10.2.3.4), a netmask (such as /24) or a CIDR-formatted string (such as 10.1.2.0/24).\n :param pulumi.Input[str] subnetwork_range_name: The name of a subnetwork secondary IP range from which to allocate an IP alias range. If not specified, the primary range of the subnetwork is used.\n "
if (ip_cidr_range is not None):
pulumi.set(__self__, 'ip_cidr_range', ip_cidr_range)
if (subnetwork_range_name is not None):
pulumi.set(__self__, 'subnetwork_range_name', subnetwork_range_name)
|
@property
@pulumi.getter(name='ipCidrRange')
def ip_cidr_range(self) -> Optional[pulumi.Input[str]]:
'\n The IP alias ranges to allocate for this interface. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. This range may be a single IP address (such as 10.2.3.4), a netmask (such as /24) or a CIDR-formatted string (such as 10.1.2.0/24).\n '
return pulumi.get(self, 'ip_cidr_range')
| -6,477,900,350,978,033,000
|
The IP alias ranges to allocate for this interface. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. This range may be a single IP address (such as 10.2.3.4), a netmask (such as /24) or a CIDR-formatted string (such as 10.1.2.0/24).
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
ip_cidr_range
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='ipCidrRange')
def ip_cidr_range(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'ip_cidr_range')
|
@property
@pulumi.getter(name='subnetworkRangeName')
def subnetwork_range_name(self) -> Optional[pulumi.Input[str]]:
'\n The name of a subnetwork secondary IP range from which to allocate an IP alias range. If not specified, the primary range of the subnetwork is used.\n '
return pulumi.get(self, 'subnetwork_range_name')
| -5,519,317,913,827,085,000
|
The name of a subnetwork secondary IP range from which to allocate an IP alias range. If not specified, the primary range of the subnetwork is used.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
subnetwork_range_name
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='subnetworkRangeName')
def subnetwork_range_name(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'subnetwork_range_name')
|
def __init__(__self__, *, disk_size_gb: Optional[pulumi.Input[str]]=None, interface: Optional[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskInterface']]=None):
"\n :param pulumi.Input[str] disk_size_gb: Specifies the size of the disk in base-2 GB.\n :param pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskInterface'] interface: Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance.\n "
if (disk_size_gb is not None):
pulumi.set(__self__, 'disk_size_gb', disk_size_gb)
if (interface is not None):
pulumi.set(__self__, 'interface', interface)
| 9,095,536,866,581,036,000
|
:param pulumi.Input[str] disk_size_gb: Specifies the size of the disk in base-2 GB.
:param pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskInterface'] interface: Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
__init__
|
AaronFriel/pulumi-google-native
|
python
|
def __init__(__self__, *, disk_size_gb: Optional[pulumi.Input[str]]=None, interface: Optional[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskInterface']]=None):
"\n :param pulumi.Input[str] disk_size_gb: Specifies the size of the disk in base-2 GB.\n :param pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskInterface'] interface: Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance.\n "
if (disk_size_gb is not None):
pulumi.set(__self__, 'disk_size_gb', disk_size_gb)
if (interface is not None):
pulumi.set(__self__, 'interface', interface)
|
@property
@pulumi.getter(name='diskSizeGb')
def disk_size_gb(self) -> Optional[pulumi.Input[str]]:
'\n Specifies the size of the disk in base-2 GB.\n '
return pulumi.get(self, 'disk_size_gb')
| -5,508,228,374,896,146,000
|
Specifies the size of the disk in base-2 GB.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
disk_size_gb
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='diskSizeGb')
def disk_size_gb(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'disk_size_gb')
|
@property
@pulumi.getter
def interface(self) -> Optional[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskInterface']]:
'\n Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance.\n '
return pulumi.get(self, 'interface')
| -8,973,237,185,586,475,000
|
Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
interface
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter
def interface(self) -> Optional[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskInterface']]:
'\n \n '
return pulumi.get(self, 'interface')
|
def __init__(__self__, *, guest_accelerators: Optional[pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]]]=None, local_ssds: Optional[pulumi.Input[Sequence[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs']]]]=None, location_hint: Optional[pulumi.Input[str]]=None, machine_type: Optional[pulumi.Input[str]]=None, maintenance_freeze_duration_hours: Optional[pulumi.Input[int]]=None, maintenance_interval: Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesMaintenanceInterval']]=None, min_cpu_platform: Optional[pulumi.Input[str]]=None):
"\n Properties of the SKU instances being reserved. Next ID: 9\n :param pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]] guest_accelerators: Specifies accelerator type and count.\n :param pulumi.Input[Sequence[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs']]] local_ssds: Specifies amount of local ssd to reserve with each instance. The type of disk is local-ssd.\n :param pulumi.Input[str] location_hint: An opaque location hint used to place the allocation close to other resources. This field is for use by internal tools that use the public API.\n :param pulumi.Input[str] machine_type: Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern.\n :param pulumi.Input[int] maintenance_freeze_duration_hours: Specifies the number of hours after reservation creation where instances using the reservation won't be scheduled for maintenance.\n :param pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesMaintenanceInterval'] maintenance_interval: For more information about maintenance intervals, see Setting maintenance intervals.\n :param pulumi.Input[str] min_cpu_platform: Minimum cpu platform the reservation.\n "
if (guest_accelerators is not None):
pulumi.set(__self__, 'guest_accelerators', guest_accelerators)
if (local_ssds is not None):
pulumi.set(__self__, 'local_ssds', local_ssds)
if (location_hint is not None):
pulumi.set(__self__, 'location_hint', location_hint)
if (machine_type is not None):
pulumi.set(__self__, 'machine_type', machine_type)
if (maintenance_freeze_duration_hours is not None):
pulumi.set(__self__, 'maintenance_freeze_duration_hours', maintenance_freeze_duration_hours)
if (maintenance_interval is not None):
pulumi.set(__self__, 'maintenance_interval', maintenance_interval)
if (min_cpu_platform is not None):
pulumi.set(__self__, 'min_cpu_platform', min_cpu_platform)
| 7,669,833,283,022,695,000
|
Properties of the SKU instances being reserved. Next ID: 9
:param pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]] guest_accelerators: Specifies accelerator type and count.
:param pulumi.Input[Sequence[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs']]] local_ssds: Specifies amount of local ssd to reserve with each instance. The type of disk is local-ssd.
:param pulumi.Input[str] location_hint: An opaque location hint used to place the allocation close to other resources. This field is for use by internal tools that use the public API.
:param pulumi.Input[str] machine_type: Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern.
:param pulumi.Input[int] maintenance_freeze_duration_hours: Specifies the number of hours after reservation creation where instances using the reservation won't be scheduled for maintenance.
:param pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesMaintenanceInterval'] maintenance_interval: For more information about maintenance intervals, see Setting maintenance intervals.
:param pulumi.Input[str] min_cpu_platform: Minimum cpu platform the reservation.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
__init__
|
AaronFriel/pulumi-google-native
|
python
|
def __init__(__self__, *, guest_accelerators: Optional[pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]]]=None, local_ssds: Optional[pulumi.Input[Sequence[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs']]]]=None, location_hint: Optional[pulumi.Input[str]]=None, machine_type: Optional[pulumi.Input[str]]=None, maintenance_freeze_duration_hours: Optional[pulumi.Input[int]]=None, maintenance_interval: Optional[pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesMaintenanceInterval']]=None, min_cpu_platform: Optional[pulumi.Input[str]]=None):
"\n Properties of the SKU instances being reserved. Next ID: 9\n :param pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]] guest_accelerators: Specifies accelerator type and count.\n :param pulumi.Input[Sequence[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs']]] local_ssds: Specifies amount of local ssd to reserve with each instance. The type of disk is local-ssd.\n :param pulumi.Input[str] location_hint: An opaque location hint used to place the allocation close to other resources. This field is for use by internal tools that use the public API.\n :param pulumi.Input[str] machine_type: Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern.\n :param pulumi.Input[int] maintenance_freeze_duration_hours: Specifies the number of hours after reservation creation where instances using the reservation won't be scheduled for maintenance.\n :param pulumi.Input['AllocationSpecificSKUAllocationReservedInstancePropertiesMaintenanceInterval'] maintenance_interval: For more information about maintenance intervals, see Setting maintenance intervals.\n :param pulumi.Input[str] min_cpu_platform: Minimum cpu platform the reservation.\n "
if (guest_accelerators is not None):
pulumi.set(__self__, 'guest_accelerators', guest_accelerators)
if (local_ssds is not None):
pulumi.set(__self__, 'local_ssds', local_ssds)
if (location_hint is not None):
pulumi.set(__self__, 'location_hint', location_hint)
if (machine_type is not None):
pulumi.set(__self__, 'machine_type', machine_type)
if (maintenance_freeze_duration_hours is not None):
pulumi.set(__self__, 'maintenance_freeze_duration_hours', maintenance_freeze_duration_hours)
if (maintenance_interval is not None):
pulumi.set(__self__, 'maintenance_interval', maintenance_interval)
if (min_cpu_platform is not None):
pulumi.set(__self__, 'min_cpu_platform', min_cpu_platform)
|
@property
@pulumi.getter(name='guestAccelerators')
def guest_accelerators(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]]]:
'\n Specifies accelerator type and count.\n '
return pulumi.get(self, 'guest_accelerators')
| 404,136,063,061,147,700
|
Specifies accelerator type and count.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
guest_accelerators
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='guestAccelerators')
def guest_accelerators(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]]]:
'\n \n '
return pulumi.get(self, 'guest_accelerators')
|
@property
@pulumi.getter(name='localSsds')
def local_ssds(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs']]]]:
'\n Specifies amount of local ssd to reserve with each instance. The type of disk is local-ssd.\n '
return pulumi.get(self, 'local_ssds')
| -8,683,957,141,601,295,000
|
Specifies amount of local ssd to reserve with each instance. The type of disk is local-ssd.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
local_ssds
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='localSsds')
def local_ssds(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDiskArgs']]]]:
'\n \n '
return pulumi.get(self, 'local_ssds')
|
@property
@pulumi.getter(name='locationHint')
def location_hint(self) -> Optional[pulumi.Input[str]]:
'\n An opaque location hint used to place the allocation close to other resources. This field is for use by internal tools that use the public API.\n '
return pulumi.get(self, 'location_hint')
| 2,775,091,061,924,199,000
|
An opaque location hint used to place the allocation close to other resources. This field is for use by internal tools that use the public API.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
location_hint
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='locationHint')
def location_hint(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'location_hint')
|
@property
@pulumi.getter(name='machineType')
def machine_type(self) -> Optional[pulumi.Input[str]]:
'\n Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern.\n '
return pulumi.get(self, 'machine_type')
| -261,754,411,147,046,600
|
Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern.
|
sdk/python/pulumi_google_native/compute/alpha/_inputs.py
|
machine_type
|
AaronFriel/pulumi-google-native
|
python
|
@property
@pulumi.getter(name='machineType')
def machine_type(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'machine_type')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.